xref: /openbmc/linux/arch/arm64/kernel/proton-pack.c (revision dee435be)
1455697adSWill Deacon // SPDX-License-Identifier: GPL-2.0-only
2455697adSWill Deacon /*
3c4792b6dSWill Deacon  * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4455697adSWill Deacon  * detailed at:
5455697adSWill Deacon  *
6455697adSWill Deacon  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7455697adSWill Deacon  *
8455697adSWill Deacon  * This code was originally written hastily under an awful lot of stress and so
9455697adSWill Deacon  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10455697adSWill Deacon  * instantly makes me feel ill. Thanks, Jann. Thann.
11455697adSWill Deacon  *
12455697adSWill Deacon  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13455697adSWill Deacon  * Copyright (C) 2020 Google LLC
14455697adSWill Deacon  *
15455697adSWill Deacon  * "If there's something strange in your neighbourhood, who you gonna call?"
16455697adSWill Deacon  *
17455697adSWill Deacon  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18455697adSWill Deacon  */
19455697adSWill Deacon 
20d4647f0aSWill Deacon #include <linux/arm-smccc.h>
21d4647f0aSWill Deacon #include <linux/cpu.h>
22455697adSWill Deacon #include <linux/device.h>
239e78b659SWill Deacon #include <linux/nospec.h>
24d4647f0aSWill Deacon #include <linux/prctl.h>
255c8b0cbdSWill Deacon #include <linux/sched/task_stack.h>
26d4647f0aSWill Deacon 
277cda23daSWill Deacon #include <asm/insn.h>
28d4647f0aSWill Deacon #include <asm/spectre.h>
29d4647f0aSWill Deacon #include <asm/traps.h>
30b881cdceSWill Deacon #include <asm/virt.h>
31d4647f0aSWill Deacon 
32d4647f0aSWill Deacon /*
33d4647f0aSWill Deacon  * We try to ensure that the mitigation state can never change as the result of
34d4647f0aSWill Deacon  * onlining a late CPU.
35d4647f0aSWill Deacon  */
/*
 * Lock-free monotonic update of *oldp to @new: the recorded state only ever
 * moves "upwards" (a lower-or-equal @new is ignored), and a WARN fires if we
 * would change state after capabilities have been finalized, since userspace
 * may already have sampled the old value.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* Never downgrade (or re-write an equal) state. */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
51455697adSWill Deacon 
52455697adSWill Deacon /*
53455697adSWill Deacon  * Spectre v1.
54455697adSWill Deacon  *
55455697adSWill Deacon  * The kernel can't protect userspace for this one: it's each person for
56455697adSWill Deacon  * themselves. Advertise what we're doing and be done with it.
57455697adSWill Deacon  */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* Unconditional: the reported mitigation never varies per-CPU. */
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
63d4647f0aSWill Deacon 
64d4647f0aSWill Deacon /*
65d4647f0aSWill Deacon  * Spectre v2.
66d4647f0aSWill Deacon  *
67d4647f0aSWill Deacon  * This one sucks. A CPU is either:
68d4647f0aSWill Deacon  *
69d4647f0aSWill Deacon  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
70d4647f0aSWill Deacon  * - Mitigated in hardware and listed in our "safe list".
71d4647f0aSWill Deacon  * - Mitigated in software by firmware.
72ea8f8c99SWill Deacon  * - Mitigated in software by a CPU-specific dance in the kernel and a
73ea8f8c99SWill Deacon  *   firmware call at EL2.
74d4647f0aSWill Deacon  * - Vulnerable.
75d4647f0aSWill Deacon  *
76d4647f0aSWill Deacon  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
77d4647f0aSWill Deacon  * different camps.
78d4647f0aSWill Deacon  */
/* System-wide Spectre-v2 state, only ever raised via update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	/* Any argument is ignored; mere presence of the option disables. */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
88d4647f0aSWill Deacon 
89d4647f0aSWill Deacon static bool spectre_v2_mitigations_off(void)
90d4647f0aSWill Deacon {
91d4647f0aSWill Deacon 	bool ret = __nospectre_v2 || cpu_mitigations_off();
92d4647f0aSWill Deacon 
93d4647f0aSWill Deacon 	if (ret)
94d4647f0aSWill Deacon 		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
95d4647f0aSWill Deacon 
96d4647f0aSWill Deacon 	return ret;
97d4647f0aSWill Deacon }
98d4647f0aSWill Deacon 
99*dee435beSJames Morse static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
100*dee435beSJames Morse {
101*dee435beSJames Morse 	switch (bhb_state) {
102*dee435beSJames Morse 	case SPECTRE_UNAFFECTED:
103*dee435beSJames Morse 		return "";
104*dee435beSJames Morse 	default:
105*dee435beSJames Morse 	case SPECTRE_VULNERABLE:
106*dee435beSJames Morse 		return ", but not BHB";
107*dee435beSJames Morse 	case SPECTRE_MITIGATED:
108*dee435beSJames Morse 		return ", BHB";
109*dee435beSJames Morse 	}
110*dee435beSJames Morse }
111*dee435beSJames Morse 
/*
 * sysfs report for Spectre-v2, combining the v2 state with the
 * Spectre-BHB state obtained from arm64_get_spectre_bhb_state().
 */
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		/* e.g. "Mitigation: CSV2, BHB" or "... hardening, but not BHB". */
		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
138d4647f0aSWill Deacon 
/*
 * Per-CPU hardware Spectre-v2 state: unaffected when either
 * ID_AA64PFR0_EL1.CSV2 is set or the CPU is on the known-safe MIDR list,
 * otherwise vulnerable (a firmware mitigation may still apply).
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
165d4647f0aSWill Deacon 
166d4647f0aSWill Deacon static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
167d4647f0aSWill Deacon {
168d4647f0aSWill Deacon 	int ret;
169d4647f0aSWill Deacon 	struct arm_smccc_res res;
170d4647f0aSWill Deacon 
171d4647f0aSWill Deacon 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
172d4647f0aSWill Deacon 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
173d4647f0aSWill Deacon 
174d4647f0aSWill Deacon 	ret = res.a0;
175d4647f0aSWill Deacon 	switch (ret) {
176d4647f0aSWill Deacon 	case SMCCC_RET_SUCCESS:
177d4647f0aSWill Deacon 		return SPECTRE_MITIGATED;
178d4647f0aSWill Deacon 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
179d4647f0aSWill Deacon 		return SPECTRE_UNAFFECTED;
180d4647f0aSWill Deacon 	default:
181d4647f0aSWill Deacon 		fallthrough;
182d4647f0aSWill Deacon 	case SMCCC_RET_NOT_SUPPORTED:
183d4647f0aSWill Deacon 		return SPECTRE_VULNERABLE;
184d4647f0aSWill Deacon 	}
185d4647f0aSWill Deacon }
186d4647f0aSWill Deacon 
187d4647f0aSWill Deacon bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
188d4647f0aSWill Deacon {
189d4647f0aSWill Deacon 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
190d4647f0aSWill Deacon 
191d4647f0aSWill Deacon 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
192d4647f0aSWill Deacon 		return false;
193d4647f0aSWill Deacon 
194d4647f0aSWill Deacon 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
195d4647f0aSWill Deacon 		return false;
196d4647f0aSWill Deacon 
197d4647f0aSWill Deacon 	return true;
198d4647f0aSWill Deacon }
199d4647f0aSWill Deacon 
/* Report the recorded system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
204d4647f0aSWill Deacon 
/* Per-CPU branch-predictor hardening state: callback + hyp vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/*
 * Install @fn as this CPU's branch-predictor hardening callback and, when
 * hyp mode is available, also select the Spectre "direct" hyp vector slot.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}
220d4647f0aSWill Deacon 
/* Invoke the firmware ARCH_WORKAROUND_1 via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Invoke the firmware ARCH_WORKAROUND_1 via the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
230d4647f0aSWill Deacon 
/*
 * CPU-specific Spectre-v2 callback for Qualcomm parts: stash the real
 * return address, execute sixteen "bl . + 4" instructions (each a call to
 * the immediately-following instruction, clobbering x30), then restore
 * x30. Per the function name this overwrites the branch predictor's
 * link/return stack with benign entries.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
242d4647f0aSWill Deacon 
243ea8f8c99SWill Deacon static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
244ea8f8c99SWill Deacon {
245ea8f8c99SWill Deacon 	u32 midr = read_cpuid_id();
246ea8f8c99SWill Deacon 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
247ea8f8c99SWill Deacon 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
248ea8f8c99SWill Deacon 		return NULL;
249ea8f8c99SWill Deacon 
250ea8f8c99SWill Deacon 	return qcom_link_stack_sanitisation;
251ea8f8c99SWill Deacon }
252ea8f8c99SWill Deacon 
/*
 * Enable the firmware-based Spectre-v2 mitigation (ARCH_WORKAROUND_1),
 * preferring a CPU-specific callback when one exists. Returns the
 * resulting mitigation state for this CPU.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	/* The command line can still veto an available mitigation. */
	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		/* No usable conduit to reach firmware. */
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
286d4647f0aSWill Deacon 
/*
 * cpufeature enable hook: determine and record this CPU's Spectre-v2
 * state, trying the hardware (CSV2/safelist) state first and falling
 * back to the firmware mitigation.
 */
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
2999e78b659SWill Deacon 
300c2876207SWill Deacon /*
301c4792b6dSWill Deacon  * Spectre-v3a.
302c4792b6dSWill Deacon  *
303c4792b6dSWill Deacon  * Phew, there's not an awful lot to do here! We just instruct EL2 to use
304c4792b6dSWill Deacon  * an indirect trampoline for the hyp vectors so that guests can't read
305c4792b6dSWill Deacon  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
306c4792b6dSWill Deacon  */
/*
 * cpufeature matcher: this CPU is vulnerable to Spectre-v3a iff its MIDR
 * is on the known-unsafe list. Must run per-CPU with preemption disabled.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}
318cd1f56b9SWill Deacon 
/*
 * On affected CPUs, offset this CPU's hyp vector slot to the "indirect"
 * variant so guests can't learn VBAR_EL2 (see comment above).
 */
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}
326b881cdceSWill Deacon 
327c2876207SWill Deacon /*
328c2876207SWill Deacon  * Spectre v4.
329c2876207SWill Deacon  *
330c2876207SWill Deacon  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
331c2876207SWill Deacon  * either:
332c2876207SWill Deacon  *
333c2876207SWill Deacon  * - Mitigated in hardware and listed in our "safe list".
334c2876207SWill Deacon  * - Mitigated in hardware via PSTATE.SSBS.
335c2876207SWill Deacon  * - Mitigated in software by firmware (sometimes referred to as SSBD).
336c2876207SWill Deacon  *
337c2876207SWill Deacon  * Wait, that doesn't sound so bad, does it? Keep reading...
338c2876207SWill Deacon  *
339c2876207SWill Deacon  * A major source of headaches is that the software mitigation is enabled both
340c2876207SWill Deacon  * on a per-task basis, but can also be forced on for the kernel, necessitating
341c2876207SWill Deacon  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
342c2876207SWill Deacon  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
343c2876207SWill Deacon  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
344c2876207SWill Deacon  * so you can have systems that have both firmware and SSBS mitigations. This
345c2876207SWill Deacon  * means we actually have to reject late onlining of CPUs with mitigations if
346c2876207SWill Deacon  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
347c2876207SWill Deacon  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
348c2876207SWill Deacon  *
349c2876207SWill Deacon  * The only good part is that if the firmware mitigation is present, then it is
350c2876207SWill Deacon  * present for all CPUs, meaning we don't have to worry about late onlining of a
351c2876207SWill Deacon  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
352c2876207SWill Deacon  *
353c2876207SWill Deacon  * Give me a VAX-11/780 any day of the week...
354c2876207SWill Deacon  */
/* System-wide Spectre-v4 state, only ever raised via update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Spectre-v4 mitigation policy selected by the "ssbd=" command-line option. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping of "ssbd=" argument strings to mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		/*
		 * Prefix match: text after a recognised parameter string
		 * is accepted and ignored.
		 */
		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
3969e78b659SWill Deacon 
397c2876207SWill Deacon /*
398c2876207SWill Deacon  * Because this was all written in a rush by people working in different silos,
399c2876207SWill Deacon  * we've ended up with multiple command line options to control the same thing.
400c2876207SWill Deacon  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
401c2876207SWill Deacon  * with contradictory parameters. The mitigation is always either "off",
402c2876207SWill Deacon  * "dynamic" or "on".
403c2876207SWill Deacon  */
404c2876207SWill Deacon static bool spectre_v4_mitigations_off(void)
405c2876207SWill Deacon {
406c2876207SWill Deacon 	bool ret = cpu_mitigations_off() ||
407c2876207SWill Deacon 		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
408c2876207SWill Deacon 
409c2876207SWill Deacon 	if (ret)
410c2876207SWill Deacon 		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
411c2876207SWill Deacon 
412c2876207SWill Deacon 	return ret;
413c2876207SWill Deacon }
414c2876207SWill Deacon 
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

/* Is the mitigation unconditionally forced on ("ssbd=force-on")? */
static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
427c2876207SWill Deacon 
428c2876207SWill Deacon ssize_t cpu_show_spec_store_bypass(struct device *dev,
429c2876207SWill Deacon 				   struct device_attribute *attr, char *buf)
430c2876207SWill Deacon {
431c2876207SWill Deacon 	switch (spectre_v4_state) {
432c2876207SWill Deacon 	case SPECTRE_UNAFFECTED:
433c2876207SWill Deacon 		return sprintf(buf, "Not affected\n");
434c2876207SWill Deacon 	case SPECTRE_MITIGATED:
435c2876207SWill Deacon 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
436c2876207SWill Deacon 	case SPECTRE_VULNERABLE:
437c2876207SWill Deacon 		fallthrough;
438c2876207SWill Deacon 	default:
439c2876207SWill Deacon 		return sprintf(buf, "Vulnerable\n");
440c2876207SWill Deacon 	}
441c2876207SWill Deacon }
442c2876207SWill Deacon 
/* Report the recorded system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
447c2876207SWill Deacon 
/*
 * Per-CPU hardware Spectre-v4 state: unaffected when on the known-safe
 * MIDR list, mitigated when the CPU has SSBS, vulnerable otherwise
 * (a firmware mitigation may still apply).
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
469c2876207SWill Deacon 
470c2876207SWill Deacon static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
471c2876207SWill Deacon {
472c2876207SWill Deacon 	int ret;
473c2876207SWill Deacon 	struct arm_smccc_res res;
474c2876207SWill Deacon 
475c2876207SWill Deacon 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
476c2876207SWill Deacon 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
477c2876207SWill Deacon 
478c2876207SWill Deacon 	ret = res.a0;
479c2876207SWill Deacon 	switch (ret) {
480c2876207SWill Deacon 	case SMCCC_RET_SUCCESS:
481c2876207SWill Deacon 		return SPECTRE_MITIGATED;
482c2876207SWill Deacon 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
483c2876207SWill Deacon 		fallthrough;
484c2876207SWill Deacon 	case SMCCC_RET_NOT_REQUIRED:
485c2876207SWill Deacon 		return SPECTRE_UNAFFECTED;
486c2876207SWill Deacon 	default:
487c2876207SWill Deacon 		fallthrough;
488c2876207SWill Deacon 	case SMCCC_RET_NOT_SUPPORTED:
489c2876207SWill Deacon 		return SPECTRE_VULNERABLE;
490c2876207SWill Deacon 	}
491c2876207SWill Deacon }
492c2876207SWill Deacon 
493c2876207SWill Deacon bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
494c2876207SWill Deacon {
495c2876207SWill Deacon 	enum mitigation_state state;
496c2876207SWill Deacon 
497c2876207SWill Deacon 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
498c2876207SWill Deacon 
499c2876207SWill Deacon 	state = spectre_v4_get_cpu_hw_mitigation_state();
500c2876207SWill Deacon 	if (state == SPECTRE_VULNERABLE)
501c2876207SWill Deacon 		state = spectre_v4_get_cpu_fw_mitigation_state();
502c2876207SWill Deacon 
503c2876207SWill Deacon 	return state != SPECTRE_UNAFFECTED;
504c2876207SWill Deacon }
505c2876207SWill Deacon 
/*
 * Undef-instruction handler for "MSR SSBS, #imm" writes from the kernel:
 * mirrors the immediate into the saved PSTATE.SSBS bit and skips the
 * instruction. Returns 0 when handled, 1 (user mode) to leave the fault
 * to the normal undef path.
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Step over the 4-byte MSR instruction we just emulated. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

/* Match "MSR SSBS, #imm" for any immediate (the Imm field is masked out). */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
525c2876207SWill Deacon 
/*
 * Enable the SSBS-based hardware mitigation on this CPU (registering the
 * MSR-SSBS emulation hook once, system-wide) and apply the command-line
 * policy. Returns the resulting mitigation state.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Register the emulation hook exactly once across all CPUs. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		/* Mitigation vetoed: set DSSBS and PSTATE.SSBS=1. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}
5579e78b659SWill Deacon 
5589e78b659SWill Deacon /*
559c2876207SWill Deacon  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
560c2876207SWill Deacon  * we fallthrough and check whether firmware needs to be called on this CPU.
561c2876207SWill Deacon  */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation disabled: keep the branch that skips the firmware call. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: the firmware dance isn't needed. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Dynamic policy: NOP the branch so the firmware check runs. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
577c2876207SWill Deacon 
578c2876207SWill Deacon /*
579c2876207SWill Deacon  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
580c2876207SWill Deacon  * to call into firmware to adjust the mitigation state.
581c2876207SWill Deacon  */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					       __le32 *origptr,
					       __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	/* Pick the instruction matching the SMCCC conduit in use. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No conduit available: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}
603c2876207SWill Deacon 
/*
 * Enable the firmware-based Spectre-v4 mitigation (ARCH_WORKAROUND_2),
 * honouring the command-line policy. Returns the resulting state.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Explicitly ask firmware to turn the workaround off. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* Dynamic policy: flag that this CPU must call firmware on entry/exit. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
624c2876207SWill Deacon 
/*
 * cpufeature enable hook: determine and record this CPU's Spectre-v4
 * state, preferring the hardware (SSBS/safelist) mitigation and falling
 * back to firmware.
 */
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
637c2876207SWill Deacon 
638c2876207SWill Deacon static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
639c2876207SWill Deacon {
640c2876207SWill Deacon 	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
641c2876207SWill Deacon 
642c2876207SWill Deacon 	if (state)
643c2876207SWill Deacon 		regs->pstate |= bit;
644c2876207SWill Deacon 	else
645c2876207SWill Deacon 		regs->pstate &= ~bit;
646c2876207SWill Deacon }
647c2876207SWill Deacon 
648c2876207SWill Deacon void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
649c2876207SWill Deacon {
650c2876207SWill Deacon 	struct pt_regs *regs = task_pt_regs(tsk);
651c2876207SWill Deacon 	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
652c2876207SWill Deacon 
653c2876207SWill Deacon 	if (spectre_v4_mitigations_off())
654c2876207SWill Deacon 		ssbs = true;
655c2876207SWill Deacon 	else if (spectre_v4_mitigations_dynamic() && !kthread)
656c2876207SWill Deacon 		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
657c2876207SWill Deacon 
658c2876207SWill Deacon 	__update_pstate_ssbs(regs, ssbs);
659c2876207SWill Deacon }
660c2876207SWill Deacon 
661c2876207SWill Deacon /*
662c2876207SWill Deacon  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
663c2876207SWill Deacon  * This is interesting because the "speculation disabled" behaviour can be
664c2876207SWill Deacon  * configured so that it is preserved across exec(), which means that the
665c2876207SWill Deacon  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
666c2876207SWill Deacon  * from userspace.
6679e78b659SWill Deacon  */
/*
 * Mark @task as wanting the Spectre-v4 mitigation (speculation disabled).
 * Any previous "disable until exec" request is superseded, and TIF_SSBD is
 * set so spectre_v4_enable_task_mitigation() clears PSTATE.SSBS for it.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
674780c083aSWill Deacon 
/*
 * Mark @task as not wanting the Spectre-v4 mitigation (speculation enabled).
 * Clears all three per-task flags: the noexec request, the disable request
 * and TIF_SSBD, which spectre_v4_enable_task_mitigation() consults.
 */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
681780c083aSWill Deacon 
/*
 * Apply a PR_SPEC_STORE_BYPASS prctl() request to @task.
 *
 * Note the inverted terminology: the prctl() controls *speculation*, so
 * PR_SPEC_ENABLE means "disable the mitigation" and PR_SPEC_DISABLE means
 * "enable the mitigation". Requests that conflict with a force-disable flag
 * or with the system-wide boot policy fail with -EPERM; unknown ctrl values
 * return -ERANGE. On success the task's saved PSTATE.SSBS is updated
 * immediately via spectre_v4_enable_task_mitigation().
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/*
		 * Same as PR_SPEC_FORCE_DISABLE (redundant on the fallthrough
		 * path, but required when this case is entered directly).
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new choice into the task's saved PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
7449e78b659SWill Deacon 
7459e78b659SWill Deacon int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
7469e78b659SWill Deacon 			     unsigned long ctrl)
7479e78b659SWill Deacon {
7489e78b659SWill Deacon 	switch (which) {
7499e78b659SWill Deacon 	case PR_SPEC_STORE_BYPASS:
7509e78b659SWill Deacon 		return ssbd_prctl_set(task, ctrl);
7519e78b659SWill Deacon 	default:
7529e78b659SWill Deacon 		return -ENODEV;
7539e78b659SWill Deacon 	}
7549e78b659SWill Deacon }
7559e78b659SWill Deacon 
7569e78b659SWill Deacon static int ssbd_prctl_get(struct task_struct *task)
7579e78b659SWill Deacon {
758c2876207SWill Deacon 	switch (spectre_v4_state) {
759c2876207SWill Deacon 	case SPECTRE_UNAFFECTED:
760c2876207SWill Deacon 		return PR_SPEC_NOT_AFFECTED;
761c2876207SWill Deacon 	case SPECTRE_MITIGATED:
762c2876207SWill Deacon 		if (spectre_v4_mitigations_on())
763c2876207SWill Deacon 			return PR_SPEC_NOT_AFFECTED;
764c2876207SWill Deacon 
765c2876207SWill Deacon 		if (spectre_v4_mitigations_dynamic())
766c2876207SWill Deacon 			break;
767c2876207SWill Deacon 
768c2876207SWill Deacon 		/* Mitigations are disabled, so we're vulnerable. */
769c2876207SWill Deacon 		fallthrough;
770c2876207SWill Deacon 	case SPECTRE_VULNERABLE:
771c2876207SWill Deacon 		fallthrough;
772c2876207SWill Deacon 	default:
773c2876207SWill Deacon 		return PR_SPEC_ENABLE;
774c2876207SWill Deacon 	}
775c2876207SWill Deacon 
776c2876207SWill Deacon 	/* Check the mitigation state for this task */
7779e78b659SWill Deacon 	if (task_spec_ssb_force_disable(task))
7789e78b659SWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
779c2876207SWill Deacon 
780780c083aSWill Deacon 	if (task_spec_ssb_noexec(task))
781780c083aSWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
782780c083aSWill Deacon 
7839e78b659SWill Deacon 	if (task_spec_ssb_disable(task))
7849e78b659SWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
785c2876207SWill Deacon 
7869e78b659SWill Deacon 	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
7879e78b659SWill Deacon }
7889e78b659SWill Deacon 
7899e78b659SWill Deacon int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
7909e78b659SWill Deacon {
7919e78b659SWill Deacon 	switch (which) {
7929e78b659SWill Deacon 	case PR_SPEC_STORE_BYPASS:
7939e78b659SWill Deacon 		return ssbd_prctl_get(task);
7949e78b659SWill Deacon 	default:
7959e78b659SWill Deacon 		return -ENODEV;
7969e78b659SWill Deacon 	}
7979e78b659SWill Deacon }
798ba268923SJames Morse 
/*
 * System-wide Spectre-BHB mitigation state. Only read through the accessor
 * below within this view of the file.
 */
static enum mitigation_state spectre_bhb_state;

/* Report the current system-wide Spectre-BHB mitigation state. */
enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}
805*dee435beSJames Morse 
/* Patched to NOP when enabled */
/*
 * Alternatives callback for the Spectre-BHB loop mitigation. Deliberately
 * writes no replacement instruction (per the comment above, the site ends
 * up as a NOP when the mitigation is enabled); it only sanity-checks that
 * the patch site is exactly one instruction wide.
 */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						     __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);
}
813ba268923SJames Morse 
/* Patched to NOP when enabled */
/*
 * Alternatives callback for the Spectre-BHB firmware mitigation. Like the
 * loop-mitigation callback above, it intentionally writes nothing and only
 * asserts that the patch site is a single instruction.
 */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);
}
821