xref: /openbmc/linux/arch/arm64/kernel/proton-pack.c (revision 460e70e2)
1455697adSWill Deacon // SPDX-License-Identifier: GPL-2.0-only
2455697adSWill Deacon /*
3c4792b6dSWill Deacon  * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4455697adSWill Deacon  * detailed at:
5455697adSWill Deacon  *
6455697adSWill Deacon  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7455697adSWill Deacon  *
8455697adSWill Deacon  * This code was originally written hastily under an awful lot of stress and so
9455697adSWill Deacon  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10455697adSWill Deacon  * instantly makes me feel ill. Thanks, Jann. Thann.
11455697adSWill Deacon  *
12455697adSWill Deacon  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13455697adSWill Deacon  * Copyright (C) 2020 Google LLC
14455697adSWill Deacon  *
15455697adSWill Deacon  * "If there's something strange in your neighbourhood, who you gonna call?"
16455697adSWill Deacon  *
17455697adSWill Deacon  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18455697adSWill Deacon  */
19455697adSWill Deacon 
20d4647f0aSWill Deacon #include <linux/arm-smccc.h>
2158c9a506SJames Morse #include <linux/bpf.h>
22d4647f0aSWill Deacon #include <linux/cpu.h>
23455697adSWill Deacon #include <linux/device.h>
249e78b659SWill Deacon #include <linux/nospec.h>
25d4647f0aSWill Deacon #include <linux/prctl.h>
265c8b0cbdSWill Deacon #include <linux/sched/task_stack.h>
27d4647f0aSWill Deacon 
28558c303cSJames Morse #include <asm/debug-monitors.h>
297cda23daSWill Deacon #include <asm/insn.h>
30d4647f0aSWill Deacon #include <asm/spectre.h>
31d4647f0aSWill Deacon #include <asm/traps.h>
32558c303cSJames Morse #include <asm/vectors.h>
33b881cdceSWill Deacon #include <asm/virt.h>
34d4647f0aSWill Deacon 
35d4647f0aSWill Deacon /*
36d4647f0aSWill Deacon  * We try to ensure that the mitigation state can never change as the result of
37d4647f0aSWill Deacon  * onlining a late CPU.
38d4647f0aSWill Deacon  */
update_mitigation_state(enum mitigation_state * oldp,enum mitigation_state new)39d4647f0aSWill Deacon static void update_mitigation_state(enum mitigation_state *oldp,
40d4647f0aSWill Deacon 				    enum mitigation_state new)
41d4647f0aSWill Deacon {
42d4647f0aSWill Deacon 	enum mitigation_state state;
43d4647f0aSWill Deacon 
44d4647f0aSWill Deacon 	do {
45d4647f0aSWill Deacon 		state = READ_ONCE(*oldp);
46d4647f0aSWill Deacon 		if (new <= state)
47d4647f0aSWill Deacon 			break;
48d4647f0aSWill Deacon 
49d4647f0aSWill Deacon 		/* Userspace almost certainly can't deal with this. */
50d4647f0aSWill Deacon 		if (WARN_ON(system_capabilities_finalized()))
51d4647f0aSWill Deacon 			break;
52d4647f0aSWill Deacon 	} while (cmpxchg_relaxed(oldp, state, new) != state);
53d4647f0aSWill Deacon }
54455697adSWill Deacon 
55455697adSWill Deacon /*
56455697adSWill Deacon  * Spectre v1.
57455697adSWill Deacon  *
58455697adSWill Deacon  * The kernel can't protect userspace for this one: it's each person for
59455697adSWill Deacon  * themselves. Advertise what we're doing and be done with it.
60455697adSWill Deacon  */
cpu_show_spectre_v1(struct device * dev,struct device_attribute * attr,char * buf)61455697adSWill Deacon ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
62455697adSWill Deacon 			    char *buf)
63455697adSWill Deacon {
64455697adSWill Deacon 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
65455697adSWill Deacon }
66d4647f0aSWill Deacon 
67d4647f0aSWill Deacon /*
68d4647f0aSWill Deacon  * Spectre v2.
69d4647f0aSWill Deacon  *
70d4647f0aSWill Deacon  * This one sucks. A CPU is either:
71d4647f0aSWill Deacon  *
72d4647f0aSWill Deacon  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
73d4647f0aSWill Deacon  * - Mitigated in hardware and listed in our "safe list".
74d4647f0aSWill Deacon  * - Mitigated in software by firmware.
75ea8f8c99SWill Deacon  * - Mitigated in software by a CPU-specific dance in the kernel and a
76ea8f8c99SWill Deacon  *   firmware call at EL2.
77d4647f0aSWill Deacon  * - Vulnerable.
78d4647f0aSWill Deacon  *
79d4647f0aSWill Deacon  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
80d4647f0aSWill Deacon  * different camps.
81d4647f0aSWill Deacon  */
/* System-wide Spectre-v2 mitigation state, only ever raised (see above). */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
/* "nospectre_v2" takes no argument: its mere presence disables the mitigation. */
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
91d4647f0aSWill Deacon 
spectre_v2_mitigations_off(void)92d4647f0aSWill Deacon static bool spectre_v2_mitigations_off(void)
93d4647f0aSWill Deacon {
94d4647f0aSWill Deacon 	bool ret = __nospectre_v2 || cpu_mitigations_off();
95d4647f0aSWill Deacon 
96d4647f0aSWill Deacon 	if (ret)
97d4647f0aSWill Deacon 		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
98d4647f0aSWill Deacon 
99d4647f0aSWill Deacon 	return ret;
100d4647f0aSWill Deacon }
101d4647f0aSWill Deacon 
get_bhb_affected_string(enum mitigation_state bhb_state)102dee435beSJames Morse static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
103dee435beSJames Morse {
104dee435beSJames Morse 	switch (bhb_state) {
105dee435beSJames Morse 	case SPECTRE_UNAFFECTED:
106dee435beSJames Morse 		return "";
107dee435beSJames Morse 	default:
108dee435beSJames Morse 	case SPECTRE_VULNERABLE:
109dee435beSJames Morse 		return ", but not BHB";
110dee435beSJames Morse 	case SPECTRE_MITIGATED:
111dee435beSJames Morse 		return ", BHB";
112dee435beSJames Morse 	}
113dee435beSJames Morse }
114dee435beSJames Morse 
/*
 * Is unprivileged eBPF currently permitted? Without CONFIG_BPF_SYSCALL
 * there is no bpf() syscall at all, so the answer is trivially no.
 */
static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}
12358c9a506SJames Morse 
/*
 * sysfs hook reporting the combined Spectre-v2/Spectre-BHB state. The
 * BHB suffix is folded into the same string, and unprivileged eBPF
 * downgrades a BHB-mitigated system to "Vulnerable".
 */
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		/* Unprivileged eBPF renders the BHB mitigation ineffective. */
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
153d4647f0aSWill Deacon 
/*
 * Determine whether this CPU is unaffected by Spectre-v2 in hardware:
 * either ID_AA64PFR0_EL1.CSV2 is set, or the CPU is on the known-safe
 * MIDR list below. Anything else is reported vulnerable (a firmware
 * mitigation may still apply — see spectre_v2_get_cpu_fw_mitigation_state()).
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
180d4647f0aSWill Deacon 
spectre_v2_get_cpu_fw_mitigation_state(void)181d4647f0aSWill Deacon static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
182d4647f0aSWill Deacon {
183d4647f0aSWill Deacon 	int ret;
184d4647f0aSWill Deacon 	struct arm_smccc_res res;
185d4647f0aSWill Deacon 
186d4647f0aSWill Deacon 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
187d4647f0aSWill Deacon 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
188d4647f0aSWill Deacon 
189d4647f0aSWill Deacon 	ret = res.a0;
190d4647f0aSWill Deacon 	switch (ret) {
191d4647f0aSWill Deacon 	case SMCCC_RET_SUCCESS:
192d4647f0aSWill Deacon 		return SPECTRE_MITIGATED;
193d4647f0aSWill Deacon 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
194d4647f0aSWill Deacon 		return SPECTRE_UNAFFECTED;
195d4647f0aSWill Deacon 	default:
196d4647f0aSWill Deacon 		fallthrough;
197d4647f0aSWill Deacon 	case SMCCC_RET_NOT_SUPPORTED:
198d4647f0aSWill Deacon 		return SPECTRE_VULNERABLE;
199d4647f0aSWill Deacon 	}
200d4647f0aSWill Deacon }
201d4647f0aSWill Deacon 
has_spectre_v2(const struct arm64_cpu_capabilities * entry,int scope)202d4647f0aSWill Deacon bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
203d4647f0aSWill Deacon {
204d4647f0aSWill Deacon 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
205d4647f0aSWill Deacon 
206d4647f0aSWill Deacon 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
207d4647f0aSWill Deacon 		return false;
208d4647f0aSWill Deacon 
209d4647f0aSWill Deacon 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
210d4647f0aSWill Deacon 		return false;
211d4647f0aSWill Deacon 
212d4647f0aSWill Deacon 	return true;
213d4647f0aSWill Deacon }
214d4647f0aSWill Deacon 
/* Report the system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
219d4647f0aSWill Deacon 
220b881cdceSWill Deacon DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
221d4647f0aSWill Deacon 
install_bp_hardening_cb(bp_hardening_cb_t fn)222d4647f0aSWill Deacon static void install_bp_hardening_cb(bp_hardening_cb_t fn)
223d4647f0aSWill Deacon {
224b881cdceSWill Deacon 	__this_cpu_write(bp_hardening_data.fn, fn);
225d4647f0aSWill Deacon 
226d4647f0aSWill Deacon 	/*
22766dd3474SStephen Boyd 	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
22866dd3474SStephen Boyd 	 * the door when we're a guest. Skip the hyp-vectors work.
229d4647f0aSWill Deacon 	 */
230b881cdceSWill Deacon 	if (!is_hyp_mode_available())
231d4647f0aSWill Deacon 		return;
232d4647f0aSWill Deacon 
233b881cdceSWill Deacon 	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
234d4647f0aSWill Deacon }
235d4647f0aSWill Deacon 
/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
	/* Invoke the firmware Spectre-v2 workaround via the SMC conduit. */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
241d4647f0aSWill Deacon 
/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
	/* Invoke the firmware Spectre-v2 workaround via the HVC conduit. */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
247d4647f0aSWill Deacon 
/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	/*
	 * Stash the link register, issue 16 branch-and-link instructions
	 * targeting the immediately-following instruction so the CPU's
	 * return-address predictor (link stack) is overwritten with benign
	 * entries, then restore x30.
	 */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
260d4647f0aSWill Deacon 
spectre_v2_get_sw_mitigation_cb(void)261ea8f8c99SWill Deacon static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
262ea8f8c99SWill Deacon {
263ea8f8c99SWill Deacon 	u32 midr = read_cpuid_id();
264ea8f8c99SWill Deacon 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
265ea8f8c99SWill Deacon 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
266ea8f8c99SWill Deacon 		return NULL;
267ea8f8c99SWill Deacon 
268ea8f8c99SWill Deacon 	return qcom_link_stack_sanitisation;
269ea8f8c99SWill Deacon }
270ea8f8c99SWill Deacon 
/*
 * Enable the firmware-based Spectre-v2 mitigation for this CPU: install
 * a per-CPU hardening callback matching the SMCCC conduit in use,
 * preferring a CPU-specific software workaround where one exists.
 * Returns the resulting mitigation state.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		/* No conduit means no way to reach firmware. */
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
304d4647f0aSWill Deacon 
spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities * __unused)305d4647f0aSWill Deacon void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
306d4647f0aSWill Deacon {
307d4647f0aSWill Deacon 	enum mitigation_state state;
308d4647f0aSWill Deacon 
309d4647f0aSWill Deacon 	WARN_ON(preemptible());
310d4647f0aSWill Deacon 
311d4647f0aSWill Deacon 	state = spectre_v2_get_cpu_hw_mitigation_state();
312d4647f0aSWill Deacon 	if (state == SPECTRE_VULNERABLE)
313d4647f0aSWill Deacon 		state = spectre_v2_enable_fw_mitigation();
314d4647f0aSWill Deacon 
315d4647f0aSWill Deacon 	update_mitigation_state(&spectre_v2_state, state);
316d4647f0aSWill Deacon }
3179e78b659SWill Deacon 
318c2876207SWill Deacon /*
319c4792b6dSWill Deacon  * Spectre-v3a.
320c4792b6dSWill Deacon  *
321c4792b6dSWill Deacon  * Phew, there's not an awful lot to do here! We just instruct EL2 to use
322c4792b6dSWill Deacon  * an indirect trampoline for the hyp vectors so that guests can't read
323c4792b6dSWill Deacon  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
324c4792b6dSWill Deacon  */
/*
 * Capability check: only CPUs on the unsafe MIDR list below need the
 * Spectre-v3a mitigation. Must run on the CPU in question with
 * preemption disabled.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}
336cd1f56b9SWill Deacon 
/*
 * Enable the Spectre-v3a mitigation on affected CPUs by switching this
 * CPU's hyp vector slot to the indirect trampoline variant (so guests
 * can't learn the hypervisor VA layout from VBAR_EL2 — see above).
 */
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}
344b881cdceSWill Deacon 
345c2876207SWill Deacon /*
346c2876207SWill Deacon  * Spectre v4.
347c2876207SWill Deacon  *
348c2876207SWill Deacon  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
349c2876207SWill Deacon  * either:
350c2876207SWill Deacon  *
351c2876207SWill Deacon  * - Mitigated in hardware and listed in our "safe list".
352c2876207SWill Deacon  * - Mitigated in hardware via PSTATE.SSBS.
353c2876207SWill Deacon  * - Mitigated in software by firmware (sometimes referred to as SSBD).
354c2876207SWill Deacon  *
355c2876207SWill Deacon  * Wait, that doesn't sound so bad, does it? Keep reading...
356c2876207SWill Deacon  *
357c2876207SWill Deacon  * A major source of headaches is that the software mitigation is enabled both
358c2876207SWill Deacon  * on a per-task basis, but can also be forced on for the kernel, necessitating
359c2876207SWill Deacon  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
360c2876207SWill Deacon  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
361c2876207SWill Deacon  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
362c2876207SWill Deacon  * so you can have systems that have both firmware and SSBS mitigations. This
363c2876207SWill Deacon  * means we actually have to reject late onlining of CPUs with mitigations if
364c2876207SWill Deacon  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
365c2876207SWill Deacon  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
366c2876207SWill Deacon  *
367c2876207SWill Deacon  * The only good part is that if the firmware mitigation is present, then it is
368c2876207SWill Deacon  * present for all CPUs, meaning we don't have to worry about late onlining of a
369c2876207SWill Deacon  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
370c2876207SWill Deacon  *
371c2876207SWill Deacon  * Give me a VAX-11/780 any day of the week...
372c2876207SWill Deacon  */
/* System-wide Spectre-v4 mitigation state, only ever raised. */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Command-line-selectable Spectre-v4 mitigation policies. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,	/* default: toggled per task / on entry */
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,	/* forced on */
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,	/* forced off */
};

/* Chosen via "ssbd="; zero-initialised, so DYNAMIC unless overridden. */
static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping of "ssbd=" argument strings to mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
parse_spectre_v4_param(char * str)394c2876207SWill Deacon static int __init parse_spectre_v4_param(char *str)
395c2876207SWill Deacon {
396c2876207SWill Deacon 	int i;
397c2876207SWill Deacon 
398c2876207SWill Deacon 	if (!str || !str[0])
399c2876207SWill Deacon 		return -EINVAL;
400c2876207SWill Deacon 
401c2876207SWill Deacon 	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
402c2876207SWill Deacon 		const struct spectre_v4_param *param = &spectre_v4_params[i];
403c2876207SWill Deacon 
404c2876207SWill Deacon 		if (strncmp(str, param->str, strlen(param->str)))
405c2876207SWill Deacon 			continue;
406c2876207SWill Deacon 
407c2876207SWill Deacon 		__spectre_v4_policy = param->policy;
408c2876207SWill Deacon 		return 0;
4099e78b659SWill Deacon 	}
4109e78b659SWill Deacon 
411c2876207SWill Deacon 	return -EINVAL;
412c2876207SWill Deacon }
413c2876207SWill Deacon early_param("ssbd", parse_spectre_v4_param);
4149e78b659SWill Deacon 
415c2876207SWill Deacon /*
416c2876207SWill Deacon  * Because this was all written in a rush by people working in different silos,
417c2876207SWill Deacon  * we've ended up with multiple command line options to control the same thing.
418c2876207SWill Deacon  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
419c2876207SWill Deacon  * with contradictory parameters. The mitigation is always either "off",
420c2876207SWill Deacon  * "dynamic" or "on".
421c2876207SWill Deacon  */
spectre_v4_mitigations_off(void)422c2876207SWill Deacon static bool spectre_v4_mitigations_off(void)
423c2876207SWill Deacon {
424c2876207SWill Deacon 	bool ret = cpu_mitigations_off() ||
425c2876207SWill Deacon 		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
426c2876207SWill Deacon 
427c2876207SWill Deacon 	if (ret)
428c2876207SWill Deacon 		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
429c2876207SWill Deacon 
430c2876207SWill Deacon 	return ret;
431c2876207SWill Deacon }
432c2876207SWill Deacon 
433c2876207SWill Deacon /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
spectre_v4_mitigations_dynamic(void)434c2876207SWill Deacon static bool spectre_v4_mitigations_dynamic(void)
435c2876207SWill Deacon {
436c2876207SWill Deacon 	return !spectre_v4_mitigations_off() &&
437c2876207SWill Deacon 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
438c2876207SWill Deacon }
439c2876207SWill Deacon 
spectre_v4_mitigations_on(void)440c2876207SWill Deacon static bool spectre_v4_mitigations_on(void)
441c2876207SWill Deacon {
442c2876207SWill Deacon 	return !spectre_v4_mitigations_off() &&
443c2876207SWill Deacon 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
444c2876207SWill Deacon }
445c2876207SWill Deacon 
cpu_show_spec_store_bypass(struct device * dev,struct device_attribute * attr,char * buf)446c2876207SWill Deacon ssize_t cpu_show_spec_store_bypass(struct device *dev,
447c2876207SWill Deacon 				   struct device_attribute *attr, char *buf)
448c2876207SWill Deacon {
449c2876207SWill Deacon 	switch (spectre_v4_state) {
450c2876207SWill Deacon 	case SPECTRE_UNAFFECTED:
451c2876207SWill Deacon 		return sprintf(buf, "Not affected\n");
452c2876207SWill Deacon 	case SPECTRE_MITIGATED:
453c2876207SWill Deacon 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
454c2876207SWill Deacon 	case SPECTRE_VULNERABLE:
455c2876207SWill Deacon 		fallthrough;
456c2876207SWill Deacon 	default:
457c2876207SWill Deacon 		return sprintf(buf, "Vulnerable\n");
458c2876207SWill Deacon 	}
459c2876207SWill Deacon }
460c2876207SWill Deacon 
/* Report the system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
465c2876207SWill Deacon 
/*
 * Determine this CPU's hardware Spectre-v4 state: CPUs on the safe MIDR
 * list below are unaffected, CPUs with the SSBS capability can mitigate
 * in hardware, and everything else is vulnerable (a firmware mitigation
 * may still apply — see spectre_v4_get_cpu_fw_mitigation_state()).
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
487c2876207SWill Deacon 
spectre_v4_get_cpu_fw_mitigation_state(void)488c2876207SWill Deacon static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
489c2876207SWill Deacon {
490c2876207SWill Deacon 	int ret;
491c2876207SWill Deacon 	struct arm_smccc_res res;
492c2876207SWill Deacon 
493c2876207SWill Deacon 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
494c2876207SWill Deacon 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
495c2876207SWill Deacon 
496c2876207SWill Deacon 	ret = res.a0;
497c2876207SWill Deacon 	switch (ret) {
498c2876207SWill Deacon 	case SMCCC_RET_SUCCESS:
499c2876207SWill Deacon 		return SPECTRE_MITIGATED;
500c2876207SWill Deacon 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
501c2876207SWill Deacon 		fallthrough;
502c2876207SWill Deacon 	case SMCCC_RET_NOT_REQUIRED:
503c2876207SWill Deacon 		return SPECTRE_UNAFFECTED;
504c2876207SWill Deacon 	default:
505c2876207SWill Deacon 		fallthrough;
506c2876207SWill Deacon 	case SMCCC_RET_NOT_SUPPORTED:
507c2876207SWill Deacon 		return SPECTRE_VULNERABLE;
508c2876207SWill Deacon 	}
509c2876207SWill Deacon }
510c2876207SWill Deacon 
has_spectre_v4(const struct arm64_cpu_capabilities * cap,int scope)511c2876207SWill Deacon bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
512c2876207SWill Deacon {
513c2876207SWill Deacon 	enum mitigation_state state;
514c2876207SWill Deacon 
515c2876207SWill Deacon 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
516c2876207SWill Deacon 
517c2876207SWill Deacon 	state = spectre_v4_get_cpu_hw_mitigation_state();
518c2876207SWill Deacon 	if (state == SPECTRE_VULNERABLE)
519c2876207SWill Deacon 		state = spectre_v4_get_cpu_fw_mitigation_state();
520c2876207SWill Deacon 
521c2876207SWill Deacon 	return state != SPECTRE_UNAFFECTED;
522c2876207SWill Deacon }
523c2876207SWill Deacon 
/*
 * Emulate a trapped "MSR SSBS, #imm" by applying the immediate to the
 * saved PSTATE directly and skipping the instruction. Returns true if
 * @instr was such an MSR and has been handled, false otherwise.
 */
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
{
	/* MSR-immediate encoding for SSBS, with the immediate bit masked out */
	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
	const u32 instr_val = 0xd500401f | PSTATE_SSBS;

	if ((instr & instr_mask) != instr_val)
		return false;

	/* The immediate bit carries the value to write to PSTATE.SSBS. */
	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return true;
}
540c2876207SWill Deacon 
/*
 * Enable (or deliberately disable) the PSTATE.SSBS-based Spectre-v4
 * mitigation on this CPU, returning the resulting mitigation state.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Set SCTLR_EL1.DSSBS and PSTATE.SSBS: deliberately unmitigated. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}
5639e78b659SWill Deacon 
5649e78b659SWill Deacon /*
565c2876207SWill Deacon  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
566c2876207SWill Deacon  * we fallthrough and check whether firmware needs to be called on this CPU.
567c2876207SWill Deacon  */
spectre_v4_patch_fw_mitigation_enable(struct alt_instr * alt,__le32 * origptr,__le32 * updptr,int nr_inst)568c2876207SWill Deacon void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
569c2876207SWill Deacon 						  __le32 *origptr,
570c2876207SWill Deacon 						  __le32 *updptr, int nr_inst)
571c2876207SWill Deacon {
572c2876207SWill Deacon 	BUG_ON(nr_inst != 1); /* Branch -> NOP */
573c2876207SWill Deacon 
574c2876207SWill Deacon 	if (spectre_v4_mitigations_off())
575c2876207SWill Deacon 		return;
576c2876207SWill Deacon 
577747ad8d5SMark Rutland 	if (cpus_have_cap(ARM64_SSBS))
578c2876207SWill Deacon 		return;
579c2876207SWill Deacon 
580c2876207SWill Deacon 	if (spectre_v4_mitigations_dynamic())
581c2876207SWill Deacon 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
582c2876207SWill Deacon }
583c2876207SWill Deacon 
/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					       __le32 *origptr,
					       __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No known conduit: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}
609c2876207SWill Deacon 
/*
 * Try to mitigate Spectre-v4 on this CPU via the firmware
 * ARM_SMCCC_ARCH_WORKAROUND_2 call. Used when the hardware (PSTATE.SSBS)
 * mitigation is not available.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	/* Bail out unless firmware advertises the workaround. */
	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Explicitly ask firmware to switch the mitigation off. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/*
	 * For the dynamic mitigation, flag that this CPU needs the firmware
	 * callback (the flag is consumed elsewhere, outside this file's view).
	 */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
630c2876207SWill Deacon 
spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities * __unused)631c2876207SWill Deacon void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
632c2876207SWill Deacon {
633c2876207SWill Deacon 	enum mitigation_state state;
634c2876207SWill Deacon 
635c2876207SWill Deacon 	WARN_ON(preemptible());
636c2876207SWill Deacon 
637c2876207SWill Deacon 	state = spectre_v4_enable_hw_mitigation();
638c2876207SWill Deacon 	if (state == SPECTRE_VULNERABLE)
639c2876207SWill Deacon 		state = spectre_v4_enable_fw_mitigation();
640c2876207SWill Deacon 
641c2876207SWill Deacon 	update_mitigation_state(&spectre_v4_state, state);
642c2876207SWill Deacon }
643c2876207SWill Deacon 
/*
 * Set or clear the SSBS bit in the saved PSTATE of @regs, picking the
 * AArch32 or AArch64 bit position as appropriate for the task.
 */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 ssbs_bit;

	if (compat_user_mode(regs))
		ssbs_bit = PSR_AA32_SSBS_BIT;
	else
		ssbs_bit = PSR_SSBS_BIT;

	if (state)
		regs->pstate |= ssbs_bit;
	else
		regs->pstate &= ~ssbs_bit;
}
653c2876207SWill Deacon 
spectre_v4_enable_task_mitigation(struct task_struct * tsk)654c2876207SWill Deacon void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
655c2876207SWill Deacon {
656c2876207SWill Deacon 	struct pt_regs *regs = task_pt_regs(tsk);
657c2876207SWill Deacon 	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
658c2876207SWill Deacon 
659c2876207SWill Deacon 	if (spectre_v4_mitigations_off())
660c2876207SWill Deacon 		ssbs = true;
661c2876207SWill Deacon 	else if (spectre_v4_mitigations_dynamic() && !kthread)
662c2876207SWill Deacon 		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
663c2876207SWill Deacon 
664c2876207SWill Deacon 	__update_pstate_ssbs(regs, ssbs);
665c2876207SWill Deacon }
666c2876207SWill Deacon 
/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */

/*
 * Turn the SSB mitigation on for @task: set the "disable" state and the
 * TIF_SSBD flag. Any pending DISABLE_NOEXEC request is cancelled first.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
680780c083aSWill Deacon 
/*
 * Turn the SSB mitigation off for @task: clear the "disable" state and
 * the TIF_SSBD flag, cancelling any pending DISABLE_NOEXEC request too.
 */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
687780c083aSWill Deacon 
/*
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...) handler.
 *
 * Note the inverted sense throughout: "enabling speculation" means
 * disabling the Spectre-v4 mitigation and vice-versa.
 *
 * Returns 0 on success, -EPERM when a forced mitigation policy forbids
 * the requested transition, or -ERANGE for an unknown command.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new per-task choice into PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
7509e78b659SWill Deacon 
arch_prctl_spec_ctrl_set(struct task_struct * task,unsigned long which,unsigned long ctrl)7519e78b659SWill Deacon int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
7529e78b659SWill Deacon 			     unsigned long ctrl)
7539e78b659SWill Deacon {
7549e78b659SWill Deacon 	switch (which) {
7559e78b659SWill Deacon 	case PR_SPEC_STORE_BYPASS:
7569e78b659SWill Deacon 		return ssbd_prctl_set(task, ctrl);
7579e78b659SWill Deacon 	default:
7589e78b659SWill Deacon 		return -ENODEV;
7599e78b659SWill Deacon 	}
7609e78b659SWill Deacon }
7619e78b659SWill Deacon 
ssbd_prctl_get(struct task_struct * task)7629e78b659SWill Deacon static int ssbd_prctl_get(struct task_struct *task)
7639e78b659SWill Deacon {
764c2876207SWill Deacon 	switch (spectre_v4_state) {
765c2876207SWill Deacon 	case SPECTRE_UNAFFECTED:
766c2876207SWill Deacon 		return PR_SPEC_NOT_AFFECTED;
767c2876207SWill Deacon 	case SPECTRE_MITIGATED:
768c2876207SWill Deacon 		if (spectre_v4_mitigations_on())
769c2876207SWill Deacon 			return PR_SPEC_NOT_AFFECTED;
770c2876207SWill Deacon 
771c2876207SWill Deacon 		if (spectre_v4_mitigations_dynamic())
772c2876207SWill Deacon 			break;
773c2876207SWill Deacon 
774c2876207SWill Deacon 		/* Mitigations are disabled, so we're vulnerable. */
775c2876207SWill Deacon 		fallthrough;
776c2876207SWill Deacon 	case SPECTRE_VULNERABLE:
777c2876207SWill Deacon 		fallthrough;
778c2876207SWill Deacon 	default:
779c2876207SWill Deacon 		return PR_SPEC_ENABLE;
780c2876207SWill Deacon 	}
781c2876207SWill Deacon 
782c2876207SWill Deacon 	/* Check the mitigation state for this task */
7839e78b659SWill Deacon 	if (task_spec_ssb_force_disable(task))
7849e78b659SWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
785c2876207SWill Deacon 
786780c083aSWill Deacon 	if (task_spec_ssb_noexec(task))
787780c083aSWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
788780c083aSWill Deacon 
7899e78b659SWill Deacon 	if (task_spec_ssb_disable(task))
7909e78b659SWill Deacon 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
791c2876207SWill Deacon 
7929e78b659SWill Deacon 	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
7939e78b659SWill Deacon }
7949e78b659SWill Deacon 
arch_prctl_spec_ctrl_get(struct task_struct * task,unsigned long which)7959e78b659SWill Deacon int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
7969e78b659SWill Deacon {
7979e78b659SWill Deacon 	switch (which) {
7989e78b659SWill Deacon 	case PR_SPEC_STORE_BYPASS:
7999e78b659SWill Deacon 		return ssbd_prctl_get(task);
8009e78b659SWill Deacon 	default:
8019e78b659SWill Deacon 		return -ENODEV;
8029e78b659SWill Deacon 	}
8039e78b659SWill Deacon }
804ba268923SJames Morse 
805558c303cSJames Morse /*
806558c303cSJames Morse  * Spectre BHB.
807558c303cSJames Morse  *
808558c303cSJames Morse  * A CPU is either:
809558c303cSJames Morse  * - Mitigated by a branchy loop a CPU specific number of times, and listed
810558c303cSJames Morse  *   in our "loop mitigated list".
811558c303cSJames Morse  * - Mitigated in software by the firmware Spectre v2 call.
812228a26b9SJames Morse  * - Has the ClearBHB instruction to perform the mitigation.
813558c303cSJames Morse  * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
814558c303cSJames Morse  *   software mitigation in the vectors is needed.
815558c303cSJames Morse  * - Has CSV2.3, so is unaffected.
816558c303cSJames Morse  */
/* System-wide state, updated by spectre_bhb_enable_mitigation(). */
static enum mitigation_state spectre_bhb_state;

/* Report the system-wide Spectre-BHB mitigation state. */
enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}
823dee435beSJames Morse 
/* Bits of system_bhb_mitigations, recording which mitigations are in use. */
enum bhb_mitigation_bits {
	BHB_LOOP,	/* branchy loop in the vectors */
	BHB_FW,		/* firmware ARM_SMCCC_ARCH_WORKAROUND_3 call */
	BHB_HW,		/* ECBHB feature: no software mitigation needed */
	BHB_INSN,	/* ClearBHB instruction */
};
static unsigned long system_bhb_mitigations;
831558c303cSJames Morse 
832558c303cSJames Morse /*
833558c303cSJames Morse  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
834558c303cSJames Morse  * SCOPE_SYSTEM call will give the right answer.
835558c303cSJames Morse  */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;		/* loop iterations needed on this CPU; 0 = unaffected */
	static u8 max_bhb_k;	/* worst case over all CPUs probed so far */

	if (scope == SCOPE_LOCAL_CPU) {
		/* CPUs needing 32 iterations of the branchy loop */
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		/* CPUs needing 24 iterations */
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		/* CPUs needing 11 iterations */
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		/* CPUs needing 8 iterations */
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		/* Latch the worst case for later SCOPE_SYSTEM queries. */
		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
885558c303cSJames Morse 
spectre_bhb_get_cpu_fw_mitigation_state(void)886558c303cSJames Morse static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
887558c303cSJames Morse {
888558c303cSJames Morse 	int ret;
889558c303cSJames Morse 	struct arm_smccc_res res;
890558c303cSJames Morse 
891558c303cSJames Morse 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
892558c303cSJames Morse 			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
893558c303cSJames Morse 
894558c303cSJames Morse 	ret = res.a0;
895558c303cSJames Morse 	switch (ret) {
896558c303cSJames Morse 	case SMCCC_RET_SUCCESS:
897558c303cSJames Morse 		return SPECTRE_MITIGATED;
898558c303cSJames Morse 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
899558c303cSJames Morse 		return SPECTRE_UNAFFECTED;
900558c303cSJames Morse 	default:
901558c303cSJames Morse 		fallthrough;
902558c303cSJames Morse 	case SMCCC_RET_NOT_SUPPORTED:
903558c303cSJames Morse 		return SPECTRE_VULNERABLE;
904558c303cSJames Morse 	}
905558c303cSJames Morse }
906558c303cSJames Morse 
/*
 * Does this CPU rely on the firmware call for its Spectre-BHB mitigation?
 *
 * With SCOPE_LOCAL_CPU, probe the current CPU and latch a positive result
 * into the static @system_affected. With any other scope, report whether
 * any CPU probed so far matched (so all CPU types must have been probed
 * locally first).
 */
static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;	/* sticky: set once any CPU matches */
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	/* Known-affected CPU, or firmware advertising the workaround. */
	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}
931558c303cSJames Morse 
supports_ecbhb(int scope)932558c303cSJames Morse static bool supports_ecbhb(int scope)
933558c303cSJames Morse {
934558c303cSJames Morse 	u64 mmfr1;
935558c303cSJames Morse 
936558c303cSJames Morse 	if (scope == SCOPE_LOCAL_CPU)
937558c303cSJames Morse 		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
938558c303cSJames Morse 	else
939558c303cSJames Morse 		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
940558c303cSJames Morse 
941558c303cSJames Morse 	return cpuid_feature_extract_unsigned_field(mmfr1,
9426fcd0193SKristina Martsenko 						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
943558c303cSJames Morse }
944558c303cSJames Morse 
is_spectre_bhb_affected(const struct arm64_cpu_capabilities * entry,int scope)945558c303cSJames Morse bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
946558c303cSJames Morse 			     int scope)
947558c303cSJames Morse {
948558c303cSJames Morse 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
949558c303cSJames Morse 
950558c303cSJames Morse 	if (supports_csv2p3(scope))
951558c303cSJames Morse 		return false;
952558c303cSJames Morse 
953228a26b9SJames Morse 	if (supports_clearbhb(scope))
954228a26b9SJames Morse 		return true;
955228a26b9SJames Morse 
956558c303cSJames Morse 	if (spectre_bhb_loop_affected(scope))
957558c303cSJames Morse 		return true;
958558c303cSJames Morse 
959558c303cSJames Morse 	if (is_spectre_bhb_fw_affected(scope))
960558c303cSJames Morse 		return true;
961558c303cSJames Morse 
962558c303cSJames Morse 	return false;
963558c303cSJames Morse }
964558c303cSJames Morse 
/*
 * Install the EL1 exception vector @slot on the calling CPU: record it in
 * the per-cpu this_cpu_vector, and write VBAR_EL1 directly only when KPTI
 * is not in use.
 */
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
981558c303cSJames Morse 
/* "nospectre_bhb" on the command line opts out of the Spectre-BHB mitigation. */
static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	__nospectre_bhb = true;
	return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);
989877ace9eSLiu Song 
/*
 * Pick and enable the best available Spectre-BHB mitigation for the
 * calling CPU, in decreasing order of preference: ECBHB in hardware,
 * the ClearBHB instruction, the CPU-specific branchy loop, and finally
 * the firmware ARM_SMCCC_ARCH_WORKAROUND_3 call. Also steers KVM's
 * bp-hardening vector slot where needed.
 */
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off() || __nospectre_bhb) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		/* Hardware clears the BHB on exception entry: nothing to do. */
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}
1062558c303cSJames Morse 
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						     __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	/* NOP out the branch over the loop when the loop mitigation is in use. */
	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
1073ba268923SJames Morse 
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	/* NOP out the branch over the firmware call when WA3 is in use. */
	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
1084558c303cSJames Morse 
/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	/* System-wide worst-case loop count, known once all CPUs are probed. */
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	/* Re-encode the original MOV, keeping its Rd but with the real count. */
	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}
1105558c303cSJames Morse 
/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	/* Keep the original destination register, load WA3's function ID. */
	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	/* ORR Rd, WZR, #imm — i.e. a MOV of the (logical) immediate. */
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	/* The immediate may not be encodable; leave the original MOV if so. */
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}
1131228a26b9SJames Morse 
/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	/* Keep the original two instructions when ClearBHB is in use. */
	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
114458c9a506SJames Morse 
#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
/*
 * Notifier for changes to the unprivileged-bpf setting: warn when
 * unprivileged eBPF becomes enabled (new_state == 0) on a system whose
 * Spectre v2/BHB state is otherwise mitigated.
 */
void unpriv_ebpf_notify(int new_state)
{
	/* Nothing to add if the system is already vulnerable. */
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif
1157