xref: /openbmc/linux/arch/arm64/kernel/proton-pack.c (revision 7af6fbdd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19 
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26 
27 #include <asm/spectre.h>
28 #include <asm/traps.h>
29 
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 *
 * The recorded state only ever moves in one direction (numerically
 * increasing); a weaker @new value is silently ignored. Once the system
 * capabilities have been finalized, any attempted change is WARNed about
 * and dropped, since userspace may already have sampled the old state.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* Never weaken the recorded state. */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
49 
50 /*
51  * Spectre v1.
52  *
53  * The kernel can't protect userspace for this one: it's each person for
54  * themselves. Advertise what we're doing and be done with it.
55  */
56 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
57 			    char *buf)
58 {
59 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
60 }
61 
62 /*
63  * Spectre v2.
64  *
65  * This one sucks. A CPU is either:
66  *
67  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
68  * - Mitigated in hardware and listed in our "safe list".
69  * - Mitigated in software by firmware.
70  * - Mitigated in software by a CPU-specific dance in the kernel.
71  * - Vulnerable.
72  *
73  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
74  * different camps.
75  */
/* System-wide Spectre-v2 state; only advanced via update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;
77 
78 static bool __read_mostly __nospectre_v2;
79 static int __init parse_spectre_v2_param(char *str)
80 {
81 	__nospectre_v2 = true;
82 	return 0;
83 }
84 early_param("nospectre_v2", parse_spectre_v2_param);
85 
86 static bool spectre_v2_mitigations_off(void)
87 {
88 	bool ret = __nospectre_v2 || cpu_mitigations_off();
89 
90 	if (ret)
91 		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
92 
93 	return ret;
94 }
95 
96 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
97 			    char *buf)
98 {
99 	switch (spectre_v2_state) {
100 	case SPECTRE_UNAFFECTED:
101 		return sprintf(buf, "Not affected\n");
102 	case SPECTRE_MITIGATED:
103 		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
104 	case SPECTRE_VULNERABLE:
105 		fallthrough;
106 	default:
107 		return sprintf(buf, "Vulnerable\n");
108 	}
109 }
110 
/*
 * Per-CPU check for hardware immunity to Spectre-v2: either the CPU
 * advertises CSV2 in ID_AA64PFR0_EL1, or its MIDR is on the safe list.
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	/* CPU implementations known not to be affected by Spectre-v2. */
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
136 
/* Firmware return value meaning "CPU unaffected, no workaround required". */
#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	(1)
138 
139 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
140 {
141 	int ret;
142 	struct arm_smccc_res res;
143 
144 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
145 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
146 
147 	ret = res.a0;
148 	switch (ret) {
149 	case SMCCC_RET_SUCCESS:
150 		return SPECTRE_MITIGATED;
151 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
152 		return SPECTRE_UNAFFECTED;
153 	default:
154 		fallthrough;
155 	case SMCCC_RET_NOT_SUPPORTED:
156 		return SPECTRE_VULNERABLE;
157 	}
158 }
159 
160 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
161 {
162 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
163 
164 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
165 		return false;
166 
167 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
168 		return false;
169 
170 	return true;
171 }
172 
/* Per-cpu branch-predictor hardening state: callback fn + hyp vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
174 
175 enum mitigation_state arm64_get_spectre_v2_state(void)
176 {
177 	return spectre_v2_state;
178 }
179 
180 #ifdef CONFIG_KVM
181 #include <asm/cacheflush.h>
182 #include <asm/kvm_asm.h>
183 
/* Index of the most recently claimed EL2 vector slot; -1 means none yet. */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
185 
/*
 * Copy the given code sequence into every 0x80-byte vector entry of the
 * 2K hyp-vector slot @slot, then make the instruction side coherent.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	/* Each BP-hardening vector slot is 2K; write via the linear alias. */
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	/* Replicate the sequence into each 0x80-byte entry of the slot. */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	/* The slot contains code; flush so stale instructions aren't fetched. */
	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
197 
/*
 * Record @fn as this CPU's branch-predictor hardening callback and, when
 * EL2 is available, point this CPU at a hyp-vector slot containing the
 * SMCCC WORKAROUND_1 sequence. Slots are shared: all CPUs using the same
 * callback share one slot, allocated under bp_lock.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * No EL2 (e.g. we booted as a guest): there are no hyp vectors to
	 * set up, so just record the per-cpu callback and return.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse the slot if another CPU has already installed @fn. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	/* First user of @fn: claim a fresh slot and copy the vectors in. */
	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
233 #else
/* !CONFIG_KVM: no hyp vectors to manage; just record the per-cpu callback. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
238 #endif	/* CONFIG_KVM */
239 
/* Invoke the firmware Spectre-v2 workaround via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
244 
/* Invoke the firmware Spectre-v2 workaround via the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
249 
/*
 * Stuff the return-address predictor with 16 'bl' entries that all target
 * the immediately following instruction, displacing any attacker-trained
 * entries. x30 (the link register) is saved and restored around the loop.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
261 
262 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
263 {
264 	bp_hardening_cb_t cb;
265 	enum mitigation_state state;
266 
267 	state = spectre_v2_get_cpu_fw_mitigation_state();
268 	if (state != SPECTRE_MITIGATED)
269 		return state;
270 
271 	if (spectre_v2_mitigations_off())
272 		return SPECTRE_VULNERABLE;
273 
274 	switch (arm_smccc_1_1_get_conduit()) {
275 	case SMCCC_CONDUIT_HVC:
276 		cb = call_hvc_arch_workaround_1;
277 		break;
278 
279 	case SMCCC_CONDUIT_SMC:
280 		cb = call_smc_arch_workaround_1;
281 		break;
282 
283 	default:
284 		return SPECTRE_VULNERABLE;
285 	}
286 
287 	install_bp_hardening_cb(cb);
288 	return SPECTRE_MITIGATED;
289 }
290 
291 static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
292 {
293 	u32 midr;
294 
295 	if (spectre_v2_mitigations_off())
296 		return SPECTRE_VULNERABLE;
297 
298 	midr = read_cpuid_id();
299 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
300 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
301 		return SPECTRE_VULNERABLE;
302 
303 	install_bp_hardening_cb(qcom_link_stack_sanitisation);
304 	return SPECTRE_MITIGATED;
305 }
306 
307 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
308 {
309 	enum mitigation_state state;
310 
311 	WARN_ON(preemptible());
312 
313 	state = spectre_v2_get_cpu_hw_mitigation_state();
314 	if (state == SPECTRE_VULNERABLE)
315 		state = spectre_v2_enable_fw_mitigation();
316 	if (state == SPECTRE_VULNERABLE)
317 		state = spectre_v2_enable_sw_mitigation();
318 
319 	update_mitigation_state(&spectre_v2_state, state);
320 }
321 
322 /*
323  * Spectre v4.
324  *
325  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
326  * either:
327  *
328  * - Mitigated in hardware and listed in our "safe list".
329  * - Mitigated in hardware via PSTATE.SSBS.
330  * - Mitigated in software by firmware (sometimes referred to as SSBD).
331  *
332  * Wait, that doesn't sound so bad, does it? Keep reading...
333  *
334  * A major source of headaches is that the software mitigation is enabled both
335  * on a per-task basis, but can also be forced on for the kernel, necessitating
336  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
337  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
338  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
339  * so you can have systems that have both firmware and SSBS mitigations. This
340  * means we actually have to reject late onlining of CPUs with mitigations if
341  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
342  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
343  *
344  * The only good part is that if the firmware mitigation is present, then it is
345  * present for all CPUs, meaning we don't have to worry about late onlining of a
346  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
347  *
348  * Give me a VAX-11/780 any day of the week...
349  */
/* System-wide Spectre-v4 state; only advanced via update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
354 
/* Command-line policy for the Spectre-v4 mitigation. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,	/* toggled per-task / on entry-exit */
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,	/* forced on */
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,	/* forced off */
};

/* Selected policy; defaults to DYNAMIC (first enumerator, zero-initialised). */
static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
362 
/* Mapping of "ssbd=" command-line values onto mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
371 static int __init parse_spectre_v4_param(char *str)
372 {
373 	int i;
374 
375 	if (!str || !str[0])
376 		return -EINVAL;
377 
378 	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
379 		const struct spectre_v4_param *param = &spectre_v4_params[i];
380 
381 		if (strncmp(str, param->str, strlen(param->str)))
382 			continue;
383 
384 		__spectre_v4_policy = param->policy;
385 		return 0;
386 	}
387 
388 	return -EINVAL;
389 }
390 early_param("ssbd", parse_spectre_v4_param);
391 
392 /*
393  * Because this was all written in a rush by people working in different silos,
394  * we've ended up with multiple command line options to control the same thing.
395  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
396  * with contradictory parameters. The mitigation is always either "off",
397  * "dynamic" or "on".
398  */
399 static bool spectre_v4_mitigations_off(void)
400 {
401 	bool ret = cpu_mitigations_off() ||
402 		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
403 
404 	if (ret)
405 		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
406 
407 	return ret;
408 }
409 
410 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
411 static bool spectre_v4_mitigations_dynamic(void)
412 {
413 	return !spectre_v4_mitigations_off() &&
414 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
415 }
416 
417 static bool spectre_v4_mitigations_on(void)
418 {
419 	return !spectre_v4_mitigations_off() &&
420 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
421 }
422 
423 ssize_t cpu_show_spec_store_bypass(struct device *dev,
424 				   struct device_attribute *attr, char *buf)
425 {
426 	switch (spectre_v4_state) {
427 	case SPECTRE_UNAFFECTED:
428 		return sprintf(buf, "Not affected\n");
429 	case SPECTRE_MITIGATED:
430 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
431 	case SPECTRE_VULNERABLE:
432 		fallthrough;
433 	default:
434 		return sprintf(buf, "Vulnerable\n");
435 	}
436 }
437 
438 enum mitigation_state arm64_get_spectre_v4_state(void)
439 {
440 	return spectre_v4_state;
441 }
442 
/*
 * Per-CPU check for hardware handling of Spectre-v4: safelisted CPUs are
 * unaffected, CPUs with the SSBS feature are mitigated in hardware, and
 * anything else must fall back to firmware.
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	/* CPU implementations known not to be affected by Spectre-v4. */
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
464 
465 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
466 {
467 	int ret;
468 	struct arm_smccc_res res;
469 
470 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
471 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
472 
473 	ret = res.a0;
474 	switch (ret) {
475 	case SMCCC_RET_SUCCESS:
476 		return SPECTRE_MITIGATED;
477 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
478 		fallthrough;
479 	case SMCCC_RET_NOT_REQUIRED:
480 		return SPECTRE_UNAFFECTED;
481 	default:
482 		fallthrough;
483 	case SMCCC_RET_NOT_SUPPORTED:
484 		return SPECTRE_VULNERABLE;
485 	}
486 }
487 
488 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
489 {
490 	enum mitigation_state state;
491 
492 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
493 
494 	state = spectre_v4_get_cpu_hw_mitigation_state();
495 	if (state == SPECTRE_VULNERABLE)
496 		state = spectre_v4_get_cpu_fw_mitigation_state();
497 
498 	return state != SPECTRE_UNAFFECTED;
499 }
500 
501 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
502 {
503 	if (user_mode(regs))
504 		return 1;
505 
506 	if (instr & BIT(PSTATE_Imm_shift))
507 		regs->pstate |= PSR_SSBS_BIT;
508 	else
509 		regs->pstate &= ~PSR_SSBS_BIT;
510 
511 	arm64_skip_faulting_instruction(regs, 4);
512 	return 0;
513 }
514 
/*
 * Match the MSR-immediate write to PSTATE.SSBS regardless of the immediate
 * value (the immediate bit is masked out of the comparison).
 */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
520 
/*
 * Enable (or deliberately disable) the SSBS-based hardware mitigation on
 * this CPU, registering the SSBS-emulation undef hook on first use.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Register the emulation hook exactly once, on the first CPU here. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	/*
	 * Mitigation disabled: set SCTLR_EL1.DSSBS and PSTATE.SSBS so that
	 * speculative store bypass stays permitted.
	 */
	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
552 
553 /*
554  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
555  * we fallthrough and check whether firmware needs to be called on this CPU.
556  */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation off: keep the branch so the firmware call is skipped. */
	if (spectre_v4_mitigations_off())
		return;

	/* Every CPU has SSBS: hardware handles it, no firmware call needed. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Dynamic policy: NOP the branch so we fall through to firmware. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
572 
573 /*
574  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
575  * to call into firmware to adjust the mitigation state.
576  */
577 void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
578 						   __le32 *origptr,
579 						   __le32 *updptr, int nr_inst)
580 {
581 	u32 insn;
582 
583 	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
584 
585 	switch (arm_smccc_1_1_get_conduit()) {
586 	case SMCCC_CONDUIT_HVC:
587 		insn = aarch64_insn_get_hvc_value();
588 		break;
589 	case SMCCC_CONDUIT_SMC:
590 		insn = aarch64_insn_get_smc_value();
591 		break;
592 	default:
593 		return;
594 	}
595 
596 	*updptr = cpu_to_le32(insn);
597 }
598 
599 static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
600 {
601 	enum mitigation_state state;
602 
603 	state = spectre_v4_get_cpu_fw_mitigation_state();
604 	if (state != SPECTRE_MITIGATED)
605 		return state;
606 
607 	if (spectre_v4_mitigations_off()) {
608 		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
609 		return SPECTRE_VULNERABLE;
610 	}
611 
612 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
613 
614 	if (spectre_v4_mitigations_dynamic())
615 		__this_cpu_write(arm64_ssbd_callback_required, 1);
616 
617 	return SPECTRE_MITIGATED;
618 }
619 
620 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
621 {
622 	enum mitigation_state state;
623 
624 	WARN_ON(preemptible());
625 
626 	state = spectre_v4_enable_hw_mitigation();
627 	if (state == SPECTRE_VULNERABLE)
628 		state = spectre_v4_enable_fw_mitigation();
629 
630 	update_mitigation_state(&spectre_v4_state, state);
631 }
632 
633 static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
634 {
635 	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
636 
637 	if (state)
638 		regs->pstate |= bit;
639 	else
640 		regs->pstate &= ~bit;
641 }
642 
643 void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
644 {
645 	struct pt_regs *regs = task_pt_regs(tsk);
646 	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
647 
648 	if (spectre_v4_mitigations_off())
649 		ssbs = true;
650 	else if (spectre_v4_mitigations_dynamic() && !kthread)
651 		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
652 
653 	__update_pstate_ssbs(regs, ssbs);
654 }
655 
656 /*
657  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
658  * This is interesting because the "speculation disabled" behaviour can be
659  * configured so that it is preserved across exec(), which means that the
660  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
661  * from userspace.
662  */
/* Opt @task in to the mitigation: drop any noexec state, set the SSBD flags. */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
669 
/* Opt @task out of the mitigation: clear the noexec and SSBD flags. */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
676 
/*
 * Apply a PR_SPEC_STORE_BYPASS control to @task. Note the inversion: the
 * prctl speaks in terms of *speculation*, so enabling speculation means
 * disabling the mitigation and vice versa. Requests that conflict with a
 * forced system-wide policy fail with -EPERM; unknown controls with
 * -ERANGE. On success, the task's saved PSTATE.SSBS is refreshed.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}
739 
740 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
741 			     unsigned long ctrl)
742 {
743 	switch (which) {
744 	case PR_SPEC_STORE_BYPASS:
745 		return ssbd_prctl_set(task, ctrl);
746 	default:
747 		return -ENODEV;
748 	}
749 }
750 
/*
 * Report the PR_SPEC_STORE_BYPASS state for @task. The per-task flags are
 * only consulted when the system-wide mitigation is dynamic: a forced-on
 * system reports "not affected", while a disabled or vulnerable system
 * reports a bare PR_SPEC_ENABLE (no PR_SPEC_PRCTL, i.e. not controllable).
 */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		/* Dynamic policy: fall out to the per-task checks below. */
		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
783 
784 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
785 {
786 	switch (which) {
787 	case PR_SPEC_STORE_BYPASS:
788 		return ssbd_prctl_get(task);
789 	default:
790 		return -ENODEV;
791 	}
792 }
793