xref: /openbmc/linux/arch/arm64/kernel/proton-pack.c (revision 11a163f2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19 
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26 
27 #include <asm/spectre.h>
28 #include <asm/traps.h>
29 
30 /*
31  * We try to ensure that the mitigation state can never change as the result of
32  * onlining a late CPU.
33  */
34 static void update_mitigation_state(enum mitigation_state *oldp,
35 				    enum mitigation_state new)
36 {
37 	enum mitigation_state state;
38 
39 	do {
40 		state = READ_ONCE(*oldp);
41 		if (new <= state)
42 			break;
43 
44 		/* Userspace almost certainly can't deal with this. */
45 		if (WARN_ON(system_capabilities_finalized()))
46 			break;
47 	} while (cmpxchg_relaxed(oldp, state, new) != state);
48 }
49 
50 /*
51  * Spectre v1.
52  *
53  * The kernel can't protect userspace for this one: it's each person for
54  * themselves. Advertise what we're doing and be done with it.
55  */
56 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
57 			    char *buf)
58 {
59 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
60 }
61 
62 /*
63  * Spectre v2.
64  *
65  * This one sucks. A CPU is either:
66  *
67  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
68  * - Mitigated in hardware and listed in our "safe list".
69  * - Mitigated in software by firmware.
70  * - Mitigated in software by a CPU-specific dance in the kernel and a
71  *   firmware call at EL2.
72  * - Vulnerable.
73  *
74  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
75  * different camps.
76  */
/* System-wide Spectre-v2 state; only ever raised by update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	/* The parameter takes no value; its presence alone disables the mitigation. */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
86 
87 static bool spectre_v2_mitigations_off(void)
88 {
89 	bool ret = __nospectre_v2 || cpu_mitigations_off();
90 
91 	if (ret)
92 		pr_info_once("spectre-v2 mitigation disabled by command line option\n");
93 
94 	return ret;
95 }
96 
97 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
98 			    char *buf)
99 {
100 	switch (spectre_v2_state) {
101 	case SPECTRE_UNAFFECTED:
102 		return sprintf(buf, "Not affected\n");
103 	case SPECTRE_MITIGATED:
104 		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
105 	case SPECTRE_VULNERABLE:
106 		fallthrough;
107 	default:
108 		return sprintf(buf, "Vulnerable\n");
109 	}
110 }
111 
112 static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
113 {
114 	u64 pfr0;
115 	static const struct midr_range spectre_v2_safe_list[] = {
116 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
117 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
118 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
119 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
120 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
121 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
122 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
123 		{ /* sentinel */ }
124 	};
125 
126 	/* If the CPU has CSV2 set, we're safe */
127 	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
128 	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
129 		return SPECTRE_UNAFFECTED;
130 
131 	/* Alternatively, we have a list of unaffected CPUs */
132 	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
133 		return SPECTRE_UNAFFECTED;
134 
135 	return SPECTRE_VULNERABLE;
136 }
137 
138 #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	(1)
139 
140 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
141 {
142 	int ret;
143 	struct arm_smccc_res res;
144 
145 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
146 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
147 
148 	ret = res.a0;
149 	switch (ret) {
150 	case SMCCC_RET_SUCCESS:
151 		return SPECTRE_MITIGATED;
152 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
153 		return SPECTRE_UNAFFECTED;
154 	default:
155 		fallthrough;
156 	case SMCCC_RET_NOT_SUPPORTED:
157 		return SPECTRE_VULNERABLE;
158 	}
159 }
160 
161 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
162 {
163 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
164 
165 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
166 		return false;
167 
168 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
169 		return false;
170 
171 	return true;
172 }
173 
/* Per-CPU hardening state: the callback (.fn) and, under KVM, the hyp vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/* Report the system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
180 
181 #ifdef CONFIG_KVM
182 #include <asm/cacheflush.h>
183 #include <asm/kvm_asm.h>
184 
/* Last allocated slot in the EL2 vector page; -1 means none allocated yet. */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

/*
 * Copy the hardening sequence [hyp_vecs_start, hyp_vecs_end) into every
 * 0x80-byte vector entry of the given 2K slot of __bp_harden_hyp_vecs,
 * then clean/invalidate so the new instructions are visible to fetch.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	/* Write through the linear alias, not the (executable) vector mapping. */
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
198 
/*
 * Install @fn as this CPU's branch-predictor hardening callback and, when
 * running with EL2 available, point this CPU at a hyp vector slot containing
 * the SMC-based workaround sequence. Slots are shared between CPUs using the
 * same callback and are allocated once under bp_lock.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse the slot of any CPU that already installed this callback. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	/* First user of this callback: allocate and populate a fresh slot. */
	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
234 #else
/* !CONFIG_KVM: no hyp vectors to manage, just record the per-CPU callback. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
239 #endif	/* CONFIG_KVM */
240 
/* Invoke the firmware ARCH_WORKAROUND_1 branch-predictor hardening via SMC. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* As above, but via HVC for systems whose SMCCC conduit is the hypervisor. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
250 
/*
 * Qualcomm Falkor link-stack sanitisation: stuff the return-address
 * predictor with 16 dummy calls (each branching to the next instruction),
 * then restore the real link register.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"	/* preserve lr */
		     ".rept	16		\n"
		     "bl	. + 4		\n"	/* 16 calls to fill the stack */
		     ".endr			\n"
		     "mov	x30, %0		\n"	/* restore lr */
		     : "=&r" (tmp));
}
262 
263 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
264 {
265 	u32 midr = read_cpuid_id();
266 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
267 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
268 		return NULL;
269 
270 	return qcom_link_stack_sanitisation;
271 }
272 
273 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
274 {
275 	bp_hardening_cb_t cb;
276 	enum mitigation_state state;
277 
278 	state = spectre_v2_get_cpu_fw_mitigation_state();
279 	if (state != SPECTRE_MITIGATED)
280 		return state;
281 
282 	if (spectre_v2_mitigations_off())
283 		return SPECTRE_VULNERABLE;
284 
285 	switch (arm_smccc_1_1_get_conduit()) {
286 	case SMCCC_CONDUIT_HVC:
287 		cb = call_hvc_arch_workaround_1;
288 		break;
289 
290 	case SMCCC_CONDUIT_SMC:
291 		cb = call_smc_arch_workaround_1;
292 		break;
293 
294 	default:
295 		return SPECTRE_VULNERABLE;
296 	}
297 
298 	/*
299 	 * Prefer a CPU-specific workaround if it exists. Note that we
300 	 * still rely on firmware for the mitigation at EL2.
301 	 */
302 	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
303 	install_bp_hardening_cb(cb);
304 	return SPECTRE_MITIGATED;
305 }
306 
307 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
308 {
309 	enum mitigation_state state;
310 
311 	WARN_ON(preemptible());
312 
313 	state = spectre_v2_get_cpu_hw_mitigation_state();
314 	if (state == SPECTRE_VULNERABLE)
315 		state = spectre_v2_enable_fw_mitigation();
316 
317 	update_mitigation_state(&spectre_v2_state, state);
318 }
319 
320 /*
321  * Spectre v4.
322  *
323  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
324  * either:
325  *
326  * - Mitigated in hardware and listed in our "safe list".
327  * - Mitigated in hardware via PSTATE.SSBS.
328  * - Mitigated in software by firmware (sometimes referred to as SSBD).
329  *
330  * Wait, that doesn't sound so bad, does it? Keep reading...
331  *
332  * A major source of headaches is that the software mitigation is enabled both
333  * on a per-task basis, but can also be forced on for the kernel, necessitating
334  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
335  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
336  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
337  * so you can have systems that have both firmware and SSBS mitigations. This
338  * means we actually have to reject late onlining of CPUs with mitigations if
339  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
340  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
341  *
342  * The only good part is that if the firmware mitigation is present, then it is
343  * present for all CPUs, meaning we don't have to worry about late onlining of a
344  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
345  *
346  * Give me a VAX-11/780 any day of the week...
347  */
/* System-wide Spectre-v4 state; only ever raised by update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Requested behaviour from the "ssbd" command-line parameter. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,	/* default; "ssbd=kernel" */
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,	/* "ssbd=force-on" */
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,	/* "ssbd=force-off" */
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping of "ssbd=" argument strings to mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
369 static int __init parse_spectre_v4_param(char *str)
370 {
371 	int i;
372 
373 	if (!str || !str[0])
374 		return -EINVAL;
375 
376 	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
377 		const struct spectre_v4_param *param = &spectre_v4_params[i];
378 
379 		if (strncmp(str, param->str, strlen(param->str)))
380 			continue;
381 
382 		__spectre_v4_policy = param->policy;
383 		return 0;
384 	}
385 
386 	return -EINVAL;
387 }
388 early_param("ssbd", parse_spectre_v4_param);
389 
390 /*
391  * Because this was all written in a rush by people working in different silos,
392  * we've ended up with multiple command line options to control the same thing.
393  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
394  * with contradictory parameters. The mitigation is always either "off",
395  * "dynamic" or "on".
396  */
397 static bool spectre_v4_mitigations_off(void)
398 {
399 	bool ret = cpu_mitigations_off() ||
400 		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
401 
402 	if (ret)
403 		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
404 
405 	return ret;
406 }
407 
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

/* Is the mitigation unconditionally on for all tasks ("ssbd=force-on")? */
static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
420 
421 ssize_t cpu_show_spec_store_bypass(struct device *dev,
422 				   struct device_attribute *attr, char *buf)
423 {
424 	switch (spectre_v4_state) {
425 	case SPECTRE_UNAFFECTED:
426 		return sprintf(buf, "Not affected\n");
427 	case SPECTRE_MITIGATED:
428 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
429 	case SPECTRE_VULNERABLE:
430 		fallthrough;
431 	default:
432 		return sprintf(buf, "Vulnerable\n");
433 	}
434 }
435 
/* Report the system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
440 
441 static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
442 {
443 	static const struct midr_range spectre_v4_safe_list[] = {
444 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
445 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
446 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
447 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
448 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
449 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
450 		{ /* sentinel */ },
451 	};
452 
453 	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
454 		return SPECTRE_UNAFFECTED;
455 
456 	/* CPU features are detected first */
457 	if (this_cpu_has_cap(ARM64_SSBS))
458 		return SPECTRE_MITIGATED;
459 
460 	return SPECTRE_VULNERABLE;
461 }
462 
463 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
464 {
465 	int ret;
466 	struct arm_smccc_res res;
467 
468 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
469 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
470 
471 	ret = res.a0;
472 	switch (ret) {
473 	case SMCCC_RET_SUCCESS:
474 		return SPECTRE_MITIGATED;
475 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
476 		fallthrough;
477 	case SMCCC_RET_NOT_REQUIRED:
478 		return SPECTRE_UNAFFECTED;
479 	default:
480 		fallthrough;
481 	case SMCCC_RET_NOT_SUPPORTED:
482 		return SPECTRE_VULNERABLE;
483 	}
484 }
485 
486 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
487 {
488 	enum mitigation_state state;
489 
490 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
491 
492 	state = spectre_v4_get_cpu_hw_mitigation_state();
493 	if (state == SPECTRE_VULNERABLE)
494 		state = spectre_v4_get_cpu_fw_mitigation_state();
495 
496 	return state != SPECTRE_UNAFFECTED;
497 }
498 
/*
 * Emulate "MSR SSBS, #imm" by updating PSTATE.SSBS in the saved regs and
 * skipping the faulting instruction. Returns 0 when handled; returns 1 for
 * traps taken from userspace, leaving them to the normal undef path.
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	/* The immediate carries the requested SSBS value. */
	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Step over the 4-byte MSR instruction we just emulated. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

/* Match MSR SSBS immediate forms, ignoring the immediate bit itself. */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
518 
/*
 * Enable the SSBS-based hardware mitigation on this CPU: register the MSR
 * emulation hook (once, system-wide) and program PSTATE.SSBS/SCTLR_EL1.DSSBS
 * according to the command-line policy. Returns the resulting state.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Register the emulation hook exactly once, under the lock. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	/* Mitigation disabled: set DSSBS and PSTATE.SSBS to allow speculation. */
	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
550 
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation disabled: keep the branch, skipping the firmware path. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: hardware handles it, no firmware call needed. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Dynamic policy: NOP the branch so we fall through to the call. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
570 
571 /*
572  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
573  * to call into firmware to adjust the mitigation state.
574  */
575 void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
576 						   __le32 *origptr,
577 						   __le32 *updptr, int nr_inst)
578 {
579 	u32 insn;
580 
581 	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
582 
583 	switch (arm_smccc_1_1_get_conduit()) {
584 	case SMCCC_CONDUIT_HVC:
585 		insn = aarch64_insn_get_hvc_value();
586 		break;
587 	case SMCCC_CONDUIT_SMC:
588 		insn = aarch64_insn_get_smc_value();
589 		break;
590 	default:
591 		return;
592 	}
593 
594 	*updptr = cpu_to_le32(insn);
595 }
596 
597 static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
598 {
599 	enum mitigation_state state;
600 
601 	state = spectre_v4_get_cpu_fw_mitigation_state();
602 	if (state != SPECTRE_MITIGATED)
603 		return state;
604 
605 	if (spectre_v4_mitigations_off()) {
606 		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
607 		return SPECTRE_VULNERABLE;
608 	}
609 
610 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
611 
612 	if (spectre_v4_mitigations_dynamic())
613 		__this_cpu_write(arm64_ssbd_callback_required, 1);
614 
615 	return SPECTRE_MITIGATED;
616 }
617 
618 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
619 {
620 	enum mitigation_state state;
621 
622 	WARN_ON(preemptible());
623 
624 	state = spectre_v4_enable_hw_mitigation();
625 	if (state == SPECTRE_VULNERABLE)
626 		state = spectre_v4_enable_fw_mitigation();
627 
628 	update_mitigation_state(&spectre_v4_state, state);
629 }
630 
631 static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
632 {
633 	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
634 
635 	if (state)
636 		regs->pstate |= bit;
637 	else
638 		regs->pstate &= ~bit;
639 }
640 
641 void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
642 {
643 	struct pt_regs *regs = task_pt_regs(tsk);
644 	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
645 
646 	if (spectre_v4_mitigations_off())
647 		ssbs = true;
648 	else if (spectre_v4_mitigations_dynamic() && !kthread)
649 		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
650 
651 	__update_pstate_ssbs(regs, ssbs);
652 }
653 
/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */

/* Mark @task as wanting the SSB mitigation (clears any noexec request). */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

/* Mark @task as not wanting the SSB mitigation (clears any noexec request). */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
674 
/*
 * Handle PR_SET_SPECULATION_CTRL for PR_SPEC_STORE_BYPASS on @task.
 * Note the inverted vocabulary: "enabling speculation" means disabling
 * the mitigation and vice versa. Requests that contradict a force-on/
 * force-off command-line policy, or a previous PR_SPEC_FORCE_DISABLE,
 * fail with -EPERM; unknown @ctrl values return -ERANGE. On success the
 * task's PSTATE.SSBS is refreshed to match the new state.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new setting into the task's saved PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
737 
738 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
739 			     unsigned long ctrl)
740 {
741 	switch (which) {
742 	case PR_SPEC_STORE_BYPASS:
743 		return ssbd_prctl_set(task, ctrl);
744 	default:
745 		return -ENODEV;
746 	}
747 }
748 
/*
 * Report the PR_SPEC_STORE_BYPASS state for @task. The global state is
 * consulted first: unaffected systems (or a force-on policy) report
 * PR_SPEC_NOT_AFFECTED, and systems without an effective mitigation report
 * plain PR_SPEC_ENABLE (vulnerable, no prctl control). Only under the
 * dynamic policy do we fall through to the per-task flags.
 */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
781 
782 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
783 {
784 	switch (which) {
785 	case PR_SPEC_STORE_BYPASS:
786 		return ssbd_prctl_get(task);
787 	default:
788 		return -ENODEV;
789 	}
790 }
791