1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4 * detailed at:
5 *
6 * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7 *
8 * This code was originally written hastily under an awful lot of stress and so
9 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10 * instantly makes me feel ill. Thanks, Jann. Thann.
11 *
12 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13 * Copyright (C) 2020 Google LLC
14 *
15 * "If there's something strange in your neighbourhood, who you gonna call?"
16 *
17 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18 */
19
20 #include <linux/arm-smccc.h>
21 #include <linux/bpf.h>
22 #include <linux/cpu.h>
23 #include <linux/device.h>
24 #include <linux/nospec.h>
25 #include <linux/prctl.h>
26 #include <linux/sched/task_stack.h>
27
28 #include <asm/debug-monitors.h>
29 #include <asm/insn.h>
30 #include <asm/spectre.h>
31 #include <asm/traps.h>
32 #include <asm/vectors.h>
33 #include <asm/virt.h>
34
35 /*
36 * We try to ensure that the mitigation state can never change as the result of
37 * onlining a late CPU.
38 */
/*
 * Monotonically raise *oldp towards @new; the state is never downgraded,
 * so a late-onlined CPU cannot make the system report a weaker mitigation
 * than was advertised at boot.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* Only ever move to a "more mitigated/affected" state. */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
54
55 /*
56 * Spectre v1.
57 *
58 * The kernel can't protect userspace for this one: it's each person for
59 * themselves. Advertise what we're doing and be done with it.
60 */
cpu_show_spectre_v1(struct device * dev,struct device_attribute * attr,char * buf)61 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
62 char *buf)
63 {
64 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
65 }
66
67 /*
68 * Spectre v2.
69 *
70 * This one sucks. A CPU is either:
71 *
72 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
73 * - Mitigated in hardware and listed in our "safe list".
74 * - Mitigated in software by firmware.
75 * - Mitigated in software by a CPU-specific dance in the kernel and a
76 * firmware call at EL2.
77 * - Vulnerable.
78 *
79 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
80 * different camps.
81 */
/* System-wide Spectre-v2 state, only raised via update_mitigation_state(). */
static enum mitigation_state spectre_v2_state;

/* Set by "nospectre_v2" on the kernel command line. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	/* Flag-style parameter: any value (or none) disables the mitigation. */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
91
spectre_v2_mitigations_off(void)92 static bool spectre_v2_mitigations_off(void)
93 {
94 bool ret = __nospectre_v2 || cpu_mitigations_off();
95
96 if (ret)
97 pr_info_once("spectre-v2 mitigation disabled by command line option\n");
98
99 return ret;
100 }
101
get_bhb_affected_string(enum mitigation_state bhb_state)102 static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
103 {
104 switch (bhb_state) {
105 case SPECTRE_UNAFFECTED:
106 return "";
107 default:
108 case SPECTRE_VULNERABLE:
109 return ", but not BHB";
110 case SPECTRE_MITIGATED:
111 return ", BHB";
112 }
113 }
114
/*
 * True if unprivileged eBPF is currently permitted. Used by the sysfs
 * reporting below: with unprivileged eBPF enabled, the BHB mitigation can
 * be bypassed, so we must report "Vulnerable".
 */
static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	/* No bpf() syscall means no unprivileged eBPF at all. */
	return false;
#endif
}
123
cpu_show_spectre_v2(struct device * dev,struct device_attribute * attr,char * buf)124 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
125 char *buf)
126 {
127 enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
128 const char *bhb_str = get_bhb_affected_string(bhb_state);
129 const char *v2_str = "Branch predictor hardening";
130
131 switch (spectre_v2_state) {
132 case SPECTRE_UNAFFECTED:
133 if (bhb_state == SPECTRE_UNAFFECTED)
134 return sprintf(buf, "Not affected\n");
135
136 /*
137 * Platforms affected by Spectre-BHB can't report
138 * "Not affected" for Spectre-v2.
139 */
140 v2_str = "CSV2";
141 fallthrough;
142 case SPECTRE_MITIGATED:
143 if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
144 return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
145
146 return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
147 case SPECTRE_VULNERABLE:
148 fallthrough;
149 default:
150 return sprintf(buf, "Vulnerable\n");
151 }
152 }
153
/*
 * Hardware view of Spectre-v2 for *this* CPU: unaffected if it advertises
 * CSV2 or appears on the known-safe MIDR list, otherwise vulnerable (and a
 * firmware/software mitigation must be sought).
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
180
spectre_v2_get_cpu_fw_mitigation_state(void)181 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
182 {
183 int ret;
184 struct arm_smccc_res res;
185
186 arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
187 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
188
189 ret = res.a0;
190 switch (ret) {
191 case SMCCC_RET_SUCCESS:
192 return SPECTRE_MITIGATED;
193 case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
194 return SPECTRE_UNAFFECTED;
195 default:
196 fallthrough;
197 case SMCCC_RET_NOT_SUPPORTED:
198 return SPECTRE_VULNERABLE;
199 }
200 }
201
has_spectre_v2(const struct arm64_cpu_capabilities * entry,int scope)202 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
203 {
204 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
205
206 if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
207 return false;
208
209 if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
210 return false;
211
212 return true;
213 }
214
/* Accessor for the system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
219
/* Per-CPU branch-predictor hardening callback and KVM hyp-vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/*
 * Install @fn as this CPU's branch-predictor hardening callback and, when
 * running with EL2 available, select the direct Spectre hyp vector slot.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}
235
/* Called during entry so must be noinstr */
/* Invoke the firmware Spectre-v2 workaround via the SMC conduit. */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
241
/* Called during entry so must be noinstr */
/* Invoke the firmware Spectre-v2 workaround via the HVC conduit. */
static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
247
/* Called during entry so must be noinstr */
/*
 * Qualcomm Falkor-specific Spectre-v2 dance: overwrite the return-address
 * predictor ("link stack") with 16 harmless branch-and-links to the next
 * instruction, preserving x30 around the sequence.
 */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
260
spectre_v2_get_sw_mitigation_cb(void)261 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
262 {
263 u32 midr = read_cpuid_id();
264 if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
265 ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
266 return NULL;
267
268 return qcom_link_stack_sanitisation;
269 }
270
/*
 * Enable the firmware-assisted Spectre-v2 mitigation on this CPU: pick the
 * SMC/HVC workaround callback (or a CPU-specific one) and install it.
 * Returns the resulting per-CPU mitigation state.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	/* Nothing to enable if firmware can't help this CPU. */
	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	/* User asked for no mitigation: honestly report "vulnerable". */
	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		/* No usable conduit to reach firmware. */
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
304
spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities * __unused)305 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
306 {
307 enum mitigation_state state;
308
309 WARN_ON(preemptible());
310
311 state = spectre_v2_get_cpu_hw_mitigation_state();
312 if (state == SPECTRE_VULNERABLE)
313 state = spectre_v2_enable_fw_mitigation();
314
315 update_mitigation_state(&spectre_v2_state, state);
316 }
317
318 /*
319 * Spectre-v3a.
320 *
321 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
322 * an indirect trampoline for the hyp vectors so that guests can't read
323 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
324 */
/*
 * cpufeature "matches" callback for Spectre-v3a: affected iff this CPU is
 * on the known-unsafe MIDR list.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}
336
/*
 * Spectre-v3a enable callback: move this CPU's hyp vector slot to the
 * indirect variant so guests can't learn VBAR_EL2.
 */
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}
344
345 /*
346 * Spectre v4.
347 *
348 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
349 * either:
350 *
351 * - Mitigated in hardware and listed in our "safe list".
352 * - Mitigated in hardware via PSTATE.SSBS.
353 * - Mitigated in software by firmware (sometimes referred to as SSBD).
354 *
355 * Wait, that doesn't sound so bad, does it? Keep reading...
356 *
357 * A major source of headaches is that the software mitigation is enabled both
358 * on a per-task basis, but can also be forced on for the kernel, necessitating
359 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
360 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
361 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
362 * so you can have systems that have both firmware and SSBS mitigations. This
363 * means we actually have to reject late onlining of CPUs with mitigations if
364 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
365 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
366 *
367 * The only good part is that if the firmware mitigation is present, then it is
368 * present for all CPUs, meaning we don't have to worry about late onlining of a
369 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
370 *
371 * Give me a VAX-11/780 any day of the week...
372 */
/* System-wide Spectre-v4 state, only raised via update_mitigation_state(). */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/*
 * Mitigation policy selected on the command line:
 * DYNAMIC - toggled on kernel entry/exit and per task,
 * ENABLED - forced on everywhere,
 * DISABLED - forced off everywhere.
 */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping from "ssbd=" argument strings to the policy above. */
static const struct spectre_v4_param {
	const char *str;
	enum spectre_v4_policy policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		/* Prefix match, so e.g. "force-off" also matches "force-offX". */
		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
414
415 /*
416 * Because this was all written in a rush by people working in different silos,
417 * we've ended up with multiple command line options to control the same thing.
418 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
419 * with contradictory parameters. The mitigation is always either "off",
420 * "dynamic" or "on".
421 */
spectre_v4_mitigations_off(void)422 static bool spectre_v4_mitigations_off(void)
423 {
424 bool ret = cpu_mitigations_off() ||
425 __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
426
427 if (ret)
428 pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
429
430 return ret;
431 }
432
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	/* "off" wins over "dynamic" when parameters contradict. */
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}
439
/* True if the Spectre-v4 mitigation is forced on for everything. */
static bool spectre_v4_mitigations_on(void)
{
	/* "off" wins over "on" when parameters contradict. */
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
445
cpu_show_spec_store_bypass(struct device * dev,struct device_attribute * attr,char * buf)446 ssize_t cpu_show_spec_store_bypass(struct device *dev,
447 struct device_attribute *attr, char *buf)
448 {
449 switch (spectre_v4_state) {
450 case SPECTRE_UNAFFECTED:
451 return sprintf(buf, "Not affected\n");
452 case SPECTRE_MITIGATED:
453 return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
454 case SPECTRE_VULNERABLE:
455 fallthrough;
456 default:
457 return sprintf(buf, "Vulnerable\n");
458 }
459 }
460
/* Accessor for the system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
465
/*
 * Hardware view of Spectre-v4 for *this* CPU: unaffected if it is on the
 * safelist, mitigated if it implements PSTATE.SSBS, otherwise vulnerable
 * (and the firmware mitigation must be sought).
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
487
spectre_v4_get_cpu_fw_mitigation_state(void)488 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
489 {
490 int ret;
491 struct arm_smccc_res res;
492
493 arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
494 ARM_SMCCC_ARCH_WORKAROUND_2, &res);
495
496 ret = res.a0;
497 switch (ret) {
498 case SMCCC_RET_SUCCESS:
499 return SPECTRE_MITIGATED;
500 case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
501 fallthrough;
502 case SMCCC_RET_NOT_REQUIRED:
503 return SPECTRE_UNAFFECTED;
504 default:
505 fallthrough;
506 case SMCCC_RET_NOT_SUPPORTED:
507 return SPECTRE_VULNERABLE;
508 }
509 }
510
has_spectre_v4(const struct arm64_cpu_capabilities * cap,int scope)511 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
512 {
513 enum mitigation_state state;
514
515 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
516
517 state = spectre_v4_get_cpu_hw_mitigation_state();
518 if (state == SPECTRE_VULNERABLE)
519 state = spectre_v4_get_cpu_fw_mitigation_state();
520
521 return state != SPECTRE_UNAFFECTED;
522 }
523
/*
 * Emulate an "MSR SSBS, #imm" that trapped (0xd500401f is the MSR-immediate
 * encoding with the SSBS field patched in): reflect the requested immediate
 * into the saved PSTATE.SSBS bit and skip the instruction. Returns false if
 * @instr is not that encoding.
 */
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
{
	/* Ignore the immediate bit when matching the encoding. */
	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
	const u32 instr_val = 0xd500401f | PSTATE_SSBS;

	if ((instr & instr_mask) != instr_val)
		return false;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* 4 == AArch64 instruction size. */
	arm64_skip_faulting_instruction(regs, 4);
	return true;
}
540
/*
 * Enable the PSTATE.SSBS-based Spectre-v4 mitigation on this CPU, or
 * deliberately disable it (DSSBS + SSBS=1) when mitigations are off.
 * Returns the resulting per-CPU mitigation state.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Disable the mitigation for kernel (SSBS=1) and EL0 (DSSBS). */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);

	/*
	 * SSBS is self-synchronizing and is intended to affect subsequent
	 * speculative instructions, but some CPUs can speculate with a stale
	 * value of SSBS.
	 *
	 * Mitigate this with an unconditional speculation barrier, as CPUs
	 * could mis-speculate branches and bypass a conditional barrier.
	 */
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
		spec_bar();

	return SPECTRE_MITIGATED;
}
575
576 /*
577 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
578 * we fallthrough and check whether firmware needs to be called on this CPU.
579 */
/*
 * Alternatives callback: replace the branch that skips the Spectre-v4
 * firmware-mitigation code with a NOP, but only when the dynamic firmware
 * mitigation is actually in use (no SSBS, mitigations not off).
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Leave the skip-branch in place: nothing to call at runtime. */
	if (spectre_v4_mitigations_off())
		return;

	/* SSBS handles it in hardware; no firmware call needed. */
	if (cpus_have_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
595
596 /*
597 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
598 * to call into firmware to adjust the mitigation state.
599 */
/*
 * Alternatives callback: turn a NOP placeholder into the HVC or SMC
 * instruction matching the detected SMCCC conduit, so entry code can reach
 * firmware to adjust the mitigation state. Leaves the NOP if there is no
 * conduit.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
621
/*
 * Enable (or explicitly disable) the firmware ARCH_WORKAROUND_2 mitigation
 * on this CPU, and record whether entry code must re-arm it via the
 * per-CPU arm64_ssbd_callback_required flag. Returns the resulting state.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Tell firmware to turn the mitigation off, and report so. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* Dynamic policy: entry/exit code must toggle the firmware state. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
642
spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities * __unused)643 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
644 {
645 enum mitigation_state state;
646
647 WARN_ON(preemptible());
648
649 state = spectre_v4_enable_hw_mitigation();
650 if (state == SPECTRE_VULNERABLE)
651 state = spectre_v4_enable_fw_mitigation();
652
653 update_mitigation_state(&spectre_v4_state, state);
654 }
655
/*
 * Set or clear the SSBS bit in the saved PSTATE, using the AArch32 bit
 * position for compat tasks.
 */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 mask = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
	u64 pstate = regs->pstate & ~mask;

	if (state)
		pstate |= mask;

	regs->pstate = pstate;
}
665
/*
 * Apply the Spectre-v4 policy to @tsk's saved PSTATE.SSBS on context
 * switch: SSBS is set (speculation allowed) when mitigations are off, or
 * under the dynamic policy for user tasks that haven't requested TIF_SSBD.
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}
678
679 /*
680 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
681 * This is interesting because the "speculation disabled" behaviour can be
682 * configured so that it is preserved across exec(), which means that the
683 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
684 * from userspace.
685 */
/*
 * prctl helper: mark @task as wanting the SSB mitigation (speculation
 * disabled), clearing any pending until-execve request.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
692
/*
 * prctl helper: mark @task as not wanting the SSB mitigation (speculation
 * allowed), clearing any pending until-execve request.
 */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
699
/*
 * Handle PR_SPEC_STORE_BYPASS control requests for @task. Note the
 * inversion: PR_SPEC_ENABLE enables *speculation*, i.e. disables the
 * mitigation. Returns 0, -EPERM when the request contradicts a forced
 * state, or -ERANGE for unknown controls.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Push the new choice into the task's saved PSTATE immediately. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
762
arch_prctl_spec_ctrl_set(struct task_struct * task,unsigned long which,unsigned long ctrl)763 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
764 unsigned long ctrl)
765 {
766 switch (which) {
767 case PR_SPEC_STORE_BYPASS:
768 return ssbd_prctl_set(task, ctrl);
769 default:
770 return -ENODEV;
771 }
772 }
773
/*
 * Report the PR_SPEC_STORE_BYPASS status for @task: first the system-wide
 * state (not affected / forced on / vulnerable), then, only under the
 * dynamic policy, the per-task prctl flags.
 */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		/* Forced on everywhere: indistinguishable from unaffected. */
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
806
arch_prctl_spec_ctrl_get(struct task_struct * task,unsigned long which)807 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
808 {
809 switch (which) {
810 case PR_SPEC_STORE_BYPASS:
811 return ssbd_prctl_get(task);
812 default:
813 return -ENODEV;
814 }
815 }
816
817 /*
818 * Spectre BHB.
819 *
820 * A CPU is either:
821 * - Mitigated by a branchy loop a CPU specific number of times, and listed
822 * in our "loop mitigated list".
823 * - Mitigated in software by the firmware Spectre v2 call.
824 * - Has the ClearBHB instruction to perform the mitigation.
825 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
826 * software mitigation in the vectors is needed.
827 * - Has CSV2.3, so is unaffected.
828 */
/* System-wide Spectre-BHB state, only raised via update_mitigation_state(). */
static enum mitigation_state spectre_bhb_state;

/* Accessor for the system-wide Spectre-BHB mitigation state. */
enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

/* Bit numbers recording which BHB mitigation styles are in use system-wide. */
enum bhb_mitigation_bits {
	BHB_LOOP,	/* branchy-loop vectors */
	BHB_FW,		/* firmware ARCH_WORKAROUND_3 */
	BHB_HW,		/* ECBHB: hardware clears on exception */
	BHB_INSN,	/* ClearBHB instruction */
};
static unsigned long system_bhb_mitigations;
843
844 /*
845 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
846 * SCOPE_SYSTEM call will give the right answer.
847 */
/*
 * Number of loop iterations ("k") needed to flush the branch history on
 * this CPU (SCOPE_LOCAL_CPU), or the maximum seen across all CPUs so far
 * (any other scope). 0 means the loop mitigation doesn't apply.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	/* Running maximum over every CPU probed with SCOPE_LOCAL_CPU. */
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
897
spectre_bhb_get_cpu_fw_mitigation_state(void)898 static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
899 {
900 int ret;
901 struct arm_smccc_res res;
902
903 arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
904 ARM_SMCCC_ARCH_WORKAROUND_3, &res);
905
906 ret = res.a0;
907 switch (ret) {
908 case SMCCC_RET_SUCCESS:
909 return SPECTRE_MITIGATED;
910 case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
911 return SPECTRE_UNAFFECTED;
912 default:
913 fallthrough;
914 case SMCCC_RET_NOT_SUPPORTED:
915 return SPECTRE_VULNERABLE;
916 }
917 }
918
/*
 * Whether this CPU (SCOPE_LOCAL_CPU) — or any CPU probed so far (other
 * scopes) — needs the firmware Spectre-BHB mitigation. The static
 * system_affected flag latches once any CPU matches.
 */
static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}
943
supports_ecbhb(int scope)944 static bool supports_ecbhb(int scope)
945 {
946 u64 mmfr1;
947
948 if (scope == SCOPE_LOCAL_CPU)
949 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
950 else
951 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
952
953 return cpuid_feature_extract_unsigned_field(mmfr1,
954 ID_AA64MMFR1_EL1_ECBHB_SHIFT);
955 }
956
/*
 * cpufeature "matches" callback for Spectre-BHB. Only CSV2.3 means truly
 * unaffected; a CPU advertising ClearBHB, appearing in the loop lists, or
 * needing the firmware call is affected (but mitigable).
 */
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	/* ClearBHB exists precisely because the CPU needs it. */
	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}
976
/*
 * Point this CPU at the EL1 exception vectors for @slot, recording the
 * choice in this_cpu_vector and writing VBAR_EL1 unless KPTI owns the
 * vector switch.
 */
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
993
/* Set by "nospectre_bhb" on the kernel command line. */
static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	/* Flag-style parameter: any value (or none) disables the mitigation. */
	__nospectre_bhb = true;
	return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);
1001
/*
 * Pick and install the Spectre-BHB mitigation for the calling CPU, trying
 * the options in decreasing order of preference: hardware immunity (ECBHB),
 * the ClearBHB instruction, the branchy-loop vectors, then a firmware call.
 * Also steers this CPU's KVM hyp vector slot so EL2 gets an equivalent
 * sequence, and folds the result into the system-wide spectre_bhb_state.
 */
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off() || __nospectre_bhb) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		/* Hardware is immune; nothing to install. */
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	/* Only ratchets the state upwards; warns after caps are finalized. */
	update_mitigation_state(&spectre_bhb_state, state);
}
1074
1075 /* Patched to NOP when enabled */
spectre_bhb_patch_loop_mitigation_enable(struct alt_instr * alt,__le32 * origptr,__le32 * updptr,int nr_inst)1076 void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
1077 __le32 *origptr,
1078 __le32 *updptr, int nr_inst)
1079 {
1080 BUG_ON(nr_inst != 1);
1081
1082 if (test_bit(BHB_LOOP, &system_bhb_mitigations))
1083 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1084 }
1085
1086 /* Patched to NOP when enabled */
spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr * alt,__le32 * origptr,__le32 * updptr,int nr_inst)1087 void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
1088 __le32 *origptr,
1089 __le32 *updptr, int nr_inst)
1090 {
1091 BUG_ON(nr_inst != 1);
1092
1093 if (test_bit(BHB_FW, &system_bhb_mitigations))
1094 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1095 }
1096
1097 /* Patched to correct the immediate */
spectre_bhb_patch_loop_iter(struct alt_instr * alt,__le32 * origptr,__le32 * updptr,int nr_inst)1098 void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1099 __le32 *origptr, __le32 *updptr, int nr_inst)
1100 {
1101 u8 rd;
1102 u32 insn;
1103 u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1104
1105 BUG_ON(nr_inst != 1); /* MOV -> MOV */
1106
1107 if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1108 return;
1109
1110 insn = le32_to_cpu(*origptr);
1111 rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1112 insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1113 AARCH64_INSN_VARIANT_64BIT,
1114 AARCH64_INSN_MOVEWIDE_ZERO);
1115 *updptr++ = cpu_to_le32(insn);
1116 }
1117
/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	/* Leave the original MOV in place unless the FW mitigation is live. */
	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	/* Preserve the destination register of the original MOV. */
	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	/*
	 * ORR Wrd, WZR, #imm is a MOV of the WA3 function ID into rd;
	 * the encoding only works if the constant is a valid logical
	 * immediate, hence the AARCH64_BREAK_FAULT check below.
	 */
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}
1143
1144 /* Patched to NOP when not supported */
spectre_bhb_patch_clearbhb(struct alt_instr * alt,__le32 * origptr,__le32 * updptr,int nr_inst)1145 void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
1146 __le32 *origptr, __le32 *updptr, int nr_inst)
1147 {
1148 BUG_ON(nr_inst != 2);
1149
1150 if (test_bit(BHB_INSN, &system_bhb_mitigations))
1151 return;
1152
1153 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1154 *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1155 }
1156
#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	/*
	 * Only warn when the BHB mitigation is actually in place (and
	 * Spectre-v2 isn't wide open anyway), since unprivileged eBPF
	 * undermines it.
	 */
	bool relevant = spectre_v2_state != SPECTRE_VULNERABLE &&
			spectre_bhb_state == SPECTRE_MITIGATED;

	if (relevant && !new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif
1169