xref: /openbmc/linux/arch/x86/kernel/cpu/bugs.c (revision a0ae2562c6c4b2721d9fddba63b7286c13517d9f)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * later writes to SPEC_CTRL preserve whatever reserved bits were already set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor- and possibly platform-specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

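/*
 * Boot-time entry point: identify the boot CPU, snapshot the initial
 * SPEC_CTRL value and select the Spectre v2 and Speculative Store Bypass
 * mitigations before alternative instructions are patched in.
 */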
void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed-size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems to be
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

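/*
 * x86_virt_spec_ctrl() - reconcile host and guest speculation control state.
 * @guest_spec_ctrl:	   guest value of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl: guest value of MSR_VIRT_SPEC_CTRL (SSBD only)
 * @setguest:		   true when switching to the guest values, false when
 *			   restoring the host values
 *
 * MSR_IA32_SPEC_CTRL and the AMD SSBD state are only updated when the host
 * and guest values actually differ.
 */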
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and OR in the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it is not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculative_store_bypass_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

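/*
 * Engage Speculative Store Bypass Disable on AMD: prefer the virtualized
 * MSR_AMD64_VIRT_SPEC_CTRL interface when available, otherwise set the
 * family-specific SSBD bit in MSR_AMD64_LS_CFG.
 */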
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

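/*
 * True when the kernel was built with a retpoline-aware compiler, i.e. when
 * RETPOLINE was defined at build time, which selects the full rather than
 * the minimal retpoline variants below.
 */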
static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

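/* Compare a command line argument of length @arglen against option @opt. */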
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

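/*
 * Parse "nospectre_v2" and "spectre_v2=" from the kernel command line and
 * map them to a mitigation command, falling back to AUTO for unknown or
 * unsupported selections.
 */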
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret, i;
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;
	else {
		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
		if (ret < 0)
			return SPECTRE_V2_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
			if (!match_option(arg, ret, mitigation_options[i].option))
				continue;
			cmd = mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			return true;
		}
	}
	return false;
}

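/*
 * Select the Spectre v2 mitigation from the command line choice, the CPU's
 * vulnerability state and the kernel build options, then force the matching
 * feature bits so alternatives patching picks the mitigation up.
 */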
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If neither SMEP nor PTI is available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this, fill
	 * the entire RSB, even when using IBRS.
	 *
	 * Skylake era CPUs have a separate issue with *underflow* of the
	 * RSB, where they will predict 'ret' targets from the generic BTB.
	 * The proper mitigation for this is IBRS. If IBRS is not supported
	 * or is deactivated in favour of retpolines, the RSB fill on context
	 * switch is required.
	 */
	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
	}

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. But firmware isn't, so use IBRS to protect firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

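/*
 * Parse "nospec_store_bypass_disable" and "spec_store_bypass_disable=" from
 * the kernel command line, falling back to AUTO for unknown options.
 */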
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

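/*
 * Map the command line selection to an SSB mitigation mode and, when a
 * system-wide disable was chosen, engage SSBD on the boot CPU.
 */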
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass.
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation.
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses SPEC_CTRL MSR bit 2 for this, while AMD may
		 * use a completely different MSR and bit, dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
			x86_amd_ssb_disable();
		else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

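/*
 * Per-task control of the SSB mitigation for prctl(PR_SET_SPECULATION_CTRL):
 * toggle TIF_SSBD for @task and update the MSRs immediately only when the
 * change affects the currently running task.
 */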
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	bool update;

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force-disabled, re-enabling is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}

	/*
	 * If this is being set on a task other than the current one, delay
	 * applying the CPU mitigation until that task is next scheduled.
	 */
	if (task == current && update)
		speculative_store_bypass_update_current();

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

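/* Report the SSB state of @task for prctl(PR_GET_SPECULATION_CTRL). */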
static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

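/*
 * Propagate the boot CPU's speculation control state to a freshly brought
 * up secondary CPU (AP).
 */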
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS

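/* Common backend for the /sys/devices/system/cpu/vulnerabilities/ files. */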
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif