xref: /openbmc/linux/arch/x86/kernel/cpu/bugs.c (revision 9726bfcd)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

#include "cpu.h"

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
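
/*
 * Note (editorial, believed accurate for this revision): these static keys
 * are consumed outside this file. switch_to_cond_stibp is evaluated in the
 * context-switch speculation-control path (arch/x86/kernel/process.c), the
 * switch_mm_*_ibpb keys gate the IBPB issued on mm switches
 * (arch/x86/mm/tlb.c), and the mds_* keys gate the VERW-based buffer
 * clearing helpers in asm/nospec-branch.h.
 */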

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper Spectre v2 mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select the proper mitigation for any exposure to the Speculative
	 * Store Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed-size MTRRs in there, and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems to be
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and OR in the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
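
/*
 * Example usage (kernel command line), derived from the parser above;
 * it only has an effect on MDS-affected CPUs:
 *
 *   mds=off          - disable the mitigation
 *   mds=full         - clear CPU buffers where needed (the default)
 *   mds=full,nosmt   - additionally disable SMT
 */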

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
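
/*
 * Example usage (kernel command line), taken from the table above and the
 * mode selection in spectre_v2_user_select_mitigation() below:
 *
 *   spectre_v2_user=off         - no user-to-user mitigation
 *   spectre_v2_user=on          - unconditional STIBP/IBPB
 *   spectre_v2_user=prctl,ibpb  - per-task STIBP via prctl, IBPB always-on
 *   spectre_v2_user=seccomp     - like prctl, also enabled for seccomp tasks
 */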

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled, no STIBP is required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available, clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT is possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
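
/*
 * Example usage (kernel command line), taken from the table above:
 *
 *   spectre_v2=off                - disable the mitigation
 *   spectre_v2=auto               - kernel picks (the default: Enhanced IBRS
 *                                   when available, otherwise retpoline)
 *   spectre_v2=retpoline,generic  - force the generic retpoline thunks
 *
 * "nospectre_v2" is treated like spectre_v2=off by the parser below.
 */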

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or
	 * AUTO, then there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = SPECTRE_V2_RETPOLINE_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = SPECTRE_V2_RETPOLINE_GENERIC;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and, if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[]  __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
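
/*
 * Example usage (kernel command line), taken from the table above and the
 * AUTO handling in __ssb_select_mitigation() below:
 *
 *   spec_store_bypass_disable=on       - disable SSB system-wide
 *   spec_store_bypass_disable=prctl    - leave SSB on; tasks opt in via prctl
 *   spec_store_bypass_disable=seccomp  - prctl, plus automatic opt-in for
 *                                        seccomp-filtered tasks (the default
 *                                        on CONFIG_SECCOMP kernels)
 *
 * "nospec_store_bypass_disable" is equivalent to ...=off.
 */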

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task, delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
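
/*
 * Sketch of the matching userspace side (the interface is documented in
 * Documentation/userspace-api/spec_ctrl.rst); the calls shown here are
 * illustrative:
 *
 *   #include <sys/prctl.h>
 *   #include <linux/prctl.h>
 *
 *   // Opt this task out of Speculative Store Bypass:
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *         PR_SPEC_DISABLE, 0, 0);
 *
 *   // Query the indirect-branch speculation state:
 *   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 */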

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support a 44-bit physical address space internally in the
 * cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44 bits internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
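
/*
 * Example usage (kernel command line), derived from the parser above; it
 * only has an effect on L1TF-affected CPUs, and the VMX L1D-flush half of
 * the mitigation is implemented on the KVM side:
 *
 *   l1tf=off          - disable the mitigation
 *   l1tf=flush        - PTE inversion plus conditional L1D flush (default)
 *   l1tf=flush,nosmt  - additionally disable SMT
 *   l1tf=full,force   - unconditional L1D flush with SMT forced off
 */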

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

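/*
 * The cpu_show_*() handlers below back the files under
 * /sys/devices/system/cpu/vulnerabilities/ (wired up in
 * drivers/base/cpu.c). Illustrative output:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/mds
 *   Mitigation: Clear CPU buffers; SMT vulnerable
 */
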
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif
1351