/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "string.h"

static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

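/*
 * Feature words the kernel was built to require, one entry per
 * cpu.flags[]/X86_FEATURE_* capability word.  The REQUIRED_MASK*
 * values come from <asm/required-features.h> and depend on the
 * kernel configuration.
 */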
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
};

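/*
 * Pack four ASCII characters into a little-endian 32-bit word, matching
 * the byte order in which CPUID leaf 0 returns the vendor string in
 * EBX:EDX:ECX ("AuthenticAMD", "CentaurHauls", "GenuineTMx86",
 * "GenuineIntel").
 */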
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}

static int is_intel(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
}

/* Returns a bitmask of which words we have error bits in */
static int check_cpuflags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
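		/* Required feature bits that the CPU does not report */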
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level_ptr is set to the current CPU level; *req_level_ptr to the
 * required level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the array of missing feature flags if any
 * required flags are missing, and to NULL otherwise.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

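	/*
	 * EFLAGS.AC exists only on a 486 or later, so being able to toggle
	 * it bumps the detected level from 3 (386) to 4 (486).
	 */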
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

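	/*
	 * err == 0x01 means only capability word 0 has missing bits; the
	 * vendor-specific workarounds below may be able to enable those
	 * features and retry.
	 */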
	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD CPU and we're only missing SSE+SSE2,
		   try to turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
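		/*
		 * Bit 15 of MSR_K7_HWCR is the SSE-disable (SSEDIS) bit on
		 * these AMD parts; clearing it lets CPUID advertise SSE and
		 * SSE2 again.
		 */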
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
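		/*
		 * Bit 1 of the VIA/Centaur Feature Control Register (ECX8 in
		 * the kernel's centaur.c naming) makes CPUID advertise
		 * CMPXCHG8B; bit 7 is set alongside it here.
		 */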
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

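		/*
		 * MSR 0x80860004 masks the CPUID feature bits reported by
		 * Transmeta CPUs: save the current mask, write all-ones to
		 * unmask everything, re-read CPUID leaf 1 into cpu.flags[0],
		 * then restore the saved mask.
		 */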
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
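		/* Family 6, models 9 (Banias) and 13 (Dothan) Pentium M */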
		/* PAE is disabled on this Pentium M but can be forced */
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		} else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}
	if (!err)
		err = check_knl_erratum();

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
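
/*
 * Usage sketch: per the header comment, all user-visible messages belong in
 * an additional wrapper, so a boot-stage caller is expected to drive
 * check_cpu() roughly as below.  The wrapper name validate_cpu() and the
 * report_missing_features() helper are illustrative assumptions, not the
 * actual code in arch/x86/boot/cpu.c.
 *
 *	int validate_cpu(void)
 *	{
 *		u32 *err_flags;
 *		int cpu_level, req_level;
 *
 *		if (check_cpu(&cpu_level, &req_level, &err_flags)) {
 *			report_missing_features(cpu_level, req_level,
 *						err_flags);
 *			return -1;
 *		}
 *		return 0;
 *	}
 */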

int check_knl_erratum(void)
{
	/*
	 * First check for the affected model/family:
	 */
	if (!is_intel() ||
	    cpu.family != 6 ||
	    cpu.model != INTEL_FAM6_XEON_PHI_KNL)
		return 0;

	/*
	 * This erratum affects the Accessed/Dirty bits, and can
	 * cause stray bits to be set in !Present PTEs.  We have
	 * enough bits in our 64-bit PTEs (which we have in true
	 * 64-bit mode or with PAE) to avoid using these troublesome
	 * bits.  But we do not have enough space in our 32-bit
	 * PTEs.  So, refuse to run on 32-bit non-PAE kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
		return 0;

	puts("This 32-bit kernel cannot run on this Xeon Phi x200\n"
	     "processor due to a processor erratum.  Use a 64-bit\n"
	     "kernel, or enable PAE in this 32-bit kernel.\n\n");

	return -1;
}