197873a3dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
296ae6ea0SThomas Gleixner /* -*- linux-c -*- ------------------------------------------------------- *
396ae6ea0SThomas Gleixner *
496ae6ea0SThomas Gleixner * Copyright (C) 1991, 1992 Linus Torvalds
596ae6ea0SThomas Gleixner * Copyright 2007 rPath, Inc. - All Rights Reserved
696ae6ea0SThomas Gleixner *
796ae6ea0SThomas Gleixner * ----------------------------------------------------------------------- */
896ae6ea0SThomas Gleixner
996ae6ea0SThomas Gleixner /*
1096ae6ea0SThomas Gleixner * Check for obligatory CPU features and abort if the features are not
1196ae6ea0SThomas Gleixner * present. This code should be compilable as 16-, 32- or 64-bit
1296ae6ea0SThomas Gleixner * code, so be very careful with types and inline assembly.
1396ae6ea0SThomas Gleixner *
1496ae6ea0SThomas Gleixner * This code should not contain any messages; that requires an
1596ae6ea0SThomas Gleixner * additional wrapper.
1696ae6ea0SThomas Gleixner *
1796ae6ea0SThomas Gleixner * As written, this code is not safe for inclusion into the kernel
1896ae6ea0SThomas Gleixner * proper (after FPU initialization, in particular).
1996ae6ea0SThomas Gleixner */
2096ae6ea0SThomas Gleixner
2196ae6ea0SThomas Gleixner #ifdef _SETUP
2296ae6ea0SThomas Gleixner # include "boot.h"
2396ae6ea0SThomas Gleixner #endif
2496ae6ea0SThomas Gleixner #include <linux/types.h>
25e4a84be6SDave Hansen #include <asm/intel-family.h>
2696ae6ea0SThomas Gleixner #include <asm/processor-flags.h>
2796ae6ea0SThomas Gleixner #include <asm/required-features.h>
2896ae6ea0SThomas Gleixner #include <asm/msr-index.h>
29c041b5adSVivek Goyal #include "string.h"
30*950d0055SMichael Roth #include "msr.h"
3196ae6ea0SThomas Gleixner
/* Per-word bitmask of required-but-missing feature bits, filled in by
   check_cpuflags() and handed back to the caller via check_cpu(). */
static u32 err_flags[NCAPINTS];

/* Minimum CPU family the kernel was configured for (e.g. 6 for a
   686-class build). */
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

/* Feature bits the configured kernel cannot run without, one entry per
   cpufeature word.  Words this boot-time check does not know how to
   probe are left zero. */
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
	0, /* REQUIRED_MASK8 not implemented in this file */
	0, /* REQUIRED_MASK9 not implemented in this file */
	0, /* REQUIRED_MASK10 not implemented in this file */
	0, /* REQUIRED_MASK11 not implemented in this file */
	0, /* REQUIRED_MASK12 not implemented in this file */
	0, /* REQUIRED_MASK13 not implemented in this file */
	0, /* REQUIRED_MASK14 not implemented in this file */
	0, /* REQUIRED_MASK15 not implemented in this file */
	REQUIRED_MASK16,
};

/* Pack four characters into a 32-bit little-endian word, matching the
   register layout of the CPUID vendor string. */
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
5896ae6ea0SThomas Gleixner
is_amd(void)5996ae6ea0SThomas Gleixner static int is_amd(void)
6096ae6ea0SThomas Gleixner {
6196ae6ea0SThomas Gleixner return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
6296ae6ea0SThomas Gleixner cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
6396ae6ea0SThomas Gleixner cpu_vendor[2] == A32('c', 'A', 'M', 'D');
6496ae6ea0SThomas Gleixner }
6596ae6ea0SThomas Gleixner
is_centaur(void)6696ae6ea0SThomas Gleixner static int is_centaur(void)
6796ae6ea0SThomas Gleixner {
6896ae6ea0SThomas Gleixner return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
6996ae6ea0SThomas Gleixner cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
7096ae6ea0SThomas Gleixner cpu_vendor[2] == A32('a', 'u', 'l', 's');
7196ae6ea0SThomas Gleixner }
7296ae6ea0SThomas Gleixner
is_transmeta(void)7396ae6ea0SThomas Gleixner static int is_transmeta(void)
7496ae6ea0SThomas Gleixner {
7596ae6ea0SThomas Gleixner return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
7696ae6ea0SThomas Gleixner cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
7796ae6ea0SThomas Gleixner cpu_vendor[2] == A32('M', 'x', '8', '6');
7896ae6ea0SThomas Gleixner }
7996ae6ea0SThomas Gleixner
is_intel(void)8069f2366cSChris Bainbridge static int is_intel(void)
8169f2366cSChris Bainbridge {
8269f2366cSChris Bainbridge return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
8369f2366cSChris Bainbridge cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
8469f2366cSChris Bainbridge cpu_vendor[2] == A32('n', 't', 'e', 'l');
8569f2366cSChris Bainbridge }
8669f2366cSChris Bainbridge
8796ae6ea0SThomas Gleixner /* Returns a bitmask of which words we have error bits in */
check_cpuflags(void)886e6a4932SH. Peter Anvin static int check_cpuflags(void)
8996ae6ea0SThomas Gleixner {
9096ae6ea0SThomas Gleixner u32 err;
9196ae6ea0SThomas Gleixner int i;
9296ae6ea0SThomas Gleixner
9396ae6ea0SThomas Gleixner err = 0;
9496ae6ea0SThomas Gleixner for (i = 0; i < NCAPINTS; i++) {
9596ae6ea0SThomas Gleixner err_flags[i] = req_flags[i] & ~cpu.flags[i];
9696ae6ea0SThomas Gleixner if (err_flags[i])
9796ae6ea0SThomas Gleixner err |= 1 << i;
9896ae6ea0SThomas Gleixner }
9996ae6ea0SThomas Gleixner
10096ae6ea0SThomas Gleixner return err;
10196ae6ea0SThomas Gleixner }
10296ae6ea0SThomas Gleixner
/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof(cpu.flags));
	cpu.level = 3;

	/* The AC bit in EFLAGS can only be toggled on 486 and later. */
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	/* Long mode present -> treat as level 64 (see comment above). */
	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	/*
	 * From here on: vendor-specific workarounds, tried only when the
	 * sole problem is in feature word 0 (err == 0x01) and only the
	 * specific bits each quirk can fix are missing.
	 */
	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		struct msr m;

		/* Clear HWCR bit 15 (SSE disable on AMD K7+ — confirm
		   against AMD BKDG) to unmask SSE/SSE2. */
		boot_rdmsr(MSR_K7_HWCR, &m);
		m.l &= ~(1 << 15);
		boot_wrmsr(MSR_K7_HWCR, &m);

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		struct msr m;

		/* Set FCR bits 1 and 7 to expose CMPXCHG8B (per the VIA
		   C3 quirk; the flag is then set by hand below since
		   CPUID may still not report it). */
		boot_rdmsr(MSR_VIA_FCR, &m);
		m.l |= (1 << 1) | (1 << 7);
		boot_wrmsr(MSR_VIA_FCR, &m);

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		struct msr m, m_tmp;
		u32 level = 1;

		/* MSR 0x80860004 holds the Transmeta CPUID feature mask:
		   save it, unmask everything (~0), re-read CPUID leaf 1's
		   EDX into flags word 0, then restore the original mask. */
		boot_rdmsr(0x80860004, &m);
		m_tmp = m;
		m_tmp.l = ~0;
		boot_wrmsr(0x80860004, &m_tmp);
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		boot_wrmsr(0x80860004, &m);

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
		/* PAE is disabled on this Pentium M but can be forced */
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		}
		else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}
	/* Even with all required flags present, refuse affected KNL parts. */
	if (!err)
		err = check_knl_erratum();

	/* Report results to the caller; err_flags only on failure. */
	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}
198e4a84be6SDave Hansen
check_knl_erratum(void)199e4a84be6SDave Hansen int check_knl_erratum(void)
200e4a84be6SDave Hansen {
201e4a84be6SDave Hansen /*
202e4a84be6SDave Hansen * First check for the affected model/family:
203e4a84be6SDave Hansen */
204e4a84be6SDave Hansen if (!is_intel() ||
205e4a84be6SDave Hansen cpu.family != 6 ||
206e4a84be6SDave Hansen cpu.model != INTEL_FAM6_XEON_PHI_KNL)
207e4a84be6SDave Hansen return 0;
208e4a84be6SDave Hansen
209e4a84be6SDave Hansen /*
210e4a84be6SDave Hansen * This erratum affects the Accessed/Dirty bits, and can
211e4a84be6SDave Hansen * cause stray bits to be set in !Present PTEs. We have
212e4a84be6SDave Hansen * enough bits in our 64-bit PTEs (which we have on real
213e4a84be6SDave Hansen * 64-bit mode or PAE) to avoid using these troublesome
214e4a84be6SDave Hansen * bits. But, we do not have enough space in our 32-bit
215e4a84be6SDave Hansen * PTEs. So, refuse to run on 32-bit non-PAE kernels.
216e4a84be6SDave Hansen */
217e4a84be6SDave Hansen if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
218e4a84be6SDave Hansen return 0;
219e4a84be6SDave Hansen
220e4a84be6SDave Hansen puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
221e4a84be6SDave Hansen "processor due to a processor erratum. Use a 64-bit\n"
222e4a84be6SDave Hansen "kernel, or enable PAE in this 32-bit kernel.\n\n");
223e4a84be6SDave Hansen
224e4a84be6SDave Hansen return -1;
225e4a84be6SDave Hansen }
226e4a84be6SDave Hansen
227e4a84be6SDave Hansen
228