/*
 *
 *	verify_cpu.S - Code for cpu long mode and SSE verification. This
 *	code has been borrowed from boot/setup.S and was introduced by
 *	Andi Kleen.
 *
 *	Copyright (c) 2007  Andi Kleen (ak@suse.de)
 *	Copyright (c) 2007  Eric Biederman (ebiederm@xmission.com)
 *	Copyright (c) 2007  Vivek Goyal (vgoyal@in.ibm.com)
 *	Copyright (c) 2010  Kees Cook (kees.cook@canonical.com)
 *
 *	This source code is licensed under the GNU General Public License,
 *	Version 2.  See the file COPYING for more details.
 *
 *	This is common code that verifies whether the CPU supports
 *	long mode and SSE. It is not called directly; instead, this
 *	file is included at various places and compiled in that context.
 *	The code is expected to run in 32-bit mode. It is currently
 *	included from:
 *
 *	arch/x86/boot/compressed/head_64.S: Boot cpu verification
 *	arch/x86/kernel/trampoline_64.S: secondary processor verification
 *	arch/x86/kernel/head_32.S: processor startup
 *
 *	verify_cpu returns the status of long mode and SSE in register %eax:
 *		0: Success    1: Failure
 *
 *	On Intel, the XD_DISABLE flag will be cleared as a side-effect.
 *
 *	The caller needs to check the return code and act on it
 *	appropriately, e.g. display a message or halt (see the usage
 *	sketch below this header).
 */
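
/*
 *	Illustrative only: a minimal sketch of how a 32-bit call site
 *	might use this code after including it.  The error label
 *	.Lno_longmode below is hypothetical; the real call sites listed
 *	above differ in detail:
 *
 *		call	verify_cpu
 *		testl	%eax, %eax		# 0: success, 1: failure
 *		jnz	.Lno_longmode		# e.g. print a message and halt
 */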

#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

ENTRY(verify_cpu)
	pushf				# Save caller passed flags
	push	$0			# Kill any dangerous flags
	popf

#ifndef __x86_64__
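	# Probe for CPUID by toggling the ID bit (bit 21, 0x200000) in
	# EFLAGS: if the bit cannot be flipped, the CPU has no CPUID
	# instruction and therefore cannot support long mode.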
	pushfl				# standard way to check for cpuid
	popl	%eax
	movl	%eax,%ebx
	xorl	$0x200000,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	cmpl	%eax,%ebx
	jz	.Lverify_cpu_no_longmode	# cpu has no cpuid
#endif

	movl	$0x0,%eax		# See if cpuid 1 is implemented
	cpuid
	cmpl	$0x1,%eax
	jb	.Lverify_cpu_no_longmode	# no cpuid 1

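	# CPUID leaf 0 (above) left the vendor string in %ebx:%edx:%ecx.
	# The constants below are "Auth"/"enti"/"cAMD" in little-endian;
	# %di serves as an "is AMD" flag for the SSE fallback further down.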
	xor	%di,%di
	cmpl	$0x68747541,%ebx	# AuthenticAMD
	jnz	.Lverify_cpu_noamd
	cmpl	$0x69746e65,%edx
	jnz	.Lverify_cpu_noamd
	cmpl	$0x444d4163,%ecx
	jnz	.Lverify_cpu_noamd
	mov	$1,%di			# cpu is from AMD
	jmp	.Lverify_cpu_check

.Lverify_cpu_noamd:
	cmpl	$0x756e6547,%ebx        # GenuineIntel?
	jnz	.Lverify_cpu_check
	cmpl	$0x49656e69,%edx
	jnz	.Lverify_cpu_check
	cmpl	$0x6c65746e,%ecx
	jnz	.Lverify_cpu_check

	# only call IA32_MISC_ENABLE when:
	# family > 6 || (family == 6 && model >= 0xd)
	movl	$0x1, %eax		# check CPU family and model
	cpuid
	movl	%eax, %ecx

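	# CPUID.1 EAX layout: stepping [3:0], model [7:4], family [11:8],
	# extended model [19:16], extended family [27:20]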
	andl	$0x0ff00f00, %eax	# mask family and extended family
	shrl	$8, %eax
	cmpl	$6, %eax
	ja	.Lverify_cpu_clear_xd	# family > 6, ok
	jb	.Lverify_cpu_check	# family < 6, skip

	andl	$0x000f00f0, %ecx	# mask model and extended model
	shrl	$4, %ecx
	cmpl	$0xd, %ecx
	jb	.Lverify_cpu_check	# family == 6, model < 0xd, skip

.Lverify_cpu_clear_xd:
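	# XD_DISABLE is bit 34 of MSR_IA32_MISC_ENABLE, i.e. bit 2 of the
	# high half that rdmsr returns in %edx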
	movl	$MSR_IA32_MISC_ENABLE, %ecx
	rdmsr
	btrl	$2, %edx		# clear MSR_IA32_MISC_ENABLE_XD_DISABLE
	jnc	.Lverify_cpu_check	# only write MSR if bit was changed
	wrmsr

.Lverify_cpu_check:
	movl    $0x1,%eax		# Does the cpu have what it takes
	cpuid
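	# andl+xorl leaves %edx non-zero iff at least one feature bit
	# required by REQUIRED_MASK0 is missing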
	andl	$REQUIRED_MASK0,%edx
	xorl	$REQUIRED_MASK0,%edx
	jnz	.Lverify_cpu_no_longmode

	movl    $0x80000000,%eax	# See if extended cpuid is implemented
	cpuid
	cmpl    $0x80000001,%eax
	jb      .Lverify_cpu_no_longmode	# no extended cpuid

	movl    $0x80000001,%eax	# Does the cpu have what it takes
	cpuid
	andl    $REQUIRED_MASK1,%edx
	xorl    $REQUIRED_MASK1,%edx
	jnz     .Lverify_cpu_no_longmode

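	# Check for SSE.  On AMD CPUs (%di set above) SSE may merely be
	# disabled via bit 15 of MSR_K7_HWCR; clear it once and retest,
	# zeroing %di so the retry happens only once.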
.Lverify_cpu_sse_test:
	movl	$1,%eax
	cpuid
	andl	$SSE_MASK,%edx
	cmpl	$SSE_MASK,%edx
	je	.Lverify_cpu_sse_ok
	test	%di,%di
	jz	.Lverify_cpu_no_longmode	# only try to force SSE on AMD
	movl	$MSR_K7_HWCR,%ecx
	rdmsr
	btr	$15,%eax		# enable SSE
	wrmsr
	xor	%di,%di			# don't loop
	jmp	.Lverify_cpu_sse_test	# try again

.Lverify_cpu_no_longmode:
	popf				# Restore caller passed flags
	movl $1,%eax
	ret
.Lverify_cpu_sse_ok:
	popf				# Restore caller passed flags
	xorl %eax, %eax
	ret
ENDPROC(verify_cpu)
