xref: /openbmc/u-boot/arch/x86/cpu/cpu.c (revision 7bf38161)
/*
 * (C) Copyright 2008-2011
 * Graeme Russ, <graeme.russ@gmail.com>
 *
 * (C) Copyright 2002
 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Alex Zuepke <azu@sysgo.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <command.h>
#include <errno.h>
#include <malloc.h>
#include <asm/control_regs.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
#include <linux/compiler.h>

/*
 * Constructor for a conventional segment GDT (or LDT) entry
 * This is a macro so it can be used in initialisers
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base)  & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))
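
/*
 * Worked example: GDT_ENTRY(0xc09b, 0, 0xfffff) combines the access byte
 * and granularity nibble from 0xc09b with base 0 and the 20-bit limit
 * 0xfffff, giving the descriptor 0x00cf9b000000ffff, i.e. a flat 4GiB
 * 32-bit code segment.
 */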

struct gdt_ptr {
	u16 len;
	u32 ptr;
} __packed;

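/*
 * Helpers to load a segment register from a GDT index: the index is scaled
 * by X86_GDT_ENTRY_SIZE (the 8-byte descriptor size) to form a selector
 * with RPL 0 that references the GDT.
 */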
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

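/*
 * Load GDTR with an lgdt pseudo-descriptor: a 16-bit limit (table size in
 * bytes minus one) followed by the 32-bit base address of the table.
 */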
static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	gdt.len = (num_entries * 8) - 1;
	gdt.ptr = (u32)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}

void setup_gdt(gd_t *id, u64 *gdt_addr)
{
	/* CS: code, read/execute, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	id->arch.gd_addr = id;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
		     (ulong)&id->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x109b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x1093, 0, 0x0ffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}

int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}

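/*
 * FPU setup: clearing CR0.EM lets x87 instructions execute rather than
 * fault, CR0.NE routes FPU errors through the native #MF exception instead
 * of the legacy FERR# mechanism, and CR0.MP makes WAIT/FWAIT honour CR0.TS.
 */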
int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	/* initialize FPU, reset EM, set MP and NE */
	asm ("fninit\n" \
	     "movl %%cr0, %%eax\n" \
	     "andl %0, %%eax\n" \
	     "orl  %1, %%eax\n" \
	     "movl %%eax, %%cr0\n" \
	     : : "i" (em_rst), "i" (mp_ne_set) : "eax");

	return 0;
}
int cpu_init_f(void) __attribute__((weak, alias("x86_cpu_init_f")));

int x86_cpu_init_r(void)
{
	/* Initialize core interrupt and exception functionality of CPU */
	cpu_init_interrupts();
	return 0;
}
int cpu_init_r(void) __attribute__((weak, alias("x86_cpu_init_r")));

void x86_enable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
	write_cr0(cr0);
	wbinvd();
}
void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));

void x86_disable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 |= X86_CR0_NW | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();
}
void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));

int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));

int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}

void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}

void __attribute__ ((regparm(0))) generate_gpf(void);

/* segment 0x70 is an arbitrary segment which does not exist */
asm(".globl generate_gpf\n"
	".hidden generate_gpf\n"
	".type generate_gpf, @function\n"
	"generate_gpf:\n"
	"ljmp   $0x70, $0x47114711\n");

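/*
 * With both the #GP and #DF vectors pointing at the faulting far jump, the
 * first fault escalates to a double fault and then a triple fault, which
 * forces the processor to reset.
 */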
void __reset_cpu(ulong addr)
{
	printf("Resetting using x86 Triple Fault\n");
	set_vector(13, generate_gpf);	/* general protection fault handler */
	set_vector(8, generate_gpf);	/* double fault handler */
	generate_gpf();			/* start the show */
}
void reset_cpu(ulong addr) __attribute__((weak, alias("__reset_cpu")));

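/*
 * Report the data cache as enabled unless the CR0.CD (cache disable)
 * bit, 0x40000000, is set.
 */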
int dcache_status(void)
{
	return !(read_cr0() & 0x40000000);
}

/* Define these functions to allow ehci-hcd to function */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
	enable_caches();
}

void dcache_disable(void)
{
	disable_caches();
}

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 1;
}

void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n"
		/* Enable pae */
		"movl	%%cr4, %%eax\n"
		"orl	$0x00000020, %%eax\n"
		"movl	%%eax, %%cr4\n"
		/* Enable paging */
		"movl	%%cr0, %%eax\n"
		"orl	$0x80000000, %%eax\n"
		"movl	%%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}

void cpu_disable_paging_pae(void)
{
	/* Turn off paging */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl	%%cr0, %%eax\n"
		"andl	$0x7fffffff, %%eax\n"
		"movl	%%eax, %%cr0\n"
		/* Disable pae */
		"movl	%%cr4, %%eax\n"
		"andl	$0xffffffdf, %%eax\n"
		"movl	%%eax, %%cr4\n"
		:
		:
		: "eax");
}

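/*
 * CPUID is supported if software can toggle the ID flag (bit 21) in
 * EFLAGS; flip the bit and check whether the change sticks.
 */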
static bool has_cpuid(void)
{
	unsigned long flag;

	asm volatile("pushf\n" \
		"pop %%eax\n"
		"mov %%eax, %%ecx\n"	/* ecx = flags */
		"xor %1, %%eax\n"
		"push %%eax\n"
		"popf\n"		/* flags ^= $2 */
		"pushf\n"
		"pop %%eax\n"		/* eax = flags */
		"push %%ecx\n"
		"popf\n"		/* flags = ecx */
		"xor %%ecx, %%eax\n"
		"mov %%eax, %0"
		: "=r" (flag)
		: "i" (1 << 21)
		: "eax", "ecx", "memory");

	return flag != 0;
}

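/*
 * CPUID leaf 0x80000000 returns the highest supported extended leaf in
 * EAX; long mode can only be queried if leaf 0x80000001 is available.
 */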
static bool can_detect_long_mode(void)
{
	unsigned long flag;

	asm volatile("mov $0x80000000, %%eax\n"
		"cpuid\n"
		"mov %%eax, %0"
		: "=r" (flag)
		:
		: "eax", "ebx", "ecx", "edx", "memory");

	return flag > 0x80000000UL;
}

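/* CPUID leaf 0x80000001 reports long mode (LM) support in EDX bit 29 */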
static bool has_long_mode(void)
{
	unsigned long flag;

	asm volatile("mov $0x80000001, %%eax\n"
		"cpuid\n"
		"mov %%edx, %0"
		: "=r" (flag)
		:
		: "eax", "ebx", "ecx", "edx", "memory");

	return flag & (1 << 29) ? true : false;
}

int cpu_has_64bit(void)
{
	return has_cpuid() && can_detect_long_mode() &&
		has_long_mode();
}

int print_cpuinfo(void)
{
	printf("CPU:   %s\n", cpu_has_64bit() ? "x86_64" : "x86");

	return 0;
}

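/*
 * Six 4KiB pages: one PML4, one PDPT and four page directories, enough to
 * identity-map the first 4GiB using 2MiB pages.
 */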
#define PAGETABLE_SIZE		(6 * 4096)

/**
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
 * @pgtable: Pointer to a 24KiB block of memory
 */
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry (+7 = present, writable, user) */
	pgtable[0] = (uint32_t)&pgtable[1024] + 7;

	/* Level 3 has one 64-bit entry for each GiB of memory */
	for (i = 0; i < 4; i++) {
		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
							0x1000 * i + 7;
	}

	/*
	 * Level 2 has 2048 64-bit entries, each representing 2MiB
	 * (0x183 = present | writable | 2MiB page size | global)
	 */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}

int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

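	/* cpu_call64() is not expected to return; reaching here is an error */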
	return -EFAULT;
}
387