xref: /openbmc/linux/arch/x86/kernel/head32.c (revision 87e81786)
1 /*
2  *  linux/arch/i386/kernel/head32.c -- prepare to run common code
3  *
4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5  *  Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com>
6  */
7 
8 #include <linux/init.h>
9 #include <linux/start_kernel.h>
10 #include <linux/mm.h>
11 #include <linux/memblock.h>
12 
13 #include <asm/desc.h>
14 #include <asm/setup.h>
15 #include <asm/sections.h>
16 #include <asm/e820/api.h>
17 #include <asm/page.h>
18 #include <asm/apic.h>
19 #include <asm/io_apic.h>
20 #include <asm/bios_ebda.h>
21 #include <asm/tlbflush.h>
22 #include <asm/bootparam_utils.h>
23 
24 static void __init i386_default_early_setup(void)
25 {
26 	/* Initialize 32bit specific setup functions */
27 	x86_init.resources.reserve_resources = i386_reserve_resources;
28 	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
29 }
30 
/*
 * 32-bit C entry point from the early assembly boot code.  Performs the
 * minimal per-subarch setup and then hands off to the generic
 * start_kernel().  The call order below matters: the CR4 shadow must be
 * valid before any cr4 accessors run, and the early IDT must be in place
 * before code that might fault.
 */
asmlinkage __visible void __init i386_start_kernel(void)
{
	/* Initialize the CR4 shadow before any cr4 read/update helpers run. */
	cr4_init_shadow();

	/* Install early exception handlers so early faults are reported. */
	idt_setup_early_handler();

	/* Zero boot_params fields the bootloader may have left stale. */
	sanitize_boot_params(&boot_params);

	x86_early_init_platform_quirks();

	/* Call the subarch specific early setup function */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	case X86_SUBARCH_CE4100:
		x86_ce4100_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}

	/* Never returns. */
	start_kernel();
}
56 
57 /*
58  * Initialize page tables.  This creates a PDE and a set of page
59  * tables, which are located immediately beyond __brk_base.  The variable
60  * _brk_end is set up to point to the first "safe" location.
61  * Mappings are created both at virtual address 0 (identity mapping)
62  * and PAGE_OFFSET for up to _end.
63  *
64  * In PAE mode initial_page_table is statically defined to contain
65  * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
66  * entries). The identity mapping is handled by pointing two PGD entries
67  * to the first kernel PMD. Note the upper half of each PMD or PTE are
68  * always zero at this stage.
69  */
void __init mk_early_pgtbl_32(void)
{
/*
 * This function works purely on physical addresses (note how every
 * global is accessed through __pa below), so the normal __pa — which
 * may carry extra machinery — is replaced by the plain virt-to-phys
 * offset subtraction, valid for kernel static objects.
 */
#ifdef __pa
#undef __pa
#endif
#define __pa(x)  ((unsigned long)(x) - PAGE_OFFSET)
	pte_t pte, *ptep;
	int i;
	unsigned long *ptr;
	/* Enough space to fit pagetables for the low memory linear map */
	const unsigned long limit = __pa(_end) +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
/*
 * SET_PL2() abstracts the level-2 entry type: a PMD entry under PAE
 * (initial_pg_pmd), a PGD entry otherwise (initial_page_table).
 */
#ifdef CONFIG_X86_PAE
	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
#define SET_PL2(pl2, val)    { (pl2).pmd = (val); }
#else
	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
#define SET_PL2(pl2, val)   { (pl2).pgd = (val); }
#endif

	/* Page tables are carved out of the brk area, starting at phys 0. */
	ptep = (pte_t *)__pa(__brk_base);
	pte.pte = PTE_IDENT_ATTR;

	/*
	 * Fill one full page table (PTRS_PER_PTE identity PTEs) per
	 * level-2 entry until everything up to 'limit' is mapped.
	 */
	while ((pte.pte & PTE_PFN_MASK) < limit) {

		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
		*pl2p = pl2;
#ifndef CONFIG_X86_PAE
		/* Kernel PDE entry */
		*(pl2p +  ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
#endif
		for (i = 0; i < PTRS_PER_PTE; i++) {
			*ptep = pte;
			pte.pte += PAGE_SIZE;
			ptep++;
		}

		pl2p++;
	}

	/* Publish how far we mapped, writing through the physical alias. */
	ptr = (unsigned long *)__pa(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	/* Record the first free brk location (as a virtual address). */
	ptr = (unsigned long *)__pa(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;
}
117 
118