/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
        unsigned long i;

        for (i = 0; i < PTRS_PER_PGD-1; i++)
                early_level4_pgt[i].pgd = 0;

        next_early_pgt = 0;

        write_cr3(__pa(early_level4_pgt));
}

/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        unsigned long i;
        pgdval_t pgd, *pgd_p;
        pudval_t pud, *pud_p;
        pmdval_t pmd, *pmd_p;

        /* Invalid address or early pgt is done? */
        if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
                return -1;

again:
        pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (pgd)
                pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                for (i = 0; i < PTRS_PER_PUD; i++)
                        pud_p[i] = 0;
                *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                for (i = 0; i < PTRS_PER_PMD; i++)
                        pmd_p[i] = 0;
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd = (physaddr & PMD_MASK) + early_pmd_flags;
        pmd_p[pmd_index(address)] = pmd;

        return 0;
}
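/*
 * Worked example (an illustrative sketch, not used by the code above):
 * assuming classic 4-level paging and the historical direct-map base
 * __PAGE_OFFSET = 0xffff880000000000 -- both assumptions for this
 * example only -- a fault on
 *
 *      address  = 0xffff880123456789
 *      physaddr = address - __PAGE_OFFSET = 0x123456789
 *
 * is resolved with the indices
 *
 *      pgd_index(address) = (address >> 39) & 511 = 272
 *      pud_index(address) = (address >> 30) & 511 = 4
 *      pmd_index(address) = (address >> 21) & 511 = 282
 *
 * and the 2MB mapping installed is
 *
 *      pmd = (physaddr & PMD_MASK) + early_pmd_flags
 *          = 0x123400000 + (present/RW/accessed/dirty/PSE bits).
 *
 * Note also the phys<->virt trick used above: a physical table address
 * becomes a dereferenceable pointer via "+ __START_KERNEL_map - phys_base",
 * i.e. through the kernel-text mapping, which survives in
 * early_level4_pgt even after reset_early_page_tables().
 */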
/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

/*
 * The boot protocol splits the 64-bit command-line pointer across two
 * fields: the low 32 bits live in hdr.cmd_line_ptr, the high 32 bits
 * in boot_params.ext_cmd_line_ptr.
 */
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

/* Copy the real-mode boot data to a safe place and grab the command line */
static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }
}

asmlinkage void __init x86_64_start_kernel(char *real_mode_data)
{
        int i;

        /*
         * Build-time sanity checks on the kernel image and module area
         * mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                       (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        /* clear bss before set_intr_gate with early_idt_handler */
        clear_bss();

        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
                set_intr_gate(i, early_idt_handlers[i]);
        load_idt((const struct desc_ptr *)&idt_descr);

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        if (console_loglevel == 10)
                early_printk("Kernel alive\n");

        clear_page(init_level4_pgt);

        /* set init_level4_pgt kernel high mapping */
        init_level4_pgt[511] = early_level4_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* version is always not zero if it is copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        reserve_ebda_region();

        start_kernel();
}
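/*
 * A minimal, self-contained userspace sketch (an assumed example, not
 * kernel code) of the command-line pointer splice performed by
 * get_cmd_line_ptr() above. The sample field values are hypothetical.
 *
 *      #include <stdint.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              uint32_t cmd_line_ptr     = 0x0009a000; // hdr.cmd_line_ptr (low 32 bits)
 *              uint32_t ext_cmd_line_ptr = 0x00000001; // ext_cmd_line_ptr (high 32 bits)
 *
 *              // Rebuild the full 64-bit physical address, as in get_cmd_line_ptr()
 *              uint64_t full = ((uint64_t)ext_cmd_line_ptr << 32) | cmd_line_ptr;
 *
 *              printf("%#llx\n", (unsigned long long)full); // prints 0x10009a000
 *              return 0;
 *      }
 *
 * The extension field is what lets a bootloader place the command line
 * above 4GB; the legacy 32-bit field alone could not address it.
 */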