/*
 * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#define __head	__section(.head.text)

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

void __head __startup_64(unsigned long physaddr)
{
	unsigned long load_delta, *p;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	int i;

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;

	/*
	 * Set up the identity mapping for the switchover. These
	 * entries should *NOT* have the global bit set! This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
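	/*
	 * Note: the block below builds a 1:1 (identity) mapping of the
	 * [_text, _end) range at the physical load address, using pages
	 * taken from early_dynamic_pgts. Two adjacent entries are written
	 * at each level so that an image which happens to straddle a
	 * PGD/P4D/PUD boundary is still fully covered.
	 */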

	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + _KERNPG_TABLE;
		pgd[i + 1] = (pgdval_t)p4d + _KERNPG_TABLE;

		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
		p4d[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
		p4d[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
		pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
	}

	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
	pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
	pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
		pmd[idx] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;
	}

	/* Fixup phys_base */
	p = fixup_pointer(&phys_base, physaddr);
	*p += load_delta;
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return -1;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
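	/*
	 * Throughout the walk below, "(entry & PTE_PFN_MASK) +
	 * __START_KERNEL_map - phys_base" undoes __pa() for the kernel
	 * mapping: it turns the physical table address stored in an entry
	 * back into a pointer we can dereference.  Newly allocated tables
	 * are installed with the opposite conversion plus _KERNPG_TABLE.
	 */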
	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}

/* Don't add a printk in there. printk relies on the PDA which is not initialized
   yet. */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char * command_line;
	unsigned long cmd_line_ptr;

	memcpy(&boot_params, real_mode_data, sizeof boot_params);
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}
}

asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
			(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	kasan_early_init();

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}