// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * This file contains the setup_arch() code, which handles the
 * architecture-dependent parts of early kernel initialization.
 */
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>

#include <uapi/linux/mount.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/kasan.h>
#include <asm/kaslr.h>
#include <asm/mce.h>
#include <asm/mtrr.h>
#include <asm/realmode.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>

/*
 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
 * max_pfn_mapped:     highest directly mapped pfn > 4 GB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and
 * gaps are represented by pfn_mapped[].
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif
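/*
 * The RESERVE_BRK() above only sizes the link-time .brk section; the
 * memory itself is handed out at boot via extend_brk() below. A minimal
 * illustrative use (assuming a caller that runs before reserve_brk(),
 * as the DMI code is expected to do):
 *
 *	void *p = extend_brk(len, sizeof(long));
 */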
/*
 * Range of the brk area. The brk section follows the BSS; its size is
 * determined at link time, with the RESERVE_BRK() facility reserving
 * additional chunks.
 */
unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end   = (unsigned long)__brk_base;

struct boot_params boot_params;

/*
 * These are the four main kernel memory regions; we put them into
 * the resource tree so that kdump tools and other debugging tools
 * can recover them:
 */

static struct resource rodata_resource = {
	.name	= "Kernel rodata",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};


#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data;

/* Common CPU data for all CPUs */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
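/*
 * These masks decode the legacy boot_params.hdr.ram_size field (see the
 * rd_image_start assignment in setup_arch()): per the obsolete boot
 * protocol field, bits 0-10 hold the ramdisk start block, bit 14 is the
 * "load ramdisk" flag and bit 15 the "prompt for ramdisk" flag.
 */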
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information from boot_params into a
 *              safe place.
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	/* Refuse allocations once the brk area has been locked down */
	BUG_ON(_brk_start == 0);
	/* The alignment must be a power of two */
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/*
	 * Mark the brk area as locked down and no longer taking any
	 * new allocations.
	 */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	if (ramdisk_image == 0)
		ramdisk_image = phys_initrd_start;

	return ramdisk_image;
}

static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	if (ramdisk_size == 0)
		ramdisk_size = phys_initrd_size;

	return ramdisk_size;
}
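/*
 * An illustrative example of the 32+32 bit split above (values made up):
 * with hdr.ramdisk_image = 0x7f000000 and ext_ramdisk_image = 0x1, the
 * helpers yield the 64-bit physical address
 * (0x1ULL << 32) | 0x7f000000 = 0x17f000000.
 */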
static void __init relocate_initrd(void)
{
	/* Assume only the end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
						      PFN_PHYS(max_pfn_mapped));
	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only the end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	/* Assume only the end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end   = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}

#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);

		/*
		 * SETUP_INDIRECT entries point at payloads stored
		 * elsewhere; reserve those payloads as well.
		 */
		if (data->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
			memblock_reserve(((struct setup_indirect *)data->data)->addr,
					 ((struct setup_indirect *)data->data)->len);

		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}
}
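/*
 * For reference, the list the two walkers above traverse is a singly
 * linked chain of physical-address nodes passed in by the boot loader;
 * each node has the (uapi) layout:
 *
 *	struct setup_data {
 *		__u64 next;	// phys address of the next node, 0 ends the list
 *		__u32 type;	// SETUP_E820_EXT, SETUP_DTB, SETUP_EFI, ...
 *		__u32 len;	// length of the payload that follows
 *		__u8  data[];
 *	};
 */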
/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN		SZ_16M

/*
 * Keep the crash kernel below this limit.
 *
 * Earlier 32-bit kernels would limit the kernel to the low 512 MB range
 * due to mapping restrictions.
 *
 * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
 * the upper limit of system RAM in 4-level paging mode. Since the kdump
 * jump could be from 5-level paging to 4-level paging, the jump will fail
 * if the kernel is put above 64 TB, and during the 1st kernel bootup
 * there's no good way to detect the paging mode of the target kernel
 * which will be loaded for dumping.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	SZ_512M
# define CRASH_ADDR_HIGH_MAX	SZ_512M
#else
# define CRASH_ADDR_LOW_MAX	SZ_4G
# define CRASH_ADDR_HIGH_MAX	SZ_64T
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long low_mem_limit;
	int ret;

	low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
	if (ret) {
		/*
		 * two parts from kernel/dma/swiotlb.c:
		 * -swiotlb size: user-specified with swiotlb= or default.
		 *
		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
		 * to 8M for other buffers that may need to stay low too. Also
		 * make sure we allocate enough extra low memory so that we
		 * don't run out of DMA buffers for 32-bit devices.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return 0;
	}

	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(low_mem_limit >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}
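/*
 * Illustrative command lines handled by the reservation code below
 * (the sizes are arbitrary examples):
 *
 *	crashkernel=256M	- fixed size, placed automatically
 *	crashkernel=256M@64M	- fixed size at a fixed base
 *	crashkernel=512M,high	- allow placement above 4G (plus low DMA memory)
 *	crashkernel=128M,low	- override the low-memory companion reservation
 */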
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	if (xen_pv_domain()) {
		pr_info("Ignoring crashkernel for a Xen PV domain\n");
		return;
	}

	/* 0 means: find the address automatically */
	if (!crash_base) {
		/*
		 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
		 * crashkernel=x,high reserves memory over 4G, also allocates
		 * 256M extra low memory for DMA buffers and swiotlb.
		 * But the extra memory is not required for all machines.
		 * So try low memory first and fall back to high memory
		 * unless "crashkernel=size[KMG],high" is specified.
		 */
		if (!high)
			crash_base = memblock_phys_alloc_range(crash_size,
						CRASH_ALIGN, CRASH_ALIGN,
						CRASH_ADDR_LOW_MAX);
		if (!crash_base)
			crash_base = memblock_phys_alloc_range(crash_size,
						CRASH_ALIGN, CRASH_ALIGN,
						CRASH_ADDR_HIGH_MAX);
		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_free(crash_base, crash_size);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
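/*
 * Note on the probe above: read_pci_config_16(0, 2, 0, ...) queries
 * bus 0, device 2, function 0 - the usual location of Intel integrated
 * graphics - over the legacy 0xCF8/0xCFC config mechanism, which works
 * long before the PCI subsystem is initialized. 0x8086 is Intel's PCI
 * vendor ID.
 */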
/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * SandyBridge integrated graphics devices have a bug that prevents
	 * them from accessing certain memory ranges, namely anything below
	 * 1M and in the pages listed in bad_pages[] above.
	 *
	 * To avoid these pages ever being accessed by SNB gfx devices,
	 * reserve all memory below the 1 MB mark and the bad_pages that
	 * have not already been reserved at boot time.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KB of memory: this is a BIOS-owned
	 * area, not kernel RAM, but it is generally not listed as such in
	 * the E820 table. Mark it reserved here; the additional low-memory
	 * reservation (64 KB by default, since some BIOSes are known to
	 * corrupt low memory) is done in early_reserve_memory(). See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

	/*
	 * Special case: some BIOSes report the PC BIOS area
	 * (640 KB -> 1 MB) as RAM even though it is not. Take it out.
	 */
	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

	e820__update_table(e820_table);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM
	 * and attempt to fix it by adding the range. We may have a confused
	 * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
	 * to exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyways.
	 */
	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
	e820__range_remove(start, size, E820_TYPE_RAM, 0);
	e820__range_add(start, size, E820_TYPE_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	/* Clamp to [4 KB, 640 KB] */
	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);
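/*
 * Example (hypothetical values): booting with "reservelow=64k" sets
 * reserve_low to 65536, so early_reserve_memory() below keeps the first
 * 64 KB away from the allocators; "reservelow=1k" would be rounded up to
 * the 4 KB minimum, and anything above 640 KB is clamped down to 640 KB.
 */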
static void __init early_reserve_memory(void)
{
	/*
	 * Reserve the memory occupied by the kernel between _text and
	 * __end_of_kernel_reserve symbols. Any kernel sections after the
	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
	 * separate memblock_reserve() or they will be discarded.
	 */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);

	/*
	 * The first 4 KB of memory is a BIOS-owned area, but generally it
	 * is not listed as such in the E820 table.
	 *
	 * Reserve the first memory page and typically some additional
	 * memory (64 KB by default) since some BIOSes are known to corrupt
	 * low memory. See the Kconfig help text for X86_RESERVE_LOW.
	 *
	 * In addition, make sure page 0 is always reserved because on
	 * systems with L1TF its contents can be leaked to user processes.
	 */
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));

	early_reserve_initrd();

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();

	memblock_x86_reserve_range_setup_data();

	reserve_ibft_region();
	reserve_bios_regions();
}

/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
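/*
 * For reference (the helper is defined elsewhere, in asm/setup.h):
 * kaslr_offset() is the delta between where the kernel text actually
 * landed and the compile-time __START_KERNEL address, so a panic on a
 * KASLR kernel prints something like
 *
 *	Kernel Offset: 0x1c000000 from 0xffffffff81000000 ...
 *
 * which lets tools translate runtime addresses back to vmlinux symbols.
 */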
/*
 * Determine if we were loaded by an EFI loader. If so, then we have also
 * been passed the efi memmap, systab, etc., so we should use these data
 * structures for initialization. Note, the efi init code path is
 * determined by the global efi_enabled. This allows the same kernel image
 * to be used on existing systems (with a traditional BIOS) as well as on
 * EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the CPU quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	idt_setup_early_traps();
	early_cpu_init();
	jump_label_init();
	static_call_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
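	/*
	 * Worked example for the decoding above (illustrative values):
	 * type_of_loader = 0x72 means loader ID 7 with minor version 2,
	 * so bootloader_type ends up 0x72 and bootloader_version becomes
	 * 2 | (ext_loader_ver << 4). Loader ID 0xe is the escape value
	 * telling us to take the real ID from ext_loader_type + 0x10.
	 */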
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	e820__memory_setup();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	rodata_resource.start = __pa_symbol(__start_rodata);
	rodata_resource.end = __pa_symbol(__end_rodata)-1;
	data_resource.start = __pa_symbol(_sdata);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	/*
	 * Do some memory reservations *before* memory is added to memblock,
	 * so memblock allocations won't overwrite it.
	 *
	 * Do it after early param, so we could get (unlikely) panic from
	 * serial.
	 *
	 * After this point, everything still needed from the boot loader or
	 * firmware or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	early_reserve_memory();

#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at a very early time. We
	 * cannot prevent this anyway. So on NUMA systems, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers one node could have double-digit
	 * gigabytes of memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try our best to keep
	 * the kernel away from hotpluggable memory.
	 */
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	x86_report_nx();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

	e820__reserve_setup_data();
	e820__finish_early_params();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_setup(), for the boot CPU.
	 */
	init_hypervisor_platform();

	tsc_early_init();
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &rodata_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
				   E820_TYPE_RESERVED);
		e820__update_table(e820_table);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820__print_table("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820__end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820__end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * This call is required when the CPU does not support PAT. If
	 * mtrr_bp_init() invoked it already via pat_init() the call has no
	 * effect.
	 */
	init_cache_modes();

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();
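	/*
	 * A note on the call above (memory-region KASLR, assumed per
	 * arch/x86/mm/kaslr.c): it randomizes the bases of the big virtual
	 * regions - the direct mapping, vmalloc and vmemmap areas - while
	 * the text mapping itself was already randomized during
	 * decompression.
	 */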
#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820__end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk before e820__memblock_setup(): it could use
	 * memblock_find_in_range() and could overlap with the brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

	/*
	 * Needs to run after memblock setup because it needs the physical
	 * memory size.
	 */
	sev_setup_arch();

	efi_fake_memmap();
	efi_find_mirror();
	efi_esrt_init();
	efi_mokvar_table_init();

	/*
	 * The EFI specification says that boot service code won't be
	 * called after ExitBootServices(). This is, in fact, a lie.
	 */
	efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	/*
	 * Reserving memory causing GPU hangs on Sandy Bridge integrated
	 * graphics devices should be done after we allocated memory under
	 * 1M for the real mode trampoline.
	 */
	trim_snb_memory();

	init_mem_mapping();

	idt_setup_early_pf();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value. This may not be necessary, but
	 * auditing all the early-boot CR4 manipulation would be needed to
	 * rule it out.
	 *
	 * Mask off features that don't work outside long mode (just
	 * PCIDE for now).
	 */
	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	if (efi_enabled(EFI_BOOT)) {
		switch (boot_params.secure_boot) {
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
		}
	}

	reserve_initrd();

	acpi_table_upgrade();
	/* Look for ACPI tables and reserve memory occupied by them. */
	acpi_boot_table_init();

	vsmp_init();

	io_delay_init();

	early_platform_quirks();

	early_acpi_boot_init();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
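	/*
	 * Arithmetic behind the call above: with GB pages available,
	 * PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18, so the CMA area is sized
	 * for gigantic pages of 2^18 base pages, i.e. 2^18 * 4 KB = 1 GB.
	 */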
	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();

	x86_init.paging.pagetable_init();

	kasan_init();

	/*
	 * Sync back kernel address range.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	get_smp_config();

	/*
	 * Systems without ACPI and mptables might not have the local APIC
	 * mapped yet, but prefill_possible_map() might need to access it.
	 */
	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();
	init_gi_nodes();

	io_apic_init_mappings();

	x86_init.hyper.guest_late_init();

	e820__reserve_resources();
	e820__register_nosave_regions(max_pfn);

	x86_init.resources.reserve_resources();

	e820__setup_pci_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	unwind_init();
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);