/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * -  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *    Alex Achenbach <xela@slit.de>, December 2002.
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/trampoline.h>

/*
 * The e820 map is the map that gets modified e.g. with command line parameters
 * and that is also registered with modifications in the kernel resource tree
 * with the iomem_resource as parent.
 *
 * The e820_saved is directly saved after the BIOS-provided memory map is
 * copied. It doesn't get modified afterwards. It's registered for the
 * /sys/firmware/memmap interface.
 *
 * That memory map is not modified and is used as a base for kexec. The kexec'd
 * kernel should get the same memory map as the firmware provides. Then the
 * user can e.g. boot the original kernel with mem=1G while still booting the
 * next kernel with full memory.
 */
struct e820map e820;
struct e820map e820_saved;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif

/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);

/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		/* is the region (part) in overlap with the current region? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/* if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/*
		 * if start is now at or beyond end, we're done, full
		 * coverage
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}

/*
 * Add a memory region to the kernel e820 map.
 */
static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
				     int type)
{
	int x = e820x->nr_map;

	if (x >= ARRAY_SIZE(e820x->map)) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820x->map[x].addr = start;
	e820x->map[x].size = size;
	e820x->map[x].type = type;
	e820x->nr_map++;
}

void __init e820_add_region(u64 start, u64 size, int type)
{
	__e820_add_region(&e820, start, size, type);
}

static void __init e820_print_type(u32 type)
{
	switch (type) {
	case E820_RAM:
	case E820_RESERVED_KERN:
		printk(KERN_CONT "(usable)");
		break;
	case E820_RESERVED:
		printk(KERN_CONT "(reserved)");
		break;
	case E820_ACPI:
		printk(KERN_CONT "(ACPI data)");
		break;
	case E820_NVS:
		printk(KERN_CONT "(ACPI NVS)");
		break;
	case E820_UNUSABLE:
		printk(KERN_CONT "(unusable)");
		break;
	default:
		printk(KERN_CONT "type %u", type);
		break;
	}
}

void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		e820_print_type(e820.map[i].type);
		printk(KERN_CONT "\n");
	}
}

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter biosmap points to an array of 'struct
 * e820entry' which on entry has elements in the range [0, *pnr_map)
 * valid, and which has space for up to max_nr_map entries.
 * On return, the resulting sanitized e820 map entries will be
 * overwritten in the same location, starting at biosmap.
 *
 * The integer pointed to by pnr_map must be valid on entry (the
 * current number of valid entries located at biosmap) and will
 * be updated on return, with the new number of valid entries
 * (no more than max_nr_map).
 *
 * The return value from sanitize_e820_map() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either (1) it was
 * passed only one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
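 *
 * (In outline: each non-empty map entry contributes two change points,
 * one for its start and one for its end address; the change points are
 * sorted by address and then walked in order, tracking which entries
 * currently overlap and emitting a new entry whenever the effective
 * highest-numbered type changes.)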
 *
 * Sample memory map (w/overlaps):
 *	____22__________________
 *	______________________4_
 *	____1111________________
 *	_44_____________________
 *	11111111________________
 *	____________________33__
 *	___________44___________
 *	__________33333_________
 *	______________22________
 *	___________________2222_
 *	_________111111111______
 *	_____________________11_
 *	_________________4______
 *
 * Sanitized equivalent (no overlap):
 *	1_______________________
 *	_44_____________________
 *	___1____________________
 *	____22__________________
 *	______11________________
 *	_________1______________
 *	__________3_____________
 *	___________44___________
 *	_____________33_________
 *	_______________2________
 *	________________1_______
 *	_________________4______
 *	___________________2____
 *	____________________33__
 *	______________________4_
 */

int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     u32 *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr < lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}

static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 */
static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __append_e820_map(biosmap, nr_map);
}

static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
				      u64 size, unsigned old_type,
				      unsigned new_type)
{
	u64 end;
	unsigned int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
	       (unsigned long long) start,
	       (unsigned long long) end);
	e820_print_type(old_type);
	printk(KERN_CONT " ==> ");
	e820_print_type(new_type);
	printk(KERN_CONT "\n");

	for (i = 0; i < e820x->nr_map; i++) {
		struct e820entry *ei = &e820x->map[i];
		u64 final_start, final_end;
		u64 ei_end;

		if (ei->type != old_type)
			continue;

		ei_end = ei->addr + ei->size;
		/* totally covered by new range? */
		if (ei->addr >= start && ei_end <= end) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}

		/* new range is totally covered? */
		if (ei->addr < start && ei_end > end) {
			__e820_add_region(e820x, start, size, new_type);
			__e820_add_region(e820x, end, ei_end - end, ei->type);
			ei->size = start - ei->addr;
			real_updated_size += size;
			continue;
		}

		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(end, ei_end);
		if (final_start >= final_end)
			continue;

		__e820_add_region(e820x, final_start, final_end - final_start,
				  new_type);

		real_updated_size += final_end - final_start;

		/*
		 * left range could be head or tail, so need to update
		 * size at first.
		 */
		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}

u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	return __e820_update_range(&e820, start, size, old_type, new_type);
}

static u64 __init e820_update_range_saved(u64 start, u64 size,
					  unsigned old_type, unsigned new_type)
{
	return __e820_update_range(&e820_saved, start, size, old_type,
				   new_type);
}

/* make e820 not cover the range */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 real_removed_size = 0;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (checktype && ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}

void __init update_e820(void)
{
	u32 nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}
static void __init update_e820_saved(void)
{
	u32 nr_map;

	nr_map = e820_saved.nr_map;
	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
		return;
	e820_saved.nr_map = nr_map;
}
#define MAX_GAP_END 0x100000000ull
/*
 * Search for a gap in the e820 memory space from start_addr to end_addr.
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
			   unsigned long start_addr, unsigned long long end_addr)
{
	unsigned long long last;
	int i = e820.nr_map;
	int found = 0;

	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;

	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		if (end < start_addr)
			continue;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap >= *gapsize) {
				*gapsize = gap;
				*gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}
	return found;
}

/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space. We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS left enough space.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize;
	int found;

	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR
		       "PCI: Warning: Cannot find a gap in the 32bit address range\n"
		       "PCI: Unassigned devices with 32bit resource registers may break!\n");
	}
#endif

	/*
	 * e820_reserve_resources_late() protects stolen RAM already
	 */
	pci_mem_start = gapstart;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}

/**
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the rest are passed via the SETUP_E820_EXT node of
 * the linked list of struct setup_data, which is parsed here.
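 * Each SETUP_E820_EXT node carries additional struct e820entry records in
 * its data field; they are appended to the e820 map and the combined map
 * is re-sanitized.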
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}

#if defined(CONFIG_X86_64) || \
	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
/**
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
#endif

#ifdef CONFIG_HIBERNATION
/**
 * Mark ACPI NVS memory region, so that we can save/restore it during
 * hibernation and the subsequent resume.
 */
static int __init e820_mark_nvs_memory(void)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (ei->type == E820_NVS)
			hibernate_nvs_register(ei->addr, ei->size);
	}

	return 0;
}
core_initcall(e820_mark_nvs_memory);
#endif

/*
 * Early reserved memory areas.
 */
#define MAX_EARLY_RES 32

struct early_res {
	u64 start, end;
	char name[16];
	char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
	{ 0, PAGE_SIZE, "BIOS data page", 1 },	/* BIOS data page */
#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
#endif

	{}
};

static int __init find_overlapped_early(u64 start, u64 end)
{
	int i;
	struct early_res *r;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];
		if (end > r->start && start < r->end)
			break;
	}

	return i;
}

/*
 * Drop the i-th range from the early reservation map,
 * by copying any higher ranges down one over it, and
 * clearing what had been the last slot.
 */
static void __init drop_range(int i)
{
	int j;

	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
		;

	memmove(&early_res[i], &early_res[i + 1],
		(j - 1 - i) * sizeof(struct early_res));

	early_res[j - 1].end = 0;
}

/*
 * Split any existing ranges that:
 *  1) are marked 'overlap_ok', and
 *  2) overlap with the stated range [start, end)
 * into whatever portion (if any) of the existing range is entirely
 * below or entirely above the stated range. Drop the portion
 * of the existing range that overlaps with the stated range,
 * which will allow the caller of this routine to then add that
 * stated range without conflicting with any existing range.
 */
static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
{
	int i;
	struct early_res *r;
	u64 lower_start, lower_end;
	u64 upper_start, upper_end;
	char name[16];

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];

		/* Continue past non-overlapping ranges */
		if (end <= r->start || start >= r->end)
			continue;

		/*
		 * Leave non-ok overlaps as is; let caller
		 * panic "Overlapping early reservations"
		 * when it hits this overlap.
		 */
		if (!r->overlap_ok)
			return;

		/*
		 * We have an ok overlap. We will drop it from the early
		 * reservation map, and add back in any non-overlapping
		 * portions (lower or upper) as separate, overlap_ok,
		 * non-overlapping ranges.
		 */

		/* 1. Note any non-overlapping (lower or upper) ranges. */
		strncpy(name, r->name, sizeof(name) - 1);

		lower_start = lower_end = 0;
		upper_start = upper_end = 0;
		if (r->start < start) {
			lower_start = r->start;
			lower_end = start;
		}
		if (r->end > end) {
			upper_start = end;
			upper_end = r->end;
		}

		/* 2. Drop the original ok overlapping range */
		drop_range(i);

		i--;	/* resume for-loop on copied down entry */

		/* 3. Add back in any non-overlapping ranges. */
		if (lower_end)
			reserve_early_overlap_ok(lower_start, lower_end, name);
		if (upper_end)
			reserve_early_overlap_ok(upper_start, upper_end, name);
	}
}

static void __init __reserve_early(u64 start, u64 end, char *name,
				   int overlap_ok)
{
	int i;
	struct early_res *r;

	i = find_overlapped_early(start, end);
	if (i >= MAX_EARLY_RES)
		panic("Too many early reservations");
	r = &early_res[i];
	if (r->end)
		panic("Overlapping early reservations "
		      "%llx-%llx %s to %llx-%llx %s\n",
		      start, end - 1, name?name:"", r->start,
		      r->end - 1, r->name);
	r->start = start;
	r->end = end;
	r->overlap_ok = overlap_ok;
	if (name)
		strncpy(r->name, name, sizeof(r->name) - 1);
}

/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap this one.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first. We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 1);
}

/*
 * Most early reservations come here.
 *
 * We first have drop_overlaps_that_are_ok() drop any pre-existing
 * 'overlap_ok' ranges, so that we can then reserve this memory
 * range without risk of panic'ing on an overlapping overlap_ok
 * early reservation.
 */
void __init reserve_early(u64 start, u64 end, char *name)
{
	if (start >= end)
		return;

	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 0);
}

void __init free_early(u64 start, u64 end)
{
	struct early_res *r;
	int i;

	i = find_overlapped_early(start, end);
	r = &early_res[i];
	if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
		panic("free_early on not reserved area: %llx-%llx!",
		      start, end - 1);

	drop_range(i);
}

void __init early_res_to_bootmem(u64 start, u64 end)
{
	int i, count;
	u64 final_start, final_end;

	count = 0;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
		count++;

	printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
	       count, start, end);
	for (i = 0; i < count; i++) {
		struct early_res *r = &early_res[i];
		printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
		       r->start, r->end, r->name);
		final_start = max(start, r->start);
		final_end = min(end, r->end);
		if (final_start >= final_end) {
			printk(KERN_CONT "\n");
			continue;
		}
		printk(KERN_CONT " ==> [%010llx - %010llx]\n",
		       final_start, final_end);
		reserve_bootmem_generic(final_start, final_end - final_start,
					BOOTMEM_DEFAULT);
	}
}

/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
	int i;
	u64 addr = *addrp;
	int changed = 0;
	struct early_res *r;
again:
	i = find_overlapped_early(addr, addr + size);
	r = &early_res[i];
	if (i < MAX_EARLY_RES && r->end) {
		*addrp = addr = round_up(r->end, align);
		changed = 1;
		goto again;
	}
	return changed;
}

/* Check for already reserved areas */
static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	int i;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	int changed = 0;
again:
	last = addr + size;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];
		if (last > r->start && addr < r->start) {
			size = r->start - addr;
			changed = 1;
			goto again;
		}
		if (last > r->end && addr < r->end) {
			addr = round_up(r->end, align);
			size = last - addr;
			changed = 1;
			goto again;
		}
		if (last <= r->end && addr >= r->start) {
			(*sizep)++;
			return 0;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

/*
 * Find a free area with specified alignment in a specific range.
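 * Returns -1ULL if no suitable area is found.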
 */
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
			;
		last = addr + size;
		if (last > ei_last)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1ULL;
}

/*
 * Find next free range after *start
 */
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		while (bad_addr_size(&addr, sizep, align) &&
		       addr + *sizep <= ei_last)
			;
		last = addr + *sizep;
		if (last > ei_last)
			continue;
		return addr;
	}

	return -1ULL;
}

/*
 * Pre-allocate 4K and reserve it in the e820 map.
 */
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
	u64 size = 0;
	u64 addr;
	u64 start;

	for (start = startt; ; start += size) {
		start = find_e820_area_size(start, &size, align);
		if (!(start + 1))
			return 0;
		if (size >= sizet)
			break;
	}

#ifdef CONFIG_X86_32
	if (start >= MAXMEM)
		return 0;
	if (start + size > MAXMEM)
		size = MAXMEM - start;
#endif

	addr = round_down(start + size - sizet, align);
	if (addr < start)
		return 0;
	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
	printk(KERN_INFO "update e820 for early_reserve_e820\n");
	update_e820();
	update_e820_saved();

	return addr;
}

#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif

/*
 * Find the highest page frame number we have available
 */
static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
	int i;
	unsigned long last_pfn = 0;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start_pfn;
		unsigned long end_pfn;

		if (ei->type != type)
			continue;

		start_pfn = ei->addr >> PAGE_SHIFT;
		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;

		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn) {
			last_pfn = limit_pfn;
			break;
		}
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;

	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
	       last_pfn, max_arch_pfn);
	return last_pfn;
}
unsigned long __init e820_end_of_ram_pfn(void)
{
	return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
}

unsigned long __init e820_end_of_low_ram_pfn(void)
{
	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 */
int __init e820_find_active_region(const struct e820entry *ei,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   unsigned long *ei_startpfn,
				   unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
				    *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init e820_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;
	}
	return end - start - ((u64)ram << PAGE_SHIFT);
}

static void early_panic(char *msg)
{
	early_printk(msg);
	panic(msg);
}

static int userdef __initdata;

/* "mem=nopentium" disables the 4MB page tables. */
static int __init parse_memopt(char *p)
{
	u64 mem_size;

	if (!p)
		return -EINVAL;

#ifdef CONFIG_X86_32
	if (!strcmp(p, "nopentium")) {
		setup_clear_cpu_cap(X86_FEATURE_PSE);
		return 0;
	}
#endif

	userdef = 1;
	mem_size = memparse(p, &p);
	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return 0;
}
early_param("mem", parse_memopt);

static int __init parse_memmap_opt(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real memory size before the original memory map
		 * is reset.
		 */
		saved_max_pfn = e820_end_of_ram_pfn();
#endif
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RESERVED);
	} else
		e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", parse_memmap_opt);

void __init finish_e820_parsing(void)
{
	if (userdef) {
		u32 nr = e820.nr_map;

		if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
			early_panic("Invalid user supplied memory map");
		e820.nr_map = nr;

		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
}

static inline const char *e820_type_to_string(int e820_type)
{
	switch (e820_type) {
	case E820_RESERVED_KERN:
	case E820_RAM:		return "System RAM";
	case E820_ACPI:		return "ACPI Tables";
	case E820_NVS:		return "ACPI Non-volatile Storage";
	case E820_UNUSABLE:	return "Unusable memory";
	default:		return "reserved";
	}
}

/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
static struct resource __initdata *e820_res;
void __init e820_reserve_resources(void)
{
	int i;
	struct resource *res;
	u64 end;

	res = alloc_bootmem(sizeof(struct resource) * e820.nr_map);
	e820_res = res;
	for (i = 0; i < e820.nr_map; i++) {
		end = e820.map[i].addr + e820.map[i].size - 1;
		if (end != (resource_size_t)end) {
			res++;
			continue;
		}
		res->name = e820_type_to_string(e820.map[i].type);
		res->start = e820.map[i].addr;
		res->end = end;

		res->flags = IORESOURCE_MEM;

		/*
		 * Don't register regions that could conflict with PCI
		 * device BAR resources; insert those later in
		 * pcibios_resource_survey().
		 */
		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
			res->flags |= IORESOURCE_BUSY;
			insert_resource(&iomem_resource, res);
		}
		res++;
	}

	for (i = 0; i < e820_saved.nr_map; i++) {
		struct e820entry *entry = &e820_saved.map[i];
		firmware_map_add_early(entry->addr,
				       entry->addr + entry->size - 1,
				       e820_type_to_string(entry->type));
	}
}

/* How much should we pad the end of RAM, depending on where it is? */
static unsigned long ram_alignment(resource_size_t pos)
{
	unsigned long mb = pos >> 20;

	/* To 64kB in the first megabyte */
	if (!mb)
		return 64*1024;

	/* To 1MB in the first 16MB */
	if (mb < 16)
		return 1024*1024;

	/* To 64MB for anything above that */
	return 64*1024*1024;
}

#define MAX_RESOURCE_SIZE ((resource_size_t)-1)

void __init e820_reserve_resources_late(void)
{
	int i;
	struct resource *res;

	res = e820_res;
	for (i = 0; i < e820.nr_map; i++) {
		if (!res->parent && res->end)
			insert_resource_expand_to_fit(&iomem_resource, res);
		res++;
	}

	/*
	 * Try to bump up RAM regions to reasonable boundaries to
	 * avoid stolen RAM:
	 */
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *entry = &e820.map[i];
		u64 start, end;

		if (entry->type != E820_RAM)
			continue;
		start = entry->addr + entry->size;
		end = round_up(start, ram_alignment(start)) - 1;
		if (end > MAX_RESOURCE_SIZE)
			end = MAX_RESOURCE_SIZE;
		if (start >= end)
			continue;
		reserve_region_with_split(&iomem_resource, start, end,
					  "RAM buffer");
	}
}

char *__init default_machine_specific_memory_setup(void)
{
	char *who = "BIOS-e820";
	u32 new_nr;
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	new_nr = boot_params.e820_entries;
	sanitize_e820_map(boot_params.e820_map,
			  ARRAY_SIZE(boot_params.e820_map),
			  &new_nr);
	boot_params.e820_entries = new_nr;
	if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
	    < 0) {
		u64 mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k
		    < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}

	/* In case someone cares... */
	return who;
}

void __init setup_memory_map(void)
{
	char *who;

	who = x86_init.resources.memory_setup();
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}