#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/* need 3 4k pages for the initial PMD_SIZE mapping, 3 4k pages for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init init_gbpages(void)
{
#ifdef CONFIG_X86_64
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
#endif
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};
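
/*
 * Bitmask of the additional PG_LEVEL_* page sizes (2M/1G) the direct
 * mapping is allowed to use; filled in by probe_page_size_mask() below.
 */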
static int page_size_mask;

static void __init probe_page_size_mask(void)
{
	init_gbpages();

#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of small ranges so that they can use a big
 * page size instead of a small one if the surrounding area is RAM too.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
						     int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}
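
/*
 * Split [start, end) into ranges so that the naturally aligned middle
 * part can use 1G/2M pages while the unaligned head and tail fall back
 * to smaller pages, then merge adjacent ranges that end up with the
 * same page size.
 */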
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge ranges with the same page size that are contiguous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}

struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}
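
/*
 * Return true if the whole pfn range [start_pfn, end_pfn) is already
 * covered by one of the directly mapped ranges recorded above.
 */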
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If it overlaps with the brk pgt area, we need to
		 * allocate the pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Why we shift by 5, and why we don't have to worry about
	 * 'step_size << 5' overflowing:
	 *
	 * The initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to PUD_SIZE (1G) yet.
	 * In the worst case, when we cross a 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Use 5 as the shift for now.
	 *
	 * We don't need to worry about overflow: on 32-bit, when step_size
	 * is 0, round_down() returns 0 for start, and that turns it
	 * into 0x100000000ULL.
	 */
	return step_size << 5;
}
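
/*
 * With the shift above, successive mapping windows grow by a factor of
 * 32, roughly 2M -> 64M -> 2G -> 64G, so only a few iterations are
 * needed even on large machines.
 */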

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after a big range has been mapped */
		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, new_mapped_ram_size, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else
			next = map_end;

		new_mapped_ram_size = init_range_memory_mapping(start, next);
		start = next;

		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make the memory above the kernel
		 * mapped as soon as possible, and then use page tables
		 * allocated above the kernel to map
		 * [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X, dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
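
/*
 * Free the page-aligned range [begin, end) that held init code or data,
 * or, with CONFIG_DEBUG_PAGEALLOC, just unmap it so that any stray
 * access faults instead of silently succeeding.
 */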
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MICROCODE_EARLY
	/*
	 * Remember, initrd memory may contain microcode or other useful things.
	 * Before we lose initrd mem, we need to find a place to hold them
	 * now that normal virtual memory is enabled.
	 */
	save_microcode_in_initrd();
#endif

	/*
	 * end may be unaligned, and we cannot align it here because the
	 * decompressor could be confused by an aligned initrd_end.  The
	 * partial page at the end was already reserved in
	 * - i386_start_kernel()
	 * - x86_64_start_kernel()
	 * - relocate_initrd()
	 * so here we can safely use PAGE_ALIGN() to get that partial page
	 * freed as well.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}