/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
	unsigned long num_dma_physpages;
#endif
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!bdp->node_low_pfn) {
		bdp->node_min_pfn = spfn;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
				unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &bootmem_node_data[node];

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - bdp->node_min_pfn;
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - bdp->node_min_pfn;
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.
	 * If for_each_online_node() is used, a new node's
	 * pg_data_ptrs will not be initialized. Instead of using it,
	 * pgdat_list[] is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node()
 *	fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize CPU-only nodes' pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (bootmem_node_data[node].node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}

	efi_memmap_walk(filter_memory, register_active_ranges);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &bootmem_node_data[node];
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map>>PAGE_SHIFT,
				  bdp->node_min_pfn,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already; we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					 i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes.  Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
#endif
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		num_physpages += mem_data[node].num_physpages;
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(struct page *start_page,
					unsigned long size, int node)
{
	return vmemmap_populate_basepages(start_page, size, node);
}
#endif