/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to set up the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
	unsigned long num_dma_physpages;
#endif
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
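/*
 * NODEDATA_ALIGN() rounds @addr up to a 1MB boundary and then adds
 * (node * PERCPU_PAGE_SIZE) modulo MAX_NODE_ALIGN_OFFSET, so each node's
 * per-node data starts at a different offset within a 32MB window.
 * For example, assuming a 64KB PERCPU_PAGE_SIZE, NODEDATA_ALIGN(0x2000000, 1)
 * evaluates to 0x2010000.
 */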

/**
 * build_node_maps - callback to set up bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  min_low_pfn and max_low_pfn are updated by a separate
 * efi_memmap_walk() over find_max_min_low_pfn() in find_memory().
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

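	/*
	 * Grow this node's [node_boot_start, node_low_pfn) envelope to
	 * cover the granule-aligned range we were just handed.
	 */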
	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() here
 * because acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't
 * been called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

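	/*
	 * The per-node area holds one PERCPU_PAGE_SIZE region per cpu on
	 * this node, node * L1_CACHE_BYTES of padding, the node's pg_data_t
	 * and its ia64_node_data, all rounded up to a whole page.
	 * fill_pernode() carves the area up in the same order.
	 */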
	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - set up per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to set up
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * set up __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

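	/*
	 * Each cpu that lives on this node gets its own PERCPU_PAGE_SIZE
	 * copy of the static per-cpu data; record the resulting offset so
	 * per_cpu() references resolve into that copy.
	 */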
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

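	/*
	 * Carve the zeroed area up in the order compute_pernodesize() sized
	 * it: per-cpu pages first, then the per-node cacheline pad, the
	 * node's pg_data_t and finally its ia64_node_data.
	 */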
	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of address space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

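	/*
	 * Size of the bootmem bitmap that init_bootmem_node() will place
	 * right after the per-node area.
	 */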
	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_node_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't set up this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here: node_online_map is
	 * not yet set for hot-added nodes at this point, because we are
	 * halfway through initialization of the new node's structures.
	 * If for_each_online_node() were used, a new node's pg_data_ptrs
	 * would never be initialized, so pgdat_list[] is checked instead.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets set up by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node()
 *	fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

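	/*
	 * Pick the online node with memory that is closest to @nid by SLIT
	 * distance; remember any memory-full node as a last-resort fallback.
	 */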
	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize pernode information for
 *	CPU-only (memoryless) nodes.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and set up the bootmem allocator
 *
 * Called early in boot to set up the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

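	/*
	 * Start by assuming every online node is memoryless; nodes that
	 * turn out to own memory are cleared from the mask once the EFI
	 * walks below have populated their bootmem data.
	 */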
	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}

	efi_memmap_walk(register_active_ranges, NULL);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - set up per-cpu variables
 *
 * find_pernode_space() does most of this already; we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

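	/*
	 * Only the boot cpu publishes __per_cpu_offset[] into each cpu's
	 * local_per_cpu_offset, and it only needs to do so once.
	 */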
	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	if (first_time) {
		first_time = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages<<(PAGE_SHIFT-10));
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
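		/*
		 * Walk every pfn the node spans, hopping over holes in the
		 * virtual memmap via vmemmap_find_next_valid_pfn().
		 */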
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					 i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about the layout of memory across nodes.
 * Find out to which node a block of memory belongs.  Ignore memory that we
 * cannot identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

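	/*
	 * Clip [start, end) against each SRAT memory block and hand every
	 * overlapping piece to the callback along with its node id.
	 */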
	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will set up these values for each node.
 * Very similar to build_node_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
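	/*
	 * Record the node's pfn range over a granule-aligned version of the
	 * span (with the start additionally rounded down to a MAX_ORDER
	 * boundary), rather than the raw range.
	 */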
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - set up page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
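	/*
	 * Reserve virtual space at the top of the vmalloc area for one
	 * struct page per pfn up to max_low_pfn and point vmem_map at it;
	 * create_mem_map_page_table() below only maps the parts that back
	 * real memory.
	 */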
	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		num_physpages += mem_data[node].num_physpages;
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(struct page *start_page,
						unsigned long size, int node)
{
	return vmemmap_populate_basepages(start_page, size, node);
}
#endif