xref: /openbmc/linux/arch/ia64/mm/contig.c (revision 643d1f7f)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
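/*
 * Largest hole found while walking the EFI memory map; paging_init()
 * compares it against LARGE_GAP to decide whether a virtually mapped
 * mem_map is worth setting up, and show_mem() uses it when stepping
 * over holes.
 */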
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages<<(PAGE_SHIFT-10));
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					 i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
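
/*
 * On a small single-node machine the loop above produces output along
 * these lines (all numbers purely illustrative):
 *
 *   Node memory in pages:
 *   Node    0:  RAM:      262144, rsvd:     2048, shrd:       1024, swpd:          0
 *   262144 pages of RAM
 *   2048 reserved pages
 *   1024 pages shared
 *   0 pages swap cached
 */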


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and store its physical starting
 * address in bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
	unsigned long needed = *(unsigned long *)arg;
	unsigned long range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}
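
/*
 * Example: if the memory range handed in is [1MB, 64MB) and the only
 * rsvd_region[] entry inside it is [2MB, 3MB), the first iteration above
 * examines [1MB, 2MB); if that is too small, a later iteration (against
 * the next rsvd_region[] entry, assuming one exists beyond this range)
 * examines [3MB, 64MB).  The first candidate of at least 'needed' bytes
 * has its physical address recorded in bootmap_start.  All addresses
 * here are illustrative only.
 */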

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	int cpu;
	static int first_time=1;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP therefore allocates the per-cpu pages for all NR_CPUS
	 * CPUs up front, so that the APs never need to call
	 * get_zeroed_page() themselves.
	 */
	if (first_time) {
		first_time=0;
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
		}
	}
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
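
/*
 * The bootmem block allocated by alloc_per_cpu_data() is carved into
 * NR_CPUS slices of PERCPU_PAGE_SIZE bytes, and per_cpu_init() copies
 * the per-cpu template into each slice.  As illustrative arithmetic,
 * for a given cpu the loop above leaves
 *
 *   __per_cpu_offset[cpu] == (cpu_data + cpu * PERCPU_PAGE_SIZE)
 *                            - __per_cpu_start
 *
 * (with cpu_data at its initial value), so adding that offset to a
 * per-cpu variable's link-time address yields its address inside that
 * cpu's slice.
 */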

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes the bootmem bitmap needs to cover all pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size);

	find_initrd();

	alloc_per_cpu_data();
}

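/*
 * The bootmap_size computed in find_memory() comes from
 * bootmem_bootmap_pages(), which sizes a bitmap with one bit per page
 * frame.  As an illustration, 4GB of RAM with ia64's default 16KB page
 * size spans 262144 frames, so the bitmap needs 32KB, i.e. two 16KB
 * pages; the exact figure depends on the configured page size.
 */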

/*
 * count_pages - efi_memmap_walk callback that adds the number of pages
 * spanned by each memory descriptor [start, end) to the counter passed
 * in via @arg; paging_init() uses it to compute num_physpages.
 */
static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(register_active_ranges, NULL);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
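		/*
		 * Illustrative sizing: with 16KB pages, 4GB of memory gives
		 * max_low_pfn == 262144; assuming a 64-byte struct page for
		 * the sake of the example, the virtual mem_map occupies
		 * roughly 16MB of the vmalloc area.
		 */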
		vmalloc_end -= map_size;
		vmem_map = (struct page *) vmalloc_end;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	add_active_range(0, 0, max_low_pfn);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
