/* arch/x86/mm/init.c (revision d78c317f) */
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */

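/*
 * Bootstrap page-table allocator state, in page frame numbers:
 * pgt_buf_start is the first page of the buffer reserved for early
 * page tables, pgt_buf_end is the next unused page within it, and
 * pgt_buf_top is the first page past the end of the buffer.
 */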
unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

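/*
 * Estimate the worst-case space needed for the early page tables that
 * will map physical memory up to 'end', and find room for them via
 * memblock. As an illustrative back-of-the-envelope example (not taken
 * from the code itself): mapping 4 GiB with 4k pages on x86_64 needs
 * about 1M PTEs * 8 bytes = 8 MiB of PTE pages, plus 2048 PMD entries
 * (16 KiB after rounding to page size) and 4 PUD entries (one page);
 * using 2M or 1G pages shrinks the PTE/PMD requirement accordingly.
 */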
static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
	phys_addr_t base;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif
	good_end = max_pfn_mapped << PAGE_SHIFT;

	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
	if (!base)
		panic("Cannot find space for the kernel page tables");

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}

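/*
 * Reserve the early page-table pages in the memblock allocator. This is
 * the native implementation of the pagetable_reserve hook; paravirtualized
 * guests such as Xen install their own version (see the comment near the
 * caller below).
 */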
void __init native_pagetable_reserve(u64 start, u64 end)
{
	memblock_reserve(start, end - start);
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

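/*
 * Record the pfn range [start_pfn, end_pfn) in mr[] with the given page
 * size mask and return the updated range count. Empty ranges are skipped;
 * overflowing NR_RANGE_MR is a fatal setup error.
 */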
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and takes its pages directly
 * from physical memory; to access them, they are temporarily mapped.
 */
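/*
 * The range is split so that each chunk can use the largest page size its
 * alignment allows. As a hypothetical example (not taken from a real boot
 * log): on x86_64 with gbpages enabled, mapping 0 - 0x140000000 (5 GiB)
 * would yield a 1G-page range [0, 4G) and a 2M-page tail [4G, 5G), with
 * no 4k pieces, since both boundaries are already 2M aligned.
 */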
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This simplifies cpa(), which otherwise would need to support
	 * splitting large pages into small ones in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head chunk: use 4k pages up to the first big-page boundary */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail that is not 1G aligned: map it with 2M pages */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail that is not 2M aligned: map it with 4k pages */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* merge contiguous ranges that use the same page size */
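	/*
	 * Illustration: with CONFIG_DEBUG_PAGEALLOC, every range above is
	 * created with an empty page_size_mask, so adjacent entries such
	 * as { 0 - 2M, 4k } and { 2M - 1G, 4k } collapse here into one
	 * { 0 - 1G, 4k } entry and nr_range shrinks accordingly.
	 */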
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is currently done before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	/*
	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
	 * so that they can be reused for other purposes.
	 *
	 * On native it just means calling memblock_reserve; on Xen it also
	 * means marking RW the pagetable pages that we allocated before
	 * but that haven't been used yet.
	 *
	 * In fact, on Xen we mark RO the whole range pgt_buf_start -
	 * pgt_buf_top, because we have to make sure that when
	 * init_memory_mapping reaches the pagetable pages area, it maps
	 * RO all the pagetable pages, including the ones that are beyond
	 * pgt_buf_end at that time.
	 */
	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
				PFN_PHYS(pgt_buf_end));

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because
 * that area contains BIOS code and data regions used by X and dosemu
 * and similar apps. Access has to be given to non-kernel-RAM areas as
 * well; these contain the PCI MMIO resources as well as potential
 * BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/* first megabyte: pages 0..255 */
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

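/*
 * Free the page range [begin, end) (virtual addresses) back to the page
 * allocator, poisoning the contents first. With CONFIG_DEBUG_PAGEALLOC
 * the pages are instead unmapped and kept, so that any late access to
 * freed init memory faults immediately.
 */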
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, end);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of it, we need to make that part
	 * writable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

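/* Release the .init sections once boot has finished with them. */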
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we must not align it here:
	 * the decompressor could be confused by an aligned initrd_end.
	 * The partial page at the end has already been reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so PAGE_ALIGN() is safe here and lets that partial page be freed.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif

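/*
 * Populate the zone size table and hand it to the core VM. As an
 * illustrative example (exact values depend on the machine): on a
 * 64-bit box with 8 GiB of RAM, ZONE_DMA would cover pfns up to
 * 16 MiB, ZONE_DMA32 up to 4 GiB, and ZONE_NORMAL up to the end
 * of RAM.
 */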
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}