xref: /openbmc/linux/arch/x86/mm/init.c (revision b04b4f78)
#include <linux/ioport.h>
#include <linux/swap.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

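/*
 * Bookkeeping for the window that find_early_table_space() reserves
 * for early page tables: _start is the first page frame of the
 * window, _end is the next unused page frame within it, and _top is
 * the first page frame past the window.
 */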
unsigned long __initdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;

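/* Nonzero once bootmem is up; early-boot-only work below is then skipped. */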
int after_bootmem;

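/*
 * Use 1GB pages for the kernel direct mapping.  Defaults on only with
 * CONFIG_DIRECT_GBPAGES; init_gbpages() clears it if the CPU lacks support.
 */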
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

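/*
 * Estimate the worst-case size of the page tables needed to map
 * physical memory up to 'end', then reserve a contiguous block for
 * them from the e820 map.  A rough worked example (assuming x86_64,
 * use_pse set, use_gbpages clear, end = 4GB): puds = 4, so one page
 * of PUDs; pmds = 2048, so four pages of PMDs; the 2M-aligned bulk
 * needs no PTEs, so about five pages (20KB) in total.
 */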
static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

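	/*
	 * With 1GB pages, PMDs are only needed for the tail below the
	 * last 1GB boundary; otherwise every PMD_SIZE chunk needs one.
	 */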
	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

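	/*
	 * Likewise, with 2MB pages only the tail below the last 2MB
	 * boundary needs PTEs.  On 32-bit, one extra PMD_SIZE worth of
	 * PTEs is reserved, apparently for the head range that is
	 * always mapped with 4k pages (see init_memory_mapping()).
	 */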
	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
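	/*
	 * Note the search bounds below: on 32-bit the tables must land
	 * inside the already-mapped region (max_pfn_mapped), presumably
	 * so they can be written through the existing mapping; on 64-bit
	 * anywhere below 'end' will do.
	 */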
#ifdef CONFIG_X86_32
	start = 0x7000;
	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
#else /* CONFIG_X86_64 */
	start = 0x8000;
	e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
#endif
	if (e820_table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	e820_table_start >>= PAGE_SHIFT;
	e820_table_end = e820_table_start;
	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
}

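/*
 * A map_range describes one physically contiguous chunk of the direct
 * mapping; page_size_mask records which large page sizes may be used
 * to map it.
 */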
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

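/*
 * Worst-case number of ranges: 32-bit splits into a 4k head, a 2M
 * middle and a 4k tail; 64-bit adds a 1G middle and a 2M tail around it.
 */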
#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

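/*
 * Append [start_pfn, end_pfn) with the given page size mask to mr[],
 * skipping empty ranges, and return the updated range count.
 */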
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

#ifdef CONFIG_X86_64
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
#else
static inline void init_gbpages(void)
{
}
#endif

/*
 * Set up the direct mapping of physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and takes pages directly
 * from physical memory; to access them, they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

	if (!after_bootmem)
		init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
#endif

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

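	/*
	 * Split [start, end) into ranges by the largest page size each
	 * piece can use: a 4k head up to the first 2M boundary, a
	 * 2M-mapped middle (on 64-bit with a 1G-mapped core and a 2M
	 * tail), and a 4k tail below 'end'.
	 */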
	/* Head range: not big-page aligned, so map it with 4k pages. */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail that is not big page (1G) aligned: fall back to 2M pages */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail that is not big page (2M) aligned: map with 4k pages */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* Merge adjacent ranges that use the same page size. */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it and re-examine slot i after the merge */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables on the node that holds
	 * the memory being mapped. Unfortunately this is currently done
	 * before the nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

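	/*
	 * Set up the actual mappings.  On 32-bit the return value of
	 * kernel_physical_mapping_init() is not used and the whole range
	 * is taken as mapped; on 64-bit it yields the end of the last
	 * mapping established.
	 */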
#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(mr[i].start, mr[i].end,
					     mr[i].page_size_mask);
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

#ifdef CONFIG_X86_64
	if (!after_bootmem && !start) {
		pud_t *pud;
		pmd_t *pmd;

		mmu_cr4_features = read_cr4();

		/*
		 * _brk_end cannot change anymore, but it and _end may be
		 * located on different 2M pages. cleanup_highmap(), however,
		 * can only consider _end when it runs, so destroy any
		 * mappings beyond _brk_end here.
		 */
		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
		pmd = pmd_offset(pud, _brk_end - 1);
		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
			pmd_clear(pmd);
	}
#endif
	__flush_tlb_all();

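	/*
	 * find_early_table_space() only located the window for the page
	 * tables; reserve it now, and only up to the highest page
	 * actually handed out (e820_table_end), not the worst-case top.
	 */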
	if (!after_bootmem && e820_table_end > e820_table_start)
		reserve_early(e820_table_start << PAGE_SHIFT,
				 e820_table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because
 * that area contains BIOS code and data regions used by X and dosemu
 * and similar apps. Access has to be given to non-kernel-RAM areas as
 * well; these contain the PCI MMIO resources as well as potential
 * BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

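/*
 * Free the pages of an init section back to the page allocator or,
 * with CONFIG_DEBUG_PAGEALLOC, just unmap them so stray accesses fault.
 */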
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read-only above; now that
	 * we are going to free part of it, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}


void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif