xref: /openbmc/linux/arch/x86/mm/init.c (revision b627b4ed)
#include <linux/ioport.h>
#include <linux/swap.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

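/*
 * Page-frame numbers bracketing the chunk of memory set aside for the
 * early (pre-bootmem) page tables: _start/_top are the bounds found by
 * find_early_table_space(), _end is the allocation cursor within them.
 */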
unsigned long __initdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;

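/* Non-zero once bootmem is up; skips early-only setup on later mappings. */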
int after_bootmem;

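/* Use 1G pages for the kernel direct mapping where the CPU supports them. */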
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start;

	/* one pud_t per PUD_SIZE chunk of [0, end) */
	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		/* with 1G pages, only the unaligned tail needs PMDs */
		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		/* with 2M pages, only the unaligned tail needs PTEs */
		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
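
	/*
	 * Illustrative sizing (not from this file): mapping end = 4GB on
	 * x86_64 with 2M pages and no 1G pages gives puds = 4 (32 bytes,
	 * rounded up to one 4k page) and pmds = 2048 (16KB, four pages);
	 * the 2M-aligned end leaves ptes = 0, so tables = 20KB in total.
	 */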

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
#ifdef CONFIG_X86_32
	start = 0x7000;
	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
#else /* CONFIG_X86_64 */
	start = 0x8000;
	e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
#endif
	if (e820_table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	e820_table_start >>= PAGE_SHIFT;
	e820_table_end = e820_table_start;
	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
}

/*
 * A contiguous physical range and the largest page size with which it
 * may be mapped.
 */
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
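
/*
 * Worst case: a 4k head, a 2M middle and a 4k tail on 32-bit (3 ranges);
 * 64-bit adds a 1G middle and a 2M tail below it (5 ranges).
 */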

/*
 * Record [start_pfn, end_pfn) with the given page size mask, if non-empty;
 * returns the updated number of ranges.
 */
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

#ifdef CONFIG_X86_64
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
#else
static inline void init_gbpages(void)
{
}
#endif

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

	if (!after_bootmem)
		init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available: global mappings then survive CR3 reloads */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
#endif

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

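	/*
	 * Split [start, end) into at most NR_RANGE_MR ranges so that each
	 * range is mapped with the largest page size its alignment allows:
	 * a 4k head up to the first 2M boundary, 2M (and, on 64-bit, 1G)
	 * pages in the middle, and 2M/4k tails below the matching
	 * alignment at the end.
	 */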
	/* head: the part before the first big-page boundary, if unaligned */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail that is not big page (1G) aligned, mapped with 2M pages */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail that is not big page (2M) aligned, mapped with 4k pages */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* merge adjacent ranges that use the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* fold mr[i+1] into mr[i], then recheck the new mr[i] */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

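	/*
	 * For instance, when PSE is off every range above was saved with an
	 * empty page_size_mask, so they all collapse into a single 4k range.
	 */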
	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(mr[i].start, mr[i].end,
					     mr[i].page_size_mask);
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	/* reload CR3 so the newly built page tables take effect */
	load_cr3(swapper_pg_dir);
#endif

#ifdef CONFIG_X86_64
	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
#endif
	__flush_tlb_all();

	/* reserve only the page-table pages actually consumed */
	if (!after_bootmem && e820_table_end > e820_table_start)
		reserve_early(e820_table_start << PAGE_SHIFT,
				 e820_table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}
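
/*
 * Illustrative call site (not part of this file): early in boot,
 * setup_arch() establishes the low direct mapping with roughly
 *
 *	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);
 *
 * before the bootmem allocator exists.
 */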

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/* pages 0-255 (256 * 4k = 1MB) cover the BIOS area */
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
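
/*
 * For example, pagenr 0xA0 (the VGA area at 640k) is below 1MB and is
 * allowed; an ordinary RAM page above 1MB is refused; a non-exclusive
 * PCI MMIO page is allowed because it is not RAM.
 */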

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif