/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

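/*
 * Early page-table allocator: hands out zeroed pages one pfn at a
 * time from the physically contiguous window reserved for early page
 * tables (bounded by e820_table_top; presumably sized earlier by
 * find_early_table_space() in the shared init code).
 */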
static __init void *alloc_low_page(void)
{
	unsigned long pfn = e820_table_end++;
	void *adr;

	if (pfn >= e820_table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. In non-PAE compilation mode this
 * effectively returns the pgd entry itself, since the middle layer
 * is folded into it.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

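/*
 * The populate_extra_*() helpers walk swapper_pg_dir for a kernel
 * virtual address, allocating any missing intermediate tables, and
 * return a pointer to the pmd/pte entry covering that address, so
 * early callers elsewhere can install single mappings.
 */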
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

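/*
 * Helper for the kmap pte range: page_table_range_init() relies on
 * its pte pages being physically contiguous (see the NOTE before it
 * below). If the early fixmap code already installed a pte page that
 * lies outside the contiguous [e820_table_start, e820_table_end)
 * window, copy it into a freshly bump-allocated page so the
 * contiguity assumption holds again; the final BUG_ON asserts it.
 */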
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
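
/*
 * page_table_range_init() is static, so both of its callers live in
 * this file: permanent_kmaps_init() covers the pkmap window and
 * early_ioremap_page_table_range_init() covers the fixmap range.
 */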

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL..) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass uses the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass uses the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the
		 * first iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Local global TLB flush, which will flush the previous
		 * mappings present in both the small and large page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return 0;
}
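
/*
 * Coverage arithmetic for the loop above: a large page spans
 * PTRS_PER_PTE small pages, so "pfn += PTRS_PER_PTE" advances one
 * large page at a time - 1024 x 4kB = 4MB per pmd entry without PAE,
 * 512 x 4kB = 2MB with PAE. Despite its name, pages_2m therefore
 * counts 4MB pages on non-PAE kernels.
 */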

pte_t *kmap_pte;
pgprot_t kmap_prot;

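/*
 * kmap_init() below caches the pte for the first kmap fixmap slot in
 * kmap_pte, so the atomic kmap code can index from it directly
 * instead of walking the page tables on every mapping (the consumers
 * live in the highmem support code).
 */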
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

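/*
 * Give a boot-time reserved highmem page back to the buddy allocator:
 * clear PG_reserved, reset the refcount to one, then __free_page()
 * drops that last reference and puts the page on the free lists.
 */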
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
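		/*
		 * With PAE these top-level entries are PDPTEs. Rather
		 * than leaving them non-present, point each one (with
		 * the low/present bit, hence the "1 +") at the
		 * all-zero empty_zero_page, which contains no valid
		 * pmd entries. (The rationale is an assumption here:
		 * keeping the PDPTEs present avoids surprises on
		 * CPUs/hypervisors that cache them at cr3 load time.)
		 */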
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

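/*
 * Probe for NX: CPUID leaf 0x80000001 must exist, and bit 20 of its
 * EDX output is the NX feature flag. If present (and not disabled on
 * the command line), turn the feature on by setting the NX enable
 * bit in the EFER MSR, and allow _PAGE_NX in ptes.
 */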
void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
				  unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

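/*
 * Set up one node's bootmem bitmap at the caller-chosen physical
 * address 'bootmap'. init_bootmem_node() starts with every page
 * marked reserved; free_bootmem_with_active_regions() then frees the
 * pages backed by actual RAM, and early_res_to_bootmem() re-reserves
 * the early allocations (e.g. the kernel image and the bitmap
 * itself).
 */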
static unsigned long __init setup_node_bootmem(int nodeid,
				 unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned long bootmap)
{
	unsigned long bootmap_size;

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap >> PAGE_SHIFT,
					 start_pfn, end_pfn);
	printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
		nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
		nodeid, bootmap, bootmap + bootmap_size);
	free_bootmem_with_active_regions(nodeid, end_pfn);
	early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	return bootmap + bootmap_size;
}

void __init setup_bootmem_allocator(void)
{
	int nodeid;
	unsigned long bootmap_size, bootmap;

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

	for_each_online_node(nodeid) {
		unsigned long start_pfn, end_pfn;

#ifdef CONFIG_NEED_MULTIPLE_NODES
		start_pfn = node_start_pfn[nodeid];
		end_pfn = node_end_pfn[nodeid];
		if (start_pfn > max_low_pfn)
			continue;
		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;
#else
		start_pfn = 0;
		end_pfn = max_low_pfn;
#endif
		bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
					     bootmap);
	}

	after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
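	/*
	 * BUILD_BUG_ON() needs compile-time constants. __FIXADDR_TOP
	 * and high_memory are normally variables, so for the
	 * compile-time checks they are temporarily redefined to
	 * conservative constant values (fixmap at the very top of the
	 * address space, lowmem ending 128MB below it); the runtime
	 * BUG_ON()s further down recheck with the real values.
	 */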
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
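/*
 * On 32-bit, hot-added memory always goes into ZONE_HIGHMEM: the
 * lowmem direct mapping is laid out once at boot and is not grown
 * afterwards.
 */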
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
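/*
 * How the test works: flag starts out as 1. The write at label 1
 * targets the read-only FIX_WP_TEST page. If WP is honoured, the
 * write faults and the exception table entry resumes execution at
 * label 2, skipping the xorl, so flag stays 1. If the write silently
 * succeeds, the xorl clears flag to 0 and the caller knows WP is
 * broken.
 */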
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

static int kernel_set_to_readonly;

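/*
 * The two helpers below temporarily flip the protection of the
 * kernel text mapping after mark_rodata_ro() has made it read-only;
 * they exist for runtime code patching (ftrace being the usual
 * caller - an assumption, as the callers live outside this file).
 */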
void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}
1129