xref: /openbmc/linux/arch/x86/mm/init_32.c (revision f42b3800)
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a page middle directory (pmd) table and puts a pointer to it
 * in the given page global directory (pgd) entry. In non-PAE builds
 * this simply returns the pgd entry, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
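
/*
 * Usage sketch (illustration only, not part of the build): to reach the
 * pmd slot covering a virtual address, callers combine this helper with
 * pmd_index(), exactly as page_table_range_init() does below:
 *
 *	pgd_t *pgd = pgd_base + pgd_index(vaddr);
 *	pmd_t *pmd = one_md_table_init(pgd) + pmd_index(vaddr);
 */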

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
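
/*
 * A minimal sketch of how the result is used (hypothetical caller, for
 * illustration only): the helper is idempotent, so it is safe to call
 * on an already-populated pmd and index into the returned table:
 *
 *	pte_t *pte = one_page_table_init(pmd) + pte_index(vaddr);
 *	set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
 */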

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without re-checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
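
/*
 * For instance, permanent_kmaps_init() below pre-builds the pkmap
 * window with this helper:
 *
 *	page_table_range_init(PKMAP_BASE,
 *			      PKMAP_BASE + PAGE_SIZE * LAST_PKMAP,
 *			      pgd_base);
 *
 * Only the pte pages are allocated here; the actual mappings are
 * installed later, e.g. by kmap().
 */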

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
}
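
/*
 * Worked example for the large-page path above (assuming a non-PAE
 * build): PTRS_PER_PTE == 1024, so one PSE pmd entry maps
 * 1024 * 4 KiB = 4 MiB, which is why the loop advances pfn by
 * PTRS_PER_PTE per entry; under PAE, PTRS_PER_PTE == 512 and a large
 * page is 2 MiB.
 */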

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}
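
/*
 * The magic numbers, spelled out: pfn 0x70000 is physical address
 * 0x70000 << PAGE_SHIFT == 0x70000000 (1.75 GiB), and 0x70000..0x7003F
 * spans 0x40 pages == 256 KiB.  Pages in that window are kept reserved
 * when ppro_with_ram_bug() reports an affected CPU (see mem_init()).
 */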

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
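
/*
 * The cached pte is consumed by the atomic kmap path; roughly (see
 * arch/x86/mm/highmem_32.c for the real code):
 *
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *
 * Fixmap addresses grow downwards with the index, hence the "- idx".
 */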

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * The NUMA case is not currently handled: we assume a single node and
 * that any memory added dynamically and onlined here lives in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
}
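
/*
 * A note on the loop bound above: 1 << (32 - PAGE_SHIFT) is the pfn of
 * the 4 GiB boundary (0x100000 with 4 KiB pages), so the scan walks
 * from the first page past lowmem to the end of the 32-bit physical
 * address space, stopping early at the first hole in the boot page
 * table.
 */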

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}
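
/*
 * The fixmap range computed above, unpacked: __fix_to_virt(x) is
 * FIXADDR_TOP - (x << PAGE_SHIFT), so masking the lowest fixmap slot
 * with PMD_MASK rounds the start down to a pmd boundary, while the end
 * rounds FIXADDR_TOP up to the next one; page_table_range_init() then
 * guarantees a pte page exists for every pmd in between.
 */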

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}
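
/*
 * Decoding the PAE arm above: __pa(empty_zero_page) is the physical
 * address of an all-zero page, and the "+ 1" sets bit 0, _PAGE_PRESENT.
 * The low pgd slots therefore point at a present pmd whose entries are
 * all zero (hence not present), instead of being cleared outright.
 */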

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
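
/*
 * Example command lines (illustrative): booting with "noexec=on" (or
 * bare "noexec") enables NX when the CPU supports it, "noexec=off"
 * forces it off, and any other value makes the parser return -EINVAL.
 */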

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif
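
/*
 * Reading set_nx(): v[3] receives EDX of cpuid leaf 0x80000001, whose
 * bit 20 is the NX feature flag; when it is set and NX was not disabled
 * on the command line, EFER.NXE (EFER_NX) is enabled through MSR_EFER
 * so that _PAGE_NX ptes actually take effect.
 */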

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on
 * 386s and also on some strange 486s (NexGen etc.). All 586+ CPUs are
 * OK. This used to involve black magic jumps to work around some nasty
 * CPU bugs, but fortunately the switch to using exceptions got rid of
 * all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
#endif /* double-sanity-check paranoia */

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
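
/*
 * How the test above resolves, step by step: flag starts at 1 (the
 * input constraint "2" (1) seeds operand 2), then the movb at label 1
 * writes to the read-only FIX_WP_TEST page.  If the CPU honours WP, the
 * write faults and the exception table entry _ASM_EXTABLE(1b,2b) skips
 * straight to label 2, leaving flag == 1; otherwise the write succeeds
 * and the xorl clears flag to 0.  So a return value of 1 means WP
 * works.
 */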

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif