xref: /openbmc/linux/arch/powerpc/mm/init_64.c (revision 732a675a)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
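/*
 * Poison, unreserve, and hand back to the page allocator every page
 * between __init_begin and __init_end, once boot-time __init code and
 * data are no longer needed.
 */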
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
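/*
 * Release the pages backing an initrd image once it is no longer
 * needed; the accounting mirrors free_initmem() above.
 */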
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;
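/*
 * Register each LMB memory region, plus the vmalloc space, with
 * /proc/kcore so the running kernel's memory can be inspected as an
 * ELF core image.
 */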
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
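/* Slab constructor: page-table pages must start out fully zeroed. */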
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put it into the tables above, because HPAGE_SHIFT is not a
 * compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
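/*
 * Create the slab caches described by pgtable_cache_size[] and
 * pgtable_cache_name[].  SLAB_PANIC means there is no error path:
 * boot fails outright if a cache cannot be created.
 */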
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			 "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}
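/*
 * For illustration only: the consumers of these caches live in
 * asm/pgalloc.h.  In kernels of this vintage, pgd_alloc() is roughly
 * the following sketch (PGD_CACHE_NUM is the pgtable_cache[] index of
 * the pgd cache; shown here just to make the cache's user concrete):
 *
 *	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM],
 *					GFP_KERNEL);
 *	}
 */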
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
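/*
 * Illustrative example (sizes are hypothetical, not ppc64's real
 * values): if sizeof(struct page) were 64 and an address pointed 100
 * bytes past vmemmap, then offset = 100 and
 * offset / sizeof(struct page) = 1; masking with PAGE_SECTION_MASK
 * rounds pfn 1 down to pfn 0, the first pfn of its section.  The
 * integer division, rather than a subtraction of struct page
 * pointers, is what sidesteps the undefined behaviour noted above.
 */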
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.  One section's mem_map occupies
 * PAGES_PER_SECTION * sizeof(struct page) bytes of vmemmap, so the
 * loop below advances through the page section by section.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
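/*
 * Populate the vmemmap for [start_page, start_page + nr_pages): for
 * each linear-mapping-sized chunk not already covered by an
 * initialised section, allocate a backing block on the requested node
 * and bolt it into the hash page table with kernel read/write
 * permissions.
 */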
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_vmemmap_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */