xref: /openbmc/linux/arch/powerpc/mm/init_64.c (revision 4800cd83)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

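/*
 * Free the memory occupied by the kernel's __init sections once boot is
 * complete: poison each page, clear its reserved bit, reset its refcount
 * and hand it back to the page allocator.
 */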
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
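/*
 * Release the pages that held the initrd image once it is no longer
 * needed, returning them to the page allocator.
 */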
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

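/*
 * Constructors for the pagetable kmem_caches below: a freshly allocated
 * table always starts out fully cleared.
 */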
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	PGT_CACHE(shift) = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}


void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
		panic("Couldn't allocate pgtable caches");

	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
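
/*
 * Illustrative sketch (not part of the original file): how these caches
 * are typically consumed.  The pgd_alloc()/pgd_free() helpers in
 * asm/pgalloc-64.h are expected to look roughly like this; exact names
 * and flags may differ between kernel versions.
 *
 *	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *	}
 *
 *	static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 *	{
 *		kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *	}
 */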

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

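/*
 * Singly linked list recording, for each backing block allocated by
 * vmemmap_populate(), the virtual address it backs and its physical
 * address, so those backing pages can be found again later.
 */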
struct vmemmap_backing *vmemmap_list;

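/*
 * Hand out vmemmap_backing entries one at a time, carving them out of
 * whole pages so we don't pay a separate allocation per entry.
 */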
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	static struct vmemmap_backing *next;
	static int num_left;

	/* allocate a page when required and hand out chunks */
	if (!next || !num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

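/*
 * Populate the virtual memmap for the given range of struct pages: for
 * each vmemmap-page-sized chunk that is not already backed, allocate a
 * block on the requested node, record it on vmemmap_list and map it.
 */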
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */