xref: /openbmc/linux/arch/powerpc/mm/init_64.c (revision bc000245)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

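/*
 * memstart_addr is the physical address of the start of system RAM;
 * kernstart_addr is the physical address the kernel image itself was
 * loaded at (the two differ when a relocatable kernel runs relocated).
 */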
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

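/*
 * Constructors for the page table kmem caches created below: freshly
 * allocated tables always start out zeroed.  With transparent
 * hugepages enabled the PMD table is twice its usual size, hence the
 * larger memset in pmd_ctor().
 */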
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
	memset(addr, 0, PMD_TABLE_SIZE);
#endif
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}

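/*
 * Illustrative use of these caches (not part of this file): higher
 * level tables are allocated and freed through PGT_CACHE(), e.g.
 *
 *	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The hugepage code registers its additional table sizes through
 * pgtable_cache_add() during boot.
 */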

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
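
/*
 * Worked example (hypothetical values): if 'page' points into the
 * struct page for pfn 0x12345 and PAGES_PER_SECTION is 0x1000, the
 * division above yields 0x12345 and the mask rounds it down to
 * 0x12000, the first pfn of the enclosing section.
 */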

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE covering that area, create the mapping. Note that
	 * we don't increment phys because all PTEs are of the large size
	 * and thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int  mapped = htab_bolt_mapping(start, start + page_size, phys,
					pgprot_val(PAGE_KERNEL),
					mmu_vmemmap_psize,
					mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

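/*
 * Head of a singly linked list with one vmemmap_backing entry per
 * vmemmap block allocated below, recording the virtual and physical
 * address of each block.  It is consumed by realmode_pfn_to_page()
 * and kept around for the sake of crashdump.
 */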
struct vmemmap_backing *vmemmap_list;

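/* Carve vmemmap_backing entries out of a node-local page, allocating
 * a fresh page whenever the previous one has been used up.
 */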
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	static struct vmemmap_backing *next;
	static int num_left;

	/* allocate a page when required and hand out chunks */
	if (!next || !num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

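/* Record the phys/virt mapping of a newly allocated vmemmap block at
 * the head of vmemmap_list.
 */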
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

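/* Back [start, end) with vmemmap pages: for each chunk of the vmemmap
 * that is not already covered by an initialised section, allocate a
 * node-local block, record it on vmemmap_list and map it.
 */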
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size used to map the vmemmap. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}

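/* The vmemmap backing allocated above is never torn down, so these
 * hooks are left empty.
 */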
void vmemmap_free(unsigned long start, unsigned long end)
{
}

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * In real mode we cannot use the sparsemem vmemmap directly, so we fall
 * back to walking the list of vmemmap blocks which we already maintain
 * for the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * The realmode_pfn_to_page functions can fail because:
 * 1) The vmemmap blocks are not contiguous in RAM (they are contiguous
 * only in virtual address space, which is not available in real mode),
 * so the requested struct page can be split across blocks and
 * get_page/put_page may fail.
 * 2) When huge pages are used, get_page/put_page will also fail in
 * real mode, as the addresses linked from the struct page are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Check that page struct is not split between real pages */
		if ((pg_va + sizeof(struct page)) >
				(vmem_back->virt_addr + page_size))
			return NULL;

		page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
		return page;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
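
/*
 * Illustrative caller pattern (an assumption, not taken from this
 * file): real-mode users are expected to check for a NULL return and
 * fall back to the normal virtual-mode path, e.g.
 *
 *	page = realmode_pfn_to_page(pfn);
 *	if (!page)
 *		return fall_back_to_virtual_mode();
 *
 * where fall_back_to_virtual_mode() is a placeholder for whatever
 * retry mechanism the caller uses.
 */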

#elif defined(CONFIG_FLATMEM)

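/*
 * With FLATMEM the memmap is a single contiguous array, so no
 * vmemmap_list lookup is needed and pfn_to_page() can be used
 * directly.
 */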
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
358