xref: /openbmc/linux/arch/arm64/mm/mmu.c (revision 05bcf503)
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and copy-on-write (COW).
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
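/*
 * Note: this page backs the ZERO_PAGE() macro (see asm/pgtable.h), which
 * is mapped read-only into user space for read faults on anonymous
 * mappings.
 */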

pgprot_t pgprot_default;
EXPORT_SYMBOL(pgprot_default);

static pmdval_t prot_sect_kernel;

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,			/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache, or the cache and write buffer, to be turned off. They change
 * the Normal memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
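/*
 * Example: booting with "cachepolicy=uncached" on the kernel command line
 * makes Normal memory non-cacheable while chasing a suspected coherency
 * problem; "writethrough" and "writeback" select the other policies
 * defined above.
 */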

/*
 * Set up the default page protections and the attributes used for kernel
 * section mappings, according to the configuration in use.
 */
static void __init init_mem_pgprot(void)
{
	pteval_t default_pgprot;
	int i;

	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);

#ifdef CONFIG_SMP
	/*
	 * Mark memory with the "shared" attribute for SMP systems.
	 */
	default_pgprot |= PTE_SHARED;
	prot_sect_kernel |= PMD_SECT_S;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | default_pgprot);
	}

	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
}
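/*
 * Illustration: with CONFIG_SMP enabled, the resulting default works out
 * to PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_ATTRINDX(MT_NORMAL), i.e.
 * a valid, accessed, shareable page of Normal cacheable memory.
 */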

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
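/*
 * Illustrative sketch (not part of the original file): a typical caller
 * is a character device mmap() handler along the lines below. The handler
 * itself is hypothetical; phys_mem_access_prot() and remap_pfn_range()
 * are the real interfaces being combined.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* pick cacheability based on pfn_valid() and O_SYNC */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif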

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}
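/*
 * Note: memblock_alloc() returns a physical address this early in boot,
 * hence the __va() conversion above; zeroing the block ensures that
 * freshly allocated page tables start with all entries invalid.
 */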

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and replace
	 * them with a full pmd table.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try a section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
		else
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
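/*
 * Worked example: with 4KB pages a section covers 2MB, so a region whose
 * virtual start, virtual end and physical base are all 2MB-aligned is
 * mapped with one block entry per pmd slot; anything less aligned falls
 * back to a full pte table via alloc_init_pte().
 */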

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for
 * the mapping of the given physical/virtual address range.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		create_mapping(start, __phys_to_virt(start), end - start);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	/*
	 * At most PGDIR_SIZE of memory is addressable via the initial
	 * direct kernel mapping in swapper_pg_dir, so limit early memblock
	 * allocations to that range for now.
	 */
	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);

	init_mem_pgprot();
	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(empty_zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can safely be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}
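/*
 * Note: idmap_pg_dir holds the 1:1 (virtual == physical) mapping of the
 * kernel created during early boot in head.S, so instruction fetch keeps
 * working across the MMU-off transition.
 */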

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
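/*
 * Note: kern_addr_valid() is used by generic code such as /proc/kcore to
 * check that a kernel virtual address is backed by a valid page before it
 * is dereferenced.
 */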
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	return vmemmap_populate_basepages(start_page, size, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);

			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
		} else {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	} while (addr = next, addr != end);

	return 0;
}
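/*
 * Note: with 4KB pages, each iteration above maps PMD_SIZE (2MB) of the
 * struct page array with a single section entry, avoiding the per-page
 * pte tables that the 64KB-page variant builds via
 * vmemmap_populate_basepages().
 */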
#endif	/* CONFIG_ARM64_64K_PAGES */
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */