/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

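/*
 * Cache policies selectable with the "cachepolicy=" early parameter. Each
 * entry pairs a MAIR_EL1 attribute byte for MT_NORMAL with the TCR_EL1
 * IRGN/ORGN bits used for page table walks.
 */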
struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,			/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These policies are useful for identifying cache coherency problems by
 * allowing the cache, or the cache and write buffer, to be turned off. They
 * change the Normal memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
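	/* Each MAIR attribute is an 8-bit field, hence the MT_NORMAL * 8 offset. */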
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, %2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
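	/* IRGN/ORGN set the cacheability used for hardware page table walks. */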
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

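/*
 * Install page-granular mappings for [addr, end), allocating a new pte table
 * for the pmd entry if one is not already present.
 */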
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

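/*
 * Map [addr, end) at the pmd level, using section (block) mappings whenever
 * the virtual range and physical address are all section-aligned, and
 * falling back to page mappings via alloc_init_pte() otherwise.
 */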
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  int map_io)
{
	pmd_t *pmd;
	unsigned long next;
	pmdval_t prot_sect;
	pgprot_t prot_pte;

	if (map_io) {
		prot_sect = PROT_SECT_DEVICE_nGnRE;
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
		prot_sect = PROT_SECT_NORMAL_EXEC;
		prot_pte = PAGE_KERNEL_EXEC;
	}

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys | prot_sect));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot_pte);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

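/*
 * Map [addr, end) at the pud level. With the 4K granule, 1GB blocks are used
 * when alignment allows; otherwise the range is handed down to
 * alloc_init_pmd().
 */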
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys,
				  int map_io)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(&init_mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (!map_io && (PAGE_SHIFT == 12) &&
		    ((addr | next | phys) & ~PUD_MASK) == 0) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, map_io);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by the phys/virt/size arguments.
 */
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
				    unsigned long virt, phys_addr_t size,
				    int map_io)
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, map_io);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

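/*
 * Map a physical range into the kernel page tables (swapper_pg_dir). The
 * virtual address must lie in the kernel range; anything below VMALLOC_START
 * is rejected with a warning.
 */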
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
}

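/*
 * Install an identity (VA == PA) mapping for [addr, addr + size) in
 * idmap_pg_dir, as device memory when map_io is set. For illustration only
 * (hypothetical caller and address), a 2MB device region could be mapped
 * with:
 *	create_id_mapping(phys_base, SZ_2M, 1);
 */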
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
{
	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
		return;
	}
	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
			 addr, addr, size, map_io);
}

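/*
 * Create the linear mapping: every memblock memory region is mapped at the
 * virtual address given by __phys_to_virt() of its base.
 */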
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
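/*
 * With 4K pages the vmemmap is backed by section mappings: each empty pmd
 * entry gets a PMD_SIZE block from vmemmap_alloc_block_buf().
 */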
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
466