// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

struct hash_pte *Hash;		/* virtual address of the hash table */
static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;		/* value to load into the SDR1 register */
static unsigned int hash_mb, hash_mb2;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
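
/*
 * Example (illustrative values only): if bat_addrs[0] = { 0xc0000000,
 * 0xc0ffffff, 0 } describes a 16M BAT mapping, then
 * v_block_mapped(0xc0001000) returns 0x1000 and p_block_mapped(0x1000)
 * returns 0xc0001000.
 */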

static int find_free_bat(void)
{
	int b;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		for (b = 0; b < 4; b++) {
			struct ppc_bat *bat = BATS[b];

			if (!(bat[0].batl & 0x40))	/* 601: V bit lives in the lower BAT word */
				return b;
		}
	} else {
		int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

		for (b = 0; b < n; b++) {
			struct ppc_bat *bat = BATS[b];

			if (!(bat[1].batu & 3))		/* Vs and Vp both clear in the DBAT */
				return b;
		}
	}
	return -1;
}

/*
 * This function calculates the size of the largest block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 8M on 601 and 256M on other 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
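/*
 * Worked example (illustrative): base = 0x00400000 and top = 0x01000000
 * give base_shift = 22 (4M alignment) and block_shift = 23 (the 12M span
 * rounds down to 8M), so the result is min(256M, 4M, 8M) = 4M.
 */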
static unsigned int block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * Only for 603+ ...
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	/*
	 * Note: _PAGE_EXEC is a non-zero constant, so the protection below is
	 * always BPP_RX; callers only use setibat() for executable mappings.
	 */
	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;	/* Vp = 1 */
}

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = block_size(base, top);

		if (size < 128 << 10)	/* 128k is the smallest block a BAT can map */
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}
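
/*
 * Example (illustrative): covering 192M of RAM starting at 0 consumes two
 * BATs, a 128M block at 0 followed by a 64M block at 0x08000000, after
 * which base == top and the loop stops.
 */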

/*
 * Map as much of RAM as possible with BATs. When strict kernel RWX is
 * enabled, split the mapping at __init_begin so that no single BAT block
 * spans the boundary between kernel text and the rest of RAM.
 */
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

	if (__map_without_bats) {
		pr_debug("RAM mapped without BATs\n");
		return base;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}

void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
	unsigned long size;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
		size = block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = block_size(base, top);
		size = max(size, 128UL << 10);
		if ((top - base) > size) {
			if (strict_kernel_rwx_enabled())
				pr_warn("Kernel _etext not properly aligned\n");
			size <<= 1;
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	/* Set the no-execute (N) bit on the kernel segment registers */
	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (IS_ENABLED(CONFIG_MODULES) &&
		    (VMALLOC_START & 0xf0000000) == i << 28)
			break;
		mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);	/* SR[N] = 1 */
	}
}

void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				   _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;	/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			flags &= ~_PAGE_EXEC;
		}
		if (flags & _PAGE_EXEC)
			bat[0] = bat[1];
		else
			bat[0].batu = bat[0].batl = 0;
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				   _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW) ?
			((flags & _PAGE_USER) ? PP_RWRW : PP_RWXX) : PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
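
/*
 * Illustrative use only: a platform setup might map 16M of non-cached,
 * guarded I/O space with the next free BAT pair like this (the address
 * is hypothetical):
 *
 *	setbat(-1, 0xff000000, 0xff000000, SZ_16M, PAGE_KERNEL_NCG);
 */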

/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!Hash)
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/*
	 * We also avoid filling the hash if not coming from a fault:
	 * 0x300 is a DSI (data access) and 0x400 an ISI (instruction
	 * access) exception.
	 */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
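	/*
	 * Worked example (illustrative): with 512MB of RAM and 4kB pages,
	 * n_hpteg = 512M / (4k * 8) = 16384, so the hash table occupies
	 * 16384 << LG_HPTEG_SIZE = 1MB.
	 */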
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	/* SDR1 = physical base of the hash table | HTABMASK low bits */
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;

	/*
	 * When KASAN is selected, there is already an early temporary hash
	 * table and the switch to the final hash table is done later.
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	MMU_init_hw_patch();
}

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		hash = (unsigned int)Hash;
	else
		hash = (unsigned int)Hash - PAGE_OFFSET;

	/* mask 0xffff patches a 16-bit immediate; 0x7c0 the MB field of an rlwinm */
	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}
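
/*
 * Rough sketch (not the actual hashtable.S code): after patching, the
 * primary hash lookup behaves approximately like
 *
 *	hash = (vsid ^ (ea >> 12)) & Hash_mask;
 *	hpteg = Hash + (hash << LG_HPTEG_SIZE);
 *
 * i.e. the patched immediates encode the hash table base and size mask.
 */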

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the case where the first MEMBLOCK
	 * does not map physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 601 can only access 16MB at the moment */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size         = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask         = 0x%lx\n", Hash_mask);
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	pr_info("Activating Kernel Userspace Execution Prevention\n");

	if (disabled)
		pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif