// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification. This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

struct hash_pte *Hash;
static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
static unsigned int hash_mb, hash_mb2;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

static int find_free_bat(void)
{
	int b;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		for (b = 0; b < 4; b++) {
			struct ppc_bat *bat = BATS[b];

			/* On 601, the valid bit lives in the lower BAT word */
			if (!(bat[0].batl & 0x40))
				return b;
		}
	} else {
		int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

		for (b = 0; b < n; b++) {
			struct ppc_bat *bat = BATS[b];

			/* A BAT is free when neither Vs nor Vp is set */
			if (!(bat[1].batu & 3))
				return b;
		}
	}
	return -1;
}

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 8M on 601 and 256M on other 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
static unsigned int block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
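/*
 * Illustrative example (not from the original source): with
 * base = 0x00500000 and top = 0x02000000, the lowest bit set in base
 * gives base_shift = 20 (1M alignment), fls(top - base) - 1 gives
 * block_shift = 24 (16M), so block_size() returns
 * min(256M, 1M, 16M) = 1M on a 6xx other than the 601.
 */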
/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * Only for 603+ ...
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;		/* Vp = 1 */
}

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = block_size(base, top);

		/* Blocks smaller than 128K cannot be mapped by a BAT */
		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

	if (__map_without_bats) {
		pr_debug("RAM mapped without BATs\n");
		return base;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	/* With strict RWX, map up to __init_begin first, then the remainder */
	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}

void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
	unsigned long size;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
		size = block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = block_size(base, top);
		size = max(size, 128UL << 10);
		if ((top - base) > size) {
			if (strict_kernel_rwx_enabled())
				pr_warn("Kernel _etext not properly aligned\n");
			size <<= 1;
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (IS_ENABLED(CONFIG_MODULES) &&
		    (VMALLOC_START & 0xf0000000) == i << 28)
			break;
		/* 0x10000000 is the N (no-execute) bit of the segment register */
		mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);
	}
}

void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		return;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		/* Make the DBATs mapping below __init_begin read-only */
		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}
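/*
 * Illustrative note (not from the original source): setibat() above and
 * setbat() below both encode the block length as bl = (size >> 17) - 1,
 * i.e. the number of 128K units minus one. 128K gives bl = 0x000 and
 * 256M gives bl = 0x7ff, matching the 11-bit BL field of the upper BAT
 * word.
 */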
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
		bat[1].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].batu |= 1;		/* Vp = 1 */
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			flags &= ~_PAGE_EXEC;
		}
		if (flags & _PAGE_EXEC)
			bat[0] = bat[1];
		else
			bat[0].batu = bat[0].batl = 0;
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW) ?
			((flags & _PAGE_USER) ? PP_RWRW : PP_RWXX) : PP_RXRX;
		bat->batu = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->batl = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}

/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!Hash)
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault (0x300 = DSI, 0x400 = ISI) */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}
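/*
 * Worked sizing example (illustrative, not from the original source)
 * for MMU_init_hw() below: with 256MB of RAM and 4K pages,
 * n_hpteg = 256M / (4K * 8) = 8192, which is already a power of 2, so
 * Hash_size = 8192 << 6 = 512K and SDR1_LOW_BITS = (8192 - 1) >> 10 = 7.
 */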
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	/* SDR1 holds the hash table's physical base in its high bits and the size mask in its low bits */
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;

	/*
	 * When KASAN is selected, there is already an early temporary hash
	 * table and the switch to the final hash table is done later.
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	MMU_init_hw_patch();
}

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
				((unsigned int)Hash - PAGE_OFFSET) >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}
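/*
 * Note (added, hedged): the patched sites above live in the low-level
 * hash fault path in hashtable.S. The hash table's location and size
 * are only known once memblock has placed it, so the 16-bit immediates
 * (mask 0xffff) and the 5-bit fields (mask 0x7c0) are patched into the
 * instructions at boot, presumably to keep the hot fault path free of
 * extra memory loads.
 */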
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 601 can only access 16MB at the moment */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
	else /* Anything else has 256M mapped */
		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	pr_info("Activating Kernel Userspace Execution Prevention\n");

	if (disabled)
		pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif