/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash-based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>

/*
 * This struct lists the software-supported page sizes.  The hardware MMU may
 * support other sizes not listed here.  The .ind field is only used on MMUs
 * that have indirect page table entries.
 */
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	/* we only manage 4k and 16k pages as normal pages */
#ifdef CONFIG_PPC_4K_PAGES
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
#else
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
#endif
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU || CONFIG_PPC_8xx */

/* The variables below are currently only used on 64-bit Book3E,
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

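/*
 * Illustrative usage of the interfaces above (a sketch of the typical
 * callers, which live elsewhere in the kernel, not code from this file):
 * flush_tlb_page(vma, addr) after a single user PTE has changed,
 * flush_tlb_mm(mm) when an entire address space is being torn down or
 * remapped, and flush_tlb_kernel_range(start, end) after kernel mappings
 * (e.g. vmalloc space) have been modified.  The local_* variants only
 * affect the current processor and are used when that is known to be
 * sufficient.
 */
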
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled.  At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function, as well as __local_flush_tlb_page(), must only be
	 * called for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush.  This should
 * be optimized based on a threshold on the size of the range, since some
 * implementations can stack multiple tlbivax instructions before a tlbsync,
 * but for now we keep it that way (see the sketch after this function).
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

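/*
 * Illustrative sketch only (not wired up anywhere): a threshold-based
 * flush_tlb_range(), as suggested by the comment above, could flush page
 * by page below some cutoff and fall back to a full PID flush above it,
 * roughly:
 *
 *	if (((end - start) >> PAGE_SHIFT) <= FLUSH_RANGE_THRESHOLD) {
 *		unsigned long addr;
 *
 *		for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
 *			flush_tlb_page(vma, addr);
 *	} else {
 *		flush_tlb_mm(vma->vm_mm);
 *	}
 *
 * FLUSH_RANGE_THRESHOLD is a hypothetical tunable; picking a good value
 * would need measurement on parts that can stack several tlbivax before
 * a tlbsync.
 */
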
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't optimal; ideally we would factor out the
		 * preempt & CPU mask mucking around, or even the IPI, but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

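/*
 * Worked example of the virtual linear page table math above, assuming
 * 64-bit with 4K pages and 8-byte PTEs (PAGE_SHIFT == 12): the PTE for
 * 'address' lives at byte offset (address >> 12) * 8 == address >> 9 in
 * the linear page table, which is what the shift by PAGE_SHIFT - 3
 * computes.  Masking with ~0xffful then rounds that offset down to the
 * start of the page of PTEs containing it, and OR-ing in 'rid' rebuilds
 * the top nibble so the resulting address falls in the virtual linear
 * page table region rather than the original user region.
 */
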
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb
			 * (e.g. 4K: shift 12 -> (12 - 10) >> 1 == 1,
			 * i.e. 4^1 KB; 16M: shift 24 -> 7, i.e. 4^7 KB)
			 */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size.  Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it.  Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW.  It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry.  We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif

#ifdef CONFIG_PPC_MM_SLICES
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
#endif
}
#endif /* CONFIG_PPC64 */