/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}
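
/*
 * Illustrative sketch only, not part of this file's interface: a caller
 * that manages its own virtual range, e.g. for a partial PCI or ISA IO
 * window, would pair __ioremap_at() with __iounmap_at() below roughly
 * like this.  "phb_io_base_phys" and "phb_io_base_virt" are hypothetical
 * placeholders for a host bridge's IO window:
 *
 *	void __iomem *v;
 *
 *	v = __ioremap_at(phb_io_base_phys, phb_io_base_virt, SZ_64K,
 *			 pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (!v)
 *		return -ENOMEM;
 *	...
 *	__iounmap_at(phb_io_base_virt, SZ_64K);
 *
 * Unlike ioremap(), the caller owns the choice of effective address and
 * is responsible for tearing the range down again.
 */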

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot. The vmalloc system will use the addresses
	 * from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/*
	 * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
	 * format, which means that we just cleared supervisor access...
	 * oops ;-)  This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
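
/*
 * Illustrative sketch only: the usual driver-side pattern around the
 * ioremap() variant above and iounmap() further down.  "dev_regs_phys",
 * "DEV_REGS_SIZE" and "DEV_CTRL_OFF" are hypothetical placeholders for
 * a device's register window:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(dev_regs_phys, DEV_REGS_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL_OFF);
 *	...
 *	iounmap(regs);
 *
 * All accesses go through the returned cookie with readl()/writel() and
 * friends; iounmap() then tears down the vmalloc-area mapping.
 */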

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd itself, with the low
 * PTE_RPN_SHIFT bits used for flags.  For a PTE page, we instead have
 * a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have used up all the fragments, mark the PTE
		 * page cache NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If another task seeded mm->context.pte_frag while we were
	 * allocating, hand this page out as a single fragment (the page
	 * count stays 1).  Otherwise seed the cache with the remaining
	 * fragments of this page.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */
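
/*
 * A worked example of the fragment arithmetic above, assuming the usual
 * 64K kernel page carved into 4K fragments (PTE_FRAG_SIZE == 4K, so
 * PTE_FRAG_NR == 16): __alloc_for_cache() returns fragment 0 of a fresh
 * page and parks "ret + 4K" in mm->context.pte_frag.  The next fifteen
 * pte_fragment_alloc() calls are then satisfied by get_from_cache(),
 * each advancing the cursor by 4K.  When fragment 15 is handed out the
 * advanced cursor is page aligned again ((addr & ~PAGE_MASK) == 0), so
 * the cache is set back to NULL and the following allocation grabs a
 * new page.
 */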

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
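
/*
 * Note on the encoding used by pgtable_free_tlb()/__tlb_remove_table()
 * above: page table pages are aligned well beyond MAX_PGTABLE_INDEX_SIZE,
 * so the low bits of the table pointer are guaranteed zero and can carry
 * the page table cache index.  A sketch of the round trip (illustrative
 * only, local names are hypothetical):
 *
 *	pgf   = (unsigned long)table | shift;           encode in low bits
 *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);   recover pointer
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;              recover shift
 *
 * A shift of zero is reserved to mean "PTE fragment", which is why the
 * decode path routes it to pte_fragment_free() instead of a kmem cache.
 */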

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register. The low bits of
	 * PTCR encode the table size as (PATB_SIZE_SHIFT - 12), i.e.
	 * 64K here.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
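
/*
 * Illustrative sketch only of how a hypervisor might use
 * mmu_partition_table_set_entry() above to install a radix entry for a
 * guest; the exact doubleword layout is ISA 3.0 specific and
 * "guest_pgd", "guest_proc_tb" and "lpid" are hypothetical placeholders:
 *
 *	dw0 = PATB_HR | __pa(guest_pgd) | RADIX_PGD_INDEX_SIZE;
 *	dw1 = PATB_GR | __pa(guest_proc_tb);
 *	mmu_partition_table_set_entry(lpid, dw0, dw1);
 *
 * The tlbie sequence that follows the patb0/patb1 stores exists because
 * any cached hash or radix translations for the old use of that lpid
 * must be flushed before the new entry can safely take effect.
 */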