/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        kfree(name);
        pgtable_cache[shift - 1] = new;
        pr_debug("Allocated pgtable cache for order %d\n", shift);
}

void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
                panic("Couldn't allocate pgtable caches");
        /* In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index.  Verify that the
         * initialization above has also created a PUD cache.  This
         * will need re-examination if we add new possibilities for
         * the pagetable layout. */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
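/*
 * Illustrative sketch, not part of the original file: higher-level
 * pagetable allocations are expected to come out of these caches via
 * PGT_CACHE(shift), which simply indexes pgtable_cache[] by index
 * size.  A pgd_alloc()-style helper would look roughly like
 *
 *      pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *      ...
 *      kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The pgd_ctor()/pmd_ctor() constructors above zero each table when
 * its slab object is first created.
 */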
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things.  Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear.
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       pgprot_val(PAGE_KERNEL),
                                       mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
        int mapped = htab_remove_mapping(start, start + page_size,
                                         mmu_vmemmap_psize,
                                         mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

/*
 * vmemmap_list records the physical block backing each mapped chunk of
 * the virtual memmap.  It is kept for the sake of crashdump and is
 * walked by realmode_pfn_to_page() and the memory hotplug teardown
 * path below.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;   /* next free entry / freelist head */
static int num_left;                   /* entries left in the current chunk */
static int num_freed;                  /* entries sitting on the freelist */

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;
        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
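/*
 * Illustrative sketch, not taken from this file: assuming a hash MMU
 * kernel where mmu_vmemmap_psize is 16M, one iteration of the loop in
 * vmemmap_populate() below leaves a list entry recording roughly
 *
 *      vmem_back->virt_addr = start;          16MB-aligned vmemmap VA
 *      vmem_back->phys      = __pa(block);    its 16MB physical backing
 *      vmem_back->list      = previous list head;
 *
 * so the list maps each mapped chunk of the virtual memmap back to the
 * physical block behind it, which is what realmode_pfn_to_page() and
 * vmemmap_list_free() rely on.
 */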
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* make next point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked as invalid, so
                 * vmemmap_populated() returning true means some other
                 * sections still map into this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * this shouldn't happen, but if it is
                                         * the case, leave the memory there
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                           get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}
/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM continuously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* Entries can be freed from vmemmap_list, so check them all */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
                        page = (struct page *) (vmem_back->phys + pg_va -
                                                vmem_back->virt_addr);
                        return page;
                }
        }

        /* The page struct is probably split between real pages */
        return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);
        return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
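/*
 * Illustrative sketch of a caller, not part of this file: real-mode
 * users (for instance KVM's real-mode TCE handlers) must treat a NULL
 * return as "too hard, retry in virtual mode", roughly:
 *
 *      struct page *page = realmode_pfn_to_page(pfn);
 *
 *      if (!page)
 *              return H_TOO_HARD;   (fall back to the virtual-mode path)
 *
 * and even on success, get_page()/put_page() may still fail in real
 * mode for the reasons listed above.
 */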