1 /* 2 * arch/sparc64/mm/init.c 3 * 4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) 5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/string.h> 12 #include <linux/init.h> 13 #include <linux/bootmem.h> 14 #include <linux/mm.h> 15 #include <linux/hugetlb.h> 16 #include <linux/initrd.h> 17 #include <linux/swap.h> 18 #include <linux/pagemap.h> 19 #include <linux/poison.h> 20 #include <linux/fs.h> 21 #include <linux/seq_file.h> 22 #include <linux/kprobes.h> 23 #include <linux/cache.h> 24 #include <linux/sort.h> 25 #include <linux/ioport.h> 26 #include <linux/percpu.h> 27 #include <linux/memblock.h> 28 #include <linux/mmzone.h> 29 #include <linux/gfp.h> 30 31 #include <asm/head.h> 32 #include <asm/page.h> 33 #include <asm/pgalloc.h> 34 #include <asm/pgtable.h> 35 #include <asm/oplib.h> 36 #include <asm/iommu.h> 37 #include <asm/io.h> 38 #include <asm/uaccess.h> 39 #include <asm/mmu_context.h> 40 #include <asm/tlbflush.h> 41 #include <asm/dma.h> 42 #include <asm/starfire.h> 43 #include <asm/tlb.h> 44 #include <asm/spitfire.h> 45 #include <asm/sections.h> 46 #include <asm/tsb.h> 47 #include <asm/hypervisor.h> 48 #include <asm/prom.h> 49 #include <asm/mdesc.h> 50 #include <asm/cpudata.h> 51 #include <asm/setup.h> 52 #include <asm/irq.h> 53 54 #include "init_64.h" 55 56 unsigned long kern_linear_pte_xor[4] __read_mostly; 57 static unsigned long page_cache4v_flag; 58 59 /* A bitmap, two bits for every 256MB of physical memory. These two 60 * bits determine what page size we use for kernel linear 61 * translations. They form an index into kern_linear_pte_xor[]. The 62 * value in the indexed slot is XOR'd with the TLB miss virtual 63 * address to form the resulting TTE. The mapping is: 64 * 65 * 0 ==> 4MB 66 * 1 ==> 256MB 67 * 2 ==> 2GB 68 * 3 ==> 16GB 69 * 70 * All sun4v chips support 256MB pages. Only SPARC-T4 and later 71 * support 2GB pages, and hopefully future cpus will support the 16GB 72 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there 73 * if these larger page sizes are not supported by the cpu. 74 * 75 * It would be nice to determine this from the machine description 76 * 'cpu' properties, but we need to have this table setup before the 77 * MDESC is initialized. 78 */ 79 80 #ifndef CONFIG_DEBUG_PAGEALLOC 81 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. 
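 * It is only used when CONFIG_DEBUG_PAGEALLOC is off; with that option
 * enabled the kernel linear area is mapped with base (PAGE_SIZE) pages
 * instead (see kernel_physical_mapping_init() below).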
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.
*/ 172 unsigned long kern_base __read_mostly; 173 unsigned long kern_size __read_mostly; 174 175 /* Initial ramdisk setup */ 176 extern unsigned long sparc_ramdisk_image64; 177 extern unsigned int sparc_ramdisk_image; 178 extern unsigned int sparc_ramdisk_size; 179 180 struct page *mem_map_zero __read_mostly; 181 EXPORT_SYMBOL(mem_map_zero); 182 183 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; 184 185 unsigned long sparc64_kern_pri_context __read_mostly; 186 unsigned long sparc64_kern_pri_nuc_bits __read_mostly; 187 unsigned long sparc64_kern_sec_context __read_mostly; 188 189 int num_kernel_image_mappings; 190 191 #ifdef CONFIG_DEBUG_DCFLUSH 192 atomic_t dcpage_flushes = ATOMIC_INIT(0); 193 #ifdef CONFIG_SMP 194 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); 195 #endif 196 #endif 197 198 inline void flush_dcache_page_impl(struct page *page) 199 { 200 BUG_ON(tlb_type == hypervisor); 201 #ifdef CONFIG_DEBUG_DCFLUSH 202 atomic_inc(&dcpage_flushes); 203 #endif 204 205 #ifdef DCACHE_ALIASING_POSSIBLE 206 __flush_dcache_page(page_address(page), 207 ((tlb_type == spitfire) && 208 page_mapping(page) != NULL)); 209 #else 210 if (page_mapping(page) != NULL && 211 tlb_type == spitfire) 212 __flush_icache_page(__pa(page_address(page))); 213 #endif 214 } 215 216 #define PG_dcache_dirty PG_arch_1 217 #define PG_dcache_cpu_shift 32UL 218 #define PG_dcache_cpu_mask \ 219 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) 220 221 #define dcache_dirty_cpu(page) \ 222 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 223 224 static inline void set_dcache_dirty(struct page *page, int this_cpu) 225 { 226 unsigned long mask = this_cpu; 227 unsigned long non_cpu_bits; 228 229 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); 230 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); 231 232 __asm__ __volatile__("1:\n\t" 233 "ldx [%2], %%g7\n\t" 234 "and %%g7, %1, %%g1\n\t" 235 "or %%g1, %0, %%g1\n\t" 236 "casx [%2], %%g7, %%g1\n\t" 237 "cmp %%g7, %%g1\n\t" 238 "bne,pn %%xcc, 1b\n\t" 239 " nop" 240 : /* no outputs */ 241 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) 242 : "g1", "g7"); 243 } 244 245 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) 246 { 247 unsigned long mask = (1UL << PG_dcache_dirty); 248 249 __asm__ __volatile__("! 
test_and_clear_dcache_dirty\n" 250 "1:\n\t" 251 "ldx [%2], %%g7\n\t" 252 "srlx %%g7, %4, %%g1\n\t" 253 "and %%g1, %3, %%g1\n\t" 254 "cmp %%g1, %0\n\t" 255 "bne,pn %%icc, 2f\n\t" 256 " andn %%g7, %1, %%g1\n\t" 257 "casx [%2], %%g7, %%g1\n\t" 258 "cmp %%g7, %%g1\n\t" 259 "bne,pn %%xcc, 1b\n\t" 260 " nop\n" 261 "2:" 262 : /* no outputs */ 263 : "r" (cpu), "r" (mask), "r" (&page->flags), 264 "i" (PG_dcache_cpu_mask), 265 "i" (PG_dcache_cpu_shift) 266 : "g1", "g7"); 267 } 268 269 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) 270 { 271 unsigned long tsb_addr = (unsigned long) ent; 272 273 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 274 tsb_addr = __pa(tsb_addr); 275 276 __tsb_insert(tsb_addr, tag, pte); 277 } 278 279 unsigned long _PAGE_ALL_SZ_BITS __read_mostly; 280 281 static void flush_dcache(unsigned long pfn) 282 { 283 struct page *page; 284 285 page = pfn_to_page(pfn); 286 if (page) { 287 unsigned long pg_flags; 288 289 pg_flags = page->flags; 290 if (pg_flags & (1UL << PG_dcache_dirty)) { 291 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 292 PG_dcache_cpu_mask); 293 int this_cpu = get_cpu(); 294 295 /* This is just to optimize away some function calls 296 * in the SMP case. 297 */ 298 if (cpu == this_cpu) 299 flush_dcache_page_impl(page); 300 else 301 smp_flush_dcache_page_impl(page, cpu); 302 303 clear_dcache_dirty_cpu(page, cpu); 304 305 put_cpu(); 306 } 307 } 308 } 309 310 /* mm->context.lock must be held */ 311 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index, 312 unsigned long tsb_hash_shift, unsigned long address, 313 unsigned long tte) 314 { 315 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; 316 unsigned long tag; 317 318 if (unlikely(!tsb)) 319 return; 320 321 tsb += ((address >> tsb_hash_shift) & 322 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); 323 tag = (address >> 22UL); 324 tsb_insert(tsb, tag, tte); 325 } 326 327 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 328 static inline bool is_hugetlb_pte(pte_t pte) 329 { 330 if ((tlb_type == hypervisor && 331 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || 332 (tlb_type != hypervisor && 333 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) 334 return true; 335 return false; 336 } 337 #endif 338 339 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 340 { 341 struct mm_struct *mm; 342 unsigned long flags; 343 pte_t pte = *ptep; 344 345 if (tlb_type != hypervisor) { 346 unsigned long pfn = pte_pfn(pte); 347 348 if (pfn_valid(pfn)) 349 flush_dcache(pfn); 350 } 351 352 mm = vma->vm_mm; 353 354 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ 355 if (!pte_accessible(mm, pte)) 356 return; 357 358 spin_lock_irqsave(&mm->context.lock, flags); 359 360 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 361 if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) 362 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, 363 address, pte_val(pte)); 364 else 365 #endif 366 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, 367 address, pte_val(pte)); 368 369 spin_unlock_irqrestore(&mm->context.lock, flags); 370 } 371 372 void flush_dcache_page(struct page *page) 373 { 374 struct address_space *mapping; 375 int this_cpu; 376 377 if (tlb_type == hypervisor) 378 return; 379 380 /* Do not bother with the expensive D-cache flush if it 381 * is merely the zero page. 
The 'bigcore' testcase in GDB 382 * causes this case to run millions of times. 383 */ 384 if (page == ZERO_PAGE(0)) 385 return; 386 387 this_cpu = get_cpu(); 388 389 mapping = page_mapping(page); 390 if (mapping && !mapping_mapped(mapping)) { 391 int dirty = test_bit(PG_dcache_dirty, &page->flags); 392 if (dirty) { 393 int dirty_cpu = dcache_dirty_cpu(page); 394 395 if (dirty_cpu == this_cpu) 396 goto out; 397 smp_flush_dcache_page_impl(page, dirty_cpu); 398 } 399 set_dcache_dirty(page, this_cpu); 400 } else { 401 /* We could delay the flush for the !page_mapping 402 * case too. But that case is for exec env/arg 403 * pages and those are %99 certainly going to get 404 * faulted into the tlb (and thus flushed) anyways. 405 */ 406 flush_dcache_page_impl(page); 407 } 408 409 out: 410 put_cpu(); 411 } 412 EXPORT_SYMBOL(flush_dcache_page); 413 414 void __kprobes flush_icache_range(unsigned long start, unsigned long end) 415 { 416 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ 417 if (tlb_type == spitfire) { 418 unsigned long kaddr; 419 420 /* This code only runs on Spitfire cpus so this is 421 * why we can assume _PAGE_PADDR_4U. 422 */ 423 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { 424 unsigned long paddr, mask = _PAGE_PADDR_4U; 425 426 if (kaddr >= PAGE_OFFSET) 427 paddr = kaddr & mask; 428 else { 429 pgd_t *pgdp = pgd_offset_k(kaddr); 430 pud_t *pudp = pud_offset(pgdp, kaddr); 431 pmd_t *pmdp = pmd_offset(pudp, kaddr); 432 pte_t *ptep = pte_offset_kernel(pmdp, kaddr); 433 434 paddr = pte_val(*ptep) & mask; 435 } 436 __flush_icache_page(paddr); 437 } 438 } 439 } 440 EXPORT_SYMBOL(flush_icache_range); 441 442 void mmu_info(struct seq_file *m) 443 { 444 static const char *pgsz_strings[] = { 445 "8K", "64K", "512K", "4MB", "32MB", 446 "256MB", "2GB", "16GB", 447 }; 448 int i, printed; 449 450 if (tlb_type == cheetah) 451 seq_printf(m, "MMU Type\t: Cheetah\n"); 452 else if (tlb_type == cheetah_plus) 453 seq_printf(m, "MMU Type\t: Cheetah+\n"); 454 else if (tlb_type == spitfire) 455 seq_printf(m, "MMU Type\t: Spitfire\n"); 456 else if (tlb_type == hypervisor) 457 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); 458 else 459 seq_printf(m, "MMU Type\t: ???\n"); 460 461 seq_printf(m, "MMU PGSZs\t: "); 462 printed = 0; 463 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) { 464 if (cpu_pgsz_mask & (1UL << i)) { 465 seq_printf(m, "%s%s", 466 printed ? "," : "", pgsz_strings[i]); 467 printed++; 468 } 469 } 470 seq_putc(m, '\n'); 471 472 #ifdef CONFIG_DEBUG_DCFLUSH 473 seq_printf(m, "DCPageFlushes\t: %d\n", 474 atomic_read(&dcpage_flushes)); 475 #ifdef CONFIG_SMP 476 seq_printf(m, "DCPageFlushesXC\t: %d\n", 477 atomic_read(&dcpage_flushes_xcall)); 478 #endif /* CONFIG_SMP */ 479 #endif /* CONFIG_DEBUG_DCFLUSH */ 480 } 481 482 struct linux_prom_translation prom_trans[512] __read_mostly; 483 unsigned int prom_trans_ents __read_mostly; 484 485 unsigned long kern_locked_tte_data; 486 487 /* The obp translations are saved based on 8k pagesize, since obp can 488 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 489 * HI_OBP_ADDRESS range are handled in ktlb.S. 
490 */ 491 static inline int in_obp_range(unsigned long vaddr) 492 { 493 return (vaddr >= LOW_OBP_ADDRESS && 494 vaddr < HI_OBP_ADDRESS); 495 } 496 497 static int cmp_ptrans(const void *a, const void *b) 498 { 499 const struct linux_prom_translation *x = a, *y = b; 500 501 if (x->virt > y->virt) 502 return 1; 503 if (x->virt < y->virt) 504 return -1; 505 return 0; 506 } 507 508 /* Read OBP translations property into 'prom_trans[]'. */ 509 static void __init read_obp_translations(void) 510 { 511 int n, node, ents, first, last, i; 512 513 node = prom_finddevice("/virtual-memory"); 514 n = prom_getproplen(node, "translations"); 515 if (unlikely(n == 0 || n == -1)) { 516 prom_printf("prom_mappings: Couldn't get size.\n"); 517 prom_halt(); 518 } 519 if (unlikely(n > sizeof(prom_trans))) { 520 prom_printf("prom_mappings: Size %d is too big.\n", n); 521 prom_halt(); 522 } 523 524 if ((n = prom_getproperty(node, "translations", 525 (char *)&prom_trans[0], 526 sizeof(prom_trans))) == -1) { 527 prom_printf("prom_mappings: Couldn't get property.\n"); 528 prom_halt(); 529 } 530 531 n = n / sizeof(struct linux_prom_translation); 532 533 ents = n; 534 535 sort(prom_trans, ents, sizeof(struct linux_prom_translation), 536 cmp_ptrans, NULL); 537 538 /* Now kick out all the non-OBP entries. */ 539 for (i = 0; i < ents; i++) { 540 if (in_obp_range(prom_trans[i].virt)) 541 break; 542 } 543 first = i; 544 for (; i < ents; i++) { 545 if (!in_obp_range(prom_trans[i].virt)) 546 break; 547 } 548 last = i; 549 550 for (i = 0; i < (last - first); i++) { 551 struct linux_prom_translation *src = &prom_trans[i + first]; 552 struct linux_prom_translation *dest = &prom_trans[i]; 553 554 *dest = *src; 555 } 556 for (; i < ents; i++) { 557 struct linux_prom_translation *dest = &prom_trans[i]; 558 dest->virt = dest->size = dest->data = 0x0UL; 559 } 560 561 prom_trans_ents = last - first; 562 563 if (tlb_type == spitfire) { 564 /* Clear diag TTE bits. */ 565 for (i = 0; i < prom_trans_ents; i++) 566 prom_trans[i].data &= ~0x0003fe0000000000UL; 567 } 568 569 /* Force execute bit on. */ 570 for (i = 0; i < prom_trans_ents; i++) 571 prom_trans[i].data |= (tlb_type == hypervisor ? 572 _PAGE_EXEC_4V : _PAGE_EXEC_4U); 573 } 574 575 static void __init hypervisor_tlb_lock(unsigned long vaddr, 576 unsigned long pte, 577 unsigned long mmu) 578 { 579 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 580 581 if (ret != 0) { 582 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: " 583 "errors with %lx\n", vaddr, 0, pte, mmu, ret); 584 prom_halt(); 585 } 586 } 587 588 static unsigned long kern_large_tte(unsigned long paddr); 589 590 static void __init remap_kernel(void) 591 { 592 unsigned long phys_page, tte_vaddr, tte_data; 593 int i, tlb_ent = sparc64_highest_locked_tlbent(); 594 595 tte_vaddr = (unsigned long) KERNBASE; 596 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; 597 tte_data = kern_large_tte(phys_page); 598 599 kern_locked_tte_data = tte_data; 600 601 /* Now lock us into the TLBs via Hypervisor or OBP. 
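 * Each locked entry below covers one 4MB kernel image mapping, which
 * is why both tte_vaddr and tte_data advance by 0x400000 (4MB) per
 * iteration.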
*/ 602 if (tlb_type == hypervisor) { 603 for (i = 0; i < num_kernel_image_mappings; i++) { 604 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 605 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 606 tte_vaddr += 0x400000; 607 tte_data += 0x400000; 608 } 609 } else { 610 for (i = 0; i < num_kernel_image_mappings; i++) { 611 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); 612 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); 613 tte_vaddr += 0x400000; 614 tte_data += 0x400000; 615 } 616 sparc64_highest_unlocked_tlb_ent = tlb_ent - i; 617 } 618 if (tlb_type == cheetah_plus) { 619 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | 620 CTX_CHEETAH_PLUS_NUC); 621 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; 622 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; 623 } 624 } 625 626 627 static void __init inherit_prom_mappings(void) 628 { 629 /* Now fixup OBP's idea about where we really are mapped. */ 630 printk("Remapping the kernel... "); 631 remap_kernel(); 632 printk("done.\n"); 633 } 634 635 void prom_world(int enter) 636 { 637 if (!enter) 638 set_fs(get_fs()); 639 640 __asm__ __volatile__("flushw"); 641 } 642 643 void __flush_dcache_range(unsigned long start, unsigned long end) 644 { 645 unsigned long va; 646 647 if (tlb_type == spitfire) { 648 int n = 0; 649 650 for (va = start; va < end; va += 32) { 651 spitfire_put_dcache_tag(va & 0x3fe0, 0x0); 652 if (++n >= 512) 653 break; 654 } 655 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 656 start = __pa(start); 657 end = __pa(end); 658 for (va = start; va < end; va += 32) 659 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 660 "membar #Sync" 661 : /* no outputs */ 662 : "r" (va), 663 "i" (ASI_DCACHE_INVALIDATE)); 664 } 665 } 666 EXPORT_SYMBOL(__flush_dcache_range); 667 668 /* get_new_mmu_context() uses "cache + 1". */ 669 DEFINE_SPINLOCK(ctx_alloc_lock); 670 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 671 #define MAX_CTX_NR (1UL << CTX_NR_BITS) 672 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 673 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 674 675 /* Caller does TLB context flushing on local CPU if necessary. 676 * The caller also ensures that CTX_VALID(mm->context) is false. 677 * 678 * We must be careful about boundary cases so that we never 679 * let the user have CTX 0 (nucleus) or we ever use a CTX 680 * version of zero (and thus NO_CONTEXT would not be caught 681 * by version mis-match tests in mmu_context.h). 682 * 683 * Always invoked with interrupts disabled. 684 */ 685 void get_new_mmu_context(struct mm_struct *mm) 686 { 687 unsigned long ctx, new_ctx; 688 unsigned long orig_pgsz_bits; 689 int new_version; 690 691 spin_lock(&ctx_alloc_lock); 692 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 693 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 694 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 695 new_version = 0; 696 if (new_ctx >= (1 << CTX_NR_BITS)) { 697 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 698 if (new_ctx >= ctx) { 699 int i; 700 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 701 CTX_FIRST_VERSION; 702 if (new_ctx == 1) 703 new_ctx = CTX_FIRST_VERSION; 704 705 /* Don't call memset, for 16 entries that's just 706 * plain silly... 
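 * Slot 0 is preloaded with 3 rather than 0, which marks context
 * numbers 0 and 1 as already in use; context 0 is the nucleus context
 * that must never be handed to a user mm (see the comment above
 * get_new_mmu_context()).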
707 */ 708 mmu_context_bmap[0] = 3; 709 mmu_context_bmap[1] = 0; 710 mmu_context_bmap[2] = 0; 711 mmu_context_bmap[3] = 0; 712 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { 713 mmu_context_bmap[i + 0] = 0; 714 mmu_context_bmap[i + 1] = 0; 715 mmu_context_bmap[i + 2] = 0; 716 mmu_context_bmap[i + 3] = 0; 717 } 718 new_version = 1; 719 goto out; 720 } 721 } 722 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 723 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 724 out: 725 tlb_context_cache = new_ctx; 726 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 727 spin_unlock(&ctx_alloc_lock); 728 729 if (unlikely(new_version)) 730 smp_new_mmu_context_version(); 731 } 732 733 static int numa_enabled = 1; 734 static int numa_debug; 735 736 static int __init early_numa(char *p) 737 { 738 if (!p) 739 return 0; 740 741 if (strstr(p, "off")) 742 numa_enabled = 0; 743 744 if (strstr(p, "debug")) 745 numa_debug = 1; 746 747 return 0; 748 } 749 early_param("numa", early_numa); 750 751 #define numadbg(f, a...) \ 752 do { if (numa_debug) \ 753 printk(KERN_INFO f, ## a); \ 754 } while (0) 755 756 static void __init find_ramdisk(unsigned long phys_base) 757 { 758 #ifdef CONFIG_BLK_DEV_INITRD 759 if (sparc_ramdisk_image || sparc_ramdisk_image64) { 760 unsigned long ramdisk_image; 761 762 /* Older versions of the bootloader only supported a 763 * 32-bit physical address for the ramdisk image 764 * location, stored at sparc_ramdisk_image. Newer 765 * SILO versions set sparc_ramdisk_image to zero and 766 * provide a full 64-bit physical address at 767 * sparc_ramdisk_image64. 768 */ 769 ramdisk_image = sparc_ramdisk_image; 770 if (!ramdisk_image) 771 ramdisk_image = sparc_ramdisk_image64; 772 773 /* Another bootloader quirk. The bootloader normalizes 774 * the physical address to KERNBASE, so we have to 775 * factor that back out and add in the lowest valid 776 * physical page address to get the true physical address. 777 */ 778 ramdisk_image -= KERNBASE; 779 ramdisk_image += phys_base; 780 781 numadbg("Found ramdisk at physical address 0x%lx, size %u\n", 782 ramdisk_image, sparc_ramdisk_size); 783 784 initrd_start = ramdisk_image; 785 initrd_end = ramdisk_image + sparc_ramdisk_size; 786 787 memblock_reserve(initrd_start, sparc_ramdisk_size); 788 789 initrd_start += PAGE_OFFSET; 790 initrd_end += PAGE_OFFSET; 791 } 792 #endif 793 } 794 795 struct node_mem_mask { 796 unsigned long mask; 797 unsigned long val; 798 }; 799 static struct node_mem_mask node_masks[MAX_NUMNODES]; 800 static int num_node_masks; 801 802 #ifdef CONFIG_NEED_MULTIPLE_NODES 803 804 int numa_cpu_lookup_table[NR_CPUS]; 805 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 806 807 struct mdesc_mblock { 808 u64 base; 809 u64 size; 810 u64 offset; /* RA-to-PA */ 811 }; 812 static struct mdesc_mblock *mblocks; 813 static int num_mblocks; 814 815 static unsigned long ra_to_pa(unsigned long addr) 816 { 817 int i; 818 819 for (i = 0; i < num_mblocks; i++) { 820 struct mdesc_mblock *m = &mblocks[i]; 821 822 if (addr >= m->base && 823 addr < (m->base + m->size)) { 824 addr += m->offset; 825 break; 826 } 827 } 828 return addr; 829 } 830 831 static int find_node(unsigned long addr) 832 { 833 int i; 834 835 addr = ra_to_pa(addr); 836 for (i = 0; i < num_node_masks; i++) { 837 struct node_mem_mask *p = &node_masks[i]; 838 839 if ((addr & p->mask) == p->val) 840 return i; 841 } 842 /* The following condition has been observed on LDOM guests.*/ 843 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" 844 " rule. 
Some physical memory will be owned by node 0."); 845 return 0; 846 } 847 848 static u64 memblock_nid_range(u64 start, u64 end, int *nid) 849 { 850 *nid = find_node(start); 851 start += PAGE_SIZE; 852 while (start < end) { 853 int n = find_node(start); 854 855 if (n != *nid) 856 break; 857 start += PAGE_SIZE; 858 } 859 860 if (start > end) 861 start = end; 862 863 return start; 864 } 865 #endif 866 867 /* This must be invoked after performing all of the necessary 868 * memblock_set_node() calls for 'nid'. We need to be able to get 869 * correct data from get_pfn_range_for_nid(). 870 */ 871 static void __init allocate_node_data(int nid) 872 { 873 struct pglist_data *p; 874 unsigned long start_pfn, end_pfn; 875 #ifdef CONFIG_NEED_MULTIPLE_NODES 876 unsigned long paddr; 877 878 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); 879 if (!paddr) { 880 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 881 prom_halt(); 882 } 883 NODE_DATA(nid) = __va(paddr); 884 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 885 886 NODE_DATA(nid)->node_id = nid; 887 #endif 888 889 p = NODE_DATA(nid); 890 891 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 892 p->node_start_pfn = start_pfn; 893 p->node_spanned_pages = end_pfn - start_pfn; 894 } 895 896 static void init_node_masks_nonnuma(void) 897 { 898 #ifdef CONFIG_NEED_MULTIPLE_NODES 899 int i; 900 #endif 901 902 numadbg("Initializing tables for non-numa.\n"); 903 904 node_masks[0].mask = node_masks[0].val = 0; 905 num_node_masks = 1; 906 907 #ifdef CONFIG_NEED_MULTIPLE_NODES 908 for (i = 0; i < NR_CPUS; i++) 909 numa_cpu_lookup_table[i] = 0; 910 911 cpumask_setall(&numa_cpumask_lookup_table[0]); 912 #endif 913 } 914 915 #ifdef CONFIG_NEED_MULTIPLE_NODES 916 struct pglist_data *node_data[MAX_NUMNODES]; 917 918 EXPORT_SYMBOL(numa_cpu_lookup_table); 919 EXPORT_SYMBOL(numa_cpumask_lookup_table); 920 EXPORT_SYMBOL(node_data); 921 922 struct mdesc_mlgroup { 923 u64 node; 924 u64 latency; 925 u64 match; 926 u64 mask; 927 }; 928 static struct mdesc_mlgroup *mlgroups; 929 static int num_mlgroups; 930 931 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, 932 u32 cfg_handle) 933 { 934 u64 arc; 935 936 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { 937 u64 target = mdesc_arc_target(md, arc); 938 const u64 *val; 939 940 val = mdesc_get_property(md, target, 941 "cfg-handle", NULL); 942 if (val && *val == cfg_handle) 943 return 0; 944 } 945 return -ENODEV; 946 } 947 948 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, 949 u32 cfg_handle) 950 { 951 u64 arc, candidate, best_latency = ~(u64)0; 952 953 candidate = MDESC_NODE_NULL; 954 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 955 u64 target = mdesc_arc_target(md, arc); 956 const char *name = mdesc_node_name(md, target); 957 const u64 *val; 958 959 if (strcmp(name, "pio-latency-group")) 960 continue; 961 962 val = mdesc_get_property(md, target, "latency", NULL); 963 if (!val) 964 continue; 965 966 if (*val < best_latency) { 967 candidate = target; 968 best_latency = *val; 969 } 970 } 971 972 if (candidate == MDESC_NODE_NULL) 973 return -ENODEV; 974 975 return scan_pio_for_cfg_handle(md, candidate, cfg_handle); 976 } 977 978 int of_node_to_nid(struct device_node *dp) 979 { 980 const struct linux_prom64_registers *regs; 981 struct mdesc_handle *md; 982 u32 cfg_handle; 983 int count, nid; 984 u64 grp; 985 986 /* This is the right thing to do on currently supported 987 * SUN4U NUMA platforms as well, as the PCI 
controller does 988 * not sit behind any particular memory controller. 989 */ 990 if (!mlgroups) 991 return -1; 992 993 regs = of_get_property(dp, "reg", NULL); 994 if (!regs) 995 return -1; 996 997 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; 998 999 md = mdesc_grab(); 1000 1001 count = 0; 1002 nid = -1; 1003 mdesc_for_each_node_by_name(md, grp, "group") { 1004 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { 1005 nid = count; 1006 break; 1007 } 1008 count++; 1009 } 1010 1011 mdesc_release(md); 1012 1013 return nid; 1014 } 1015 1016 static void __init add_node_ranges(void) 1017 { 1018 struct memblock_region *reg; 1019 1020 for_each_memblock(memory, reg) { 1021 unsigned long size = reg->size; 1022 unsigned long start, end; 1023 1024 start = reg->base; 1025 end = start + size; 1026 while (start < end) { 1027 unsigned long this_end; 1028 int nid; 1029 1030 this_end = memblock_nid_range(start, end, &nid); 1031 1032 numadbg("Setting memblock NUMA node nid[%d] " 1033 "start[%lx] end[%lx]\n", 1034 nid, start, this_end); 1035 1036 memblock_set_node(start, this_end - start, 1037 &memblock.memory, nid); 1038 start = this_end; 1039 } 1040 } 1041 } 1042 1043 static int __init grab_mlgroups(struct mdesc_handle *md) 1044 { 1045 unsigned long paddr; 1046 int count = 0; 1047 u64 node; 1048 1049 mdesc_for_each_node_by_name(md, node, "memory-latency-group") 1050 count++; 1051 if (!count) 1052 return -ENOENT; 1053 1054 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), 1055 SMP_CACHE_BYTES); 1056 if (!paddr) 1057 return -ENOMEM; 1058 1059 mlgroups = __va(paddr); 1060 num_mlgroups = count; 1061 1062 count = 0; 1063 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { 1064 struct mdesc_mlgroup *m = &mlgroups[count++]; 1065 const u64 *val; 1066 1067 m->node = node; 1068 1069 val = mdesc_get_property(md, node, "latency", NULL); 1070 m->latency = *val; 1071 val = mdesc_get_property(md, node, "address-match", NULL); 1072 m->match = *val; 1073 val = mdesc_get_property(md, node, "address-mask", NULL); 1074 m->mask = *val; 1075 1076 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] " 1077 "match[%llx] mask[%llx]\n", 1078 count - 1, m->node, m->latency, m->match, m->mask); 1079 } 1080 1081 return 0; 1082 } 1083 1084 static int __init grab_mblocks(struct mdesc_handle *md) 1085 { 1086 unsigned long paddr; 1087 int count = 0; 1088 u64 node; 1089 1090 mdesc_for_each_node_by_name(md, node, "mblock") 1091 count++; 1092 if (!count) 1093 return -ENOENT; 1094 1095 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock), 1096 SMP_CACHE_BYTES); 1097 if (!paddr) 1098 return -ENOMEM; 1099 1100 mblocks = __va(paddr); 1101 num_mblocks = count; 1102 1103 count = 0; 1104 mdesc_for_each_node_by_name(md, node, "mblock") { 1105 struct mdesc_mblock *m = &mblocks[count++]; 1106 const u64 *val; 1107 1108 val = mdesc_get_property(md, node, "base", NULL); 1109 m->base = *val; 1110 val = mdesc_get_property(md, node, "size", NULL); 1111 m->size = *val; 1112 val = mdesc_get_property(md, node, 1113 "address-congruence-offset", NULL); 1114 1115 /* The address-congruence-offset property is optional. 1116 * Explicity zero it be identifty this. 
1117 */ 1118 if (val) 1119 m->offset = *val; 1120 else 1121 m->offset = 0UL; 1122 1123 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1124 count - 1, m->base, m->size, m->offset); 1125 } 1126 1127 return 0; 1128 } 1129 1130 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, 1131 u64 grp, cpumask_t *mask) 1132 { 1133 u64 arc; 1134 1135 cpumask_clear(mask); 1136 1137 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { 1138 u64 target = mdesc_arc_target(md, arc); 1139 const char *name = mdesc_node_name(md, target); 1140 const u64 *id; 1141 1142 if (strcmp(name, "cpu")) 1143 continue; 1144 id = mdesc_get_property(md, target, "id", NULL); 1145 if (*id < nr_cpu_ids) 1146 cpumask_set_cpu(*id, mask); 1147 } 1148 } 1149 1150 static struct mdesc_mlgroup * __init find_mlgroup(u64 node) 1151 { 1152 int i; 1153 1154 for (i = 0; i < num_mlgroups; i++) { 1155 struct mdesc_mlgroup *m = &mlgroups[i]; 1156 if (m->node == node) 1157 return m; 1158 } 1159 return NULL; 1160 } 1161 1162 int __node_distance(int from, int to) 1163 { 1164 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) { 1165 pr_warn("Returning default NUMA distance value for %d->%d\n", 1166 from, to); 1167 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE; 1168 } 1169 return numa_latency[from][to]; 1170 } 1171 1172 static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) 1173 { 1174 int i; 1175 1176 for (i = 0; i < MAX_NUMNODES; i++) { 1177 struct node_mem_mask *n = &node_masks[i]; 1178 1179 if ((grp->mask == n->mask) && (grp->match == n->val)) 1180 break; 1181 } 1182 return i; 1183 } 1184 1185 static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, 1186 int index) 1187 { 1188 u64 arc; 1189 1190 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 1191 int tnode; 1192 u64 target = mdesc_arc_target(md, arc); 1193 struct mdesc_mlgroup *m = find_mlgroup(target); 1194 1195 if (!m) 1196 continue; 1197 tnode = find_best_numa_node_for_mlgroup(m); 1198 if (tnode == MAX_NUMNODES) 1199 continue; 1200 numa_latency[index][tnode] = m->latency; 1201 } 1202 } 1203 1204 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, 1205 int index) 1206 { 1207 struct mdesc_mlgroup *candidate = NULL; 1208 u64 arc, best_latency = ~(u64)0; 1209 struct node_mem_mask *n; 1210 1211 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 1212 u64 target = mdesc_arc_target(md, arc); 1213 struct mdesc_mlgroup *m = find_mlgroup(target); 1214 if (!m) 1215 continue; 1216 if (m->latency < best_latency) { 1217 candidate = m; 1218 best_latency = m->latency; 1219 } 1220 } 1221 if (!candidate) 1222 return -ENOENT; 1223 1224 if (num_node_masks != index) { 1225 printk(KERN_ERR "Inconsistent NUMA state, " 1226 "index[%d] != num_node_masks[%d]\n", 1227 index, num_node_masks); 1228 return -EINVAL; 1229 } 1230 1231 n = &node_masks[num_node_masks++]; 1232 1233 n->mask = candidate->mask; 1234 n->val = candidate->match; 1235 1236 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n", 1237 index, n->mask, n->val, candidate->latency); 1238 1239 return 0; 1240 } 1241 1242 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, 1243 int index) 1244 { 1245 cpumask_t mask; 1246 int cpu; 1247 1248 numa_parse_mdesc_group_cpus(md, grp, &mask); 1249 1250 for_each_cpu(cpu, &mask) 1251 numa_cpu_lookup_table[cpu] = index; 1252 cpumask_copy(&numa_cpumask_lookup_table[index], &mask); 1253 1254 if (numa_debug) { 1255 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); 1256 
for_each_cpu(cpu, &mask) 1257 printk("%d ", cpu); 1258 printk("]\n"); 1259 } 1260 1261 return numa_attach_mlgroup(md, grp, index); 1262 } 1263 1264 static int __init numa_parse_mdesc(void) 1265 { 1266 struct mdesc_handle *md = mdesc_grab(); 1267 int i, j, err, count; 1268 u64 node; 1269 1270 /* Some sane defaults for numa latency values */ 1271 for (i = 0; i < MAX_NUMNODES; i++) { 1272 for (j = 0; j < MAX_NUMNODES; j++) 1273 numa_latency[i][j] = (i == j) ? 1274 LOCAL_DISTANCE : REMOTE_DISTANCE; 1275 } 1276 1277 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1278 if (node == MDESC_NODE_NULL) { 1279 mdesc_release(md); 1280 return -ENOENT; 1281 } 1282 1283 err = grab_mblocks(md); 1284 if (err < 0) 1285 goto out; 1286 1287 err = grab_mlgroups(md); 1288 if (err < 0) 1289 goto out; 1290 1291 count = 0; 1292 mdesc_for_each_node_by_name(md, node, "group") { 1293 err = numa_parse_mdesc_group(md, node, count); 1294 if (err < 0) 1295 break; 1296 count++; 1297 } 1298 1299 count = 0; 1300 mdesc_for_each_node_by_name(md, node, "group") { 1301 find_numa_latencies_for_group(md, node, count); 1302 count++; 1303 } 1304 1305 /* Normalize numa latency matrix according to ACPI SLIT spec. */ 1306 for (i = 0; i < MAX_NUMNODES; i++) { 1307 u64 self_latency = numa_latency[i][i]; 1308 1309 for (j = 0; j < MAX_NUMNODES; j++) { 1310 numa_latency[i][j] = 1311 (numa_latency[i][j] * LOCAL_DISTANCE) / 1312 self_latency; 1313 } 1314 } 1315 1316 add_node_ranges(); 1317 1318 for (i = 0; i < num_node_masks; i++) { 1319 allocate_node_data(i); 1320 node_set_online(i); 1321 } 1322 1323 err = 0; 1324 out: 1325 mdesc_release(md); 1326 return err; 1327 } 1328 1329 static int __init numa_parse_jbus(void) 1330 { 1331 unsigned long cpu, index; 1332 1333 /* NUMA node id is encoded in bits 36 and higher, and there is 1334 * a 1-to-1 mapping from CPU ID to NUMA node ID. 
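 * That is why each node_masks[] entry below matches on the bits above
 * bit 36 (mask = ~((1UL << 36UL) - 1UL)) with the cpu id shifted into
 * that position as the value.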
1335 */ 1336 index = 0; 1337 for_each_present_cpu(cpu) { 1338 numa_cpu_lookup_table[cpu] = index; 1339 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu)); 1340 node_masks[index].mask = ~((1UL << 36UL) - 1UL); 1341 node_masks[index].val = cpu << 36UL; 1342 1343 index++; 1344 } 1345 num_node_masks = index; 1346 1347 add_node_ranges(); 1348 1349 for (index = 0; index < num_node_masks; index++) { 1350 allocate_node_data(index); 1351 node_set_online(index); 1352 } 1353 1354 return 0; 1355 } 1356 1357 static int __init numa_parse_sun4u(void) 1358 { 1359 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1360 unsigned long ver; 1361 1362 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 1363 if ((ver >> 32UL) == __JALAPENO_ID || 1364 (ver >> 32UL) == __SERRANO_ID) 1365 return numa_parse_jbus(); 1366 } 1367 return -1; 1368 } 1369 1370 static int __init bootmem_init_numa(void) 1371 { 1372 int err = -1; 1373 1374 numadbg("bootmem_init_numa()\n"); 1375 1376 if (numa_enabled) { 1377 if (tlb_type == hypervisor) 1378 err = numa_parse_mdesc(); 1379 else 1380 err = numa_parse_sun4u(); 1381 } 1382 return err; 1383 } 1384 1385 #else 1386 1387 static int bootmem_init_numa(void) 1388 { 1389 return -1; 1390 } 1391 1392 #endif 1393 1394 static void __init bootmem_init_nonnuma(void) 1395 { 1396 unsigned long top_of_ram = memblock_end_of_DRAM(); 1397 unsigned long total_ram = memblock_phys_mem_size(); 1398 1399 numadbg("bootmem_init_nonnuma()\n"); 1400 1401 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", 1402 top_of_ram, total_ram); 1403 printk(KERN_INFO "Memory hole size: %ldMB\n", 1404 (top_of_ram - total_ram) >> 20); 1405 1406 init_node_masks_nonnuma(); 1407 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 1408 allocate_node_data(0); 1409 node_set_online(0); 1410 } 1411 1412 static unsigned long __init bootmem_init(unsigned long phys_base) 1413 { 1414 unsigned long end_pfn; 1415 1416 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 1417 max_pfn = max_low_pfn = end_pfn; 1418 min_low_pfn = (phys_base >> PAGE_SHIFT); 1419 1420 if (bootmem_init_numa() < 0) 1421 bootmem_init_nonnuma(); 1422 1423 /* Dump memblock with node info. 
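 * (memblock_dump_all() is a no-op unless memblock debugging was
 * enabled on the kernel command line.)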
*/ 1424 memblock_dump_all(); 1425 1426 /* XXX cpu notifier XXX */ 1427 1428 sparse_memory_present_with_active_regions(MAX_NUMNODES); 1429 sparse_init(); 1430 1431 return end_pfn; 1432 } 1433 1434 static struct linux_prom64_registers pall[MAX_BANKS] __initdata; 1435 static int pall_ents __initdata; 1436 1437 static unsigned long max_phys_bits = 40; 1438 1439 bool kern_addr_valid(unsigned long addr) 1440 { 1441 pgd_t *pgd; 1442 pud_t *pud; 1443 pmd_t *pmd; 1444 pte_t *pte; 1445 1446 if ((long)addr < 0L) { 1447 unsigned long pa = __pa(addr); 1448 1449 if ((addr >> max_phys_bits) != 0UL) 1450 return false; 1451 1452 return pfn_valid(pa >> PAGE_SHIFT); 1453 } 1454 1455 if (addr >= (unsigned long) KERNBASE && 1456 addr < (unsigned long)&_end) 1457 return true; 1458 1459 pgd = pgd_offset_k(addr); 1460 if (pgd_none(*pgd)) 1461 return 0; 1462 1463 pud = pud_offset(pgd, addr); 1464 if (pud_none(*pud)) 1465 return 0; 1466 1467 if (pud_large(*pud)) 1468 return pfn_valid(pud_pfn(*pud)); 1469 1470 pmd = pmd_offset(pud, addr); 1471 if (pmd_none(*pmd)) 1472 return 0; 1473 1474 if (pmd_large(*pmd)) 1475 return pfn_valid(pmd_pfn(*pmd)); 1476 1477 pte = pte_offset_kernel(pmd, addr); 1478 if (pte_none(*pte)) 1479 return 0; 1480 1481 return pfn_valid(pte_pfn(*pte)); 1482 } 1483 EXPORT_SYMBOL(kern_addr_valid); 1484 1485 static unsigned long __ref kernel_map_hugepud(unsigned long vstart, 1486 unsigned long vend, 1487 pud_t *pud) 1488 { 1489 const unsigned long mask16gb = (1UL << 34) - 1UL; 1490 u64 pte_val = vstart; 1491 1492 /* Each PUD is 8GB */ 1493 if ((vstart & mask16gb) || 1494 (vend - vstart <= mask16gb)) { 1495 pte_val ^= kern_linear_pte_xor[2]; 1496 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; 1497 1498 return vstart + PUD_SIZE; 1499 } 1500 1501 pte_val ^= kern_linear_pte_xor[3]; 1502 pte_val |= _PAGE_PUD_HUGE; 1503 1504 vend = vstart + mask16gb + 1UL; 1505 while (vstart < vend) { 1506 pud_val(*pud) = pte_val; 1507 1508 pte_val += PUD_SIZE; 1509 vstart += PUD_SIZE; 1510 pud++; 1511 } 1512 return vstart; 1513 } 1514 1515 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, 1516 bool guard) 1517 { 1518 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) 1519 return true; 1520 1521 return false; 1522 } 1523 1524 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, 1525 unsigned long vend, 1526 pmd_t *pmd) 1527 { 1528 const unsigned long mask256mb = (1UL << 28) - 1UL; 1529 const unsigned long mask2gb = (1UL << 31) - 1UL; 1530 u64 pte_val = vstart; 1531 1532 /* Each PMD is 8MB */ 1533 if ((vstart & mask256mb) || 1534 (vend - vstart <= mask256mb)) { 1535 pte_val ^= kern_linear_pte_xor[0]; 1536 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; 1537 1538 return vstart + PMD_SIZE; 1539 } 1540 1541 if ((vstart & mask2gb) || 1542 (vend - vstart <= mask2gb)) { 1543 pte_val ^= kern_linear_pte_xor[1]; 1544 pte_val |= _PAGE_PMD_HUGE; 1545 vend = vstart + mask256mb + 1UL; 1546 } else { 1547 pte_val ^= kern_linear_pte_xor[2]; 1548 pte_val |= _PAGE_PMD_HUGE; 1549 vend = vstart + mask2gb + 1UL; 1550 } 1551 1552 while (vstart < vend) { 1553 pmd_val(*pmd) = pte_val; 1554 1555 pte_val += PMD_SIZE; 1556 vstart += PMD_SIZE; 1557 pmd++; 1558 } 1559 1560 return vstart; 1561 } 1562 1563 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, 1564 bool guard) 1565 { 1566 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) 1567 return true; 1568 1569 return false; 1570 } 1571 1572 static unsigned long __ref kernel_map_range(unsigned long pstart, 1573 
unsigned long pend, pgprot_t prot, 1574 bool use_huge) 1575 { 1576 unsigned long vstart = PAGE_OFFSET + pstart; 1577 unsigned long vend = PAGE_OFFSET + pend; 1578 unsigned long alloc_bytes = 0UL; 1579 1580 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { 1581 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", 1582 vstart, vend); 1583 prom_halt(); 1584 } 1585 1586 while (vstart < vend) { 1587 unsigned long this_end, paddr = __pa(vstart); 1588 pgd_t *pgd = pgd_offset_k(vstart); 1589 pud_t *pud; 1590 pmd_t *pmd; 1591 pte_t *pte; 1592 1593 if (pgd_none(*pgd)) { 1594 pud_t *new; 1595 1596 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1597 alloc_bytes += PAGE_SIZE; 1598 pgd_populate(&init_mm, pgd, new); 1599 } 1600 pud = pud_offset(pgd, vstart); 1601 if (pud_none(*pud)) { 1602 pmd_t *new; 1603 1604 if (kernel_can_map_hugepud(vstart, vend, use_huge)) { 1605 vstart = kernel_map_hugepud(vstart, vend, pud); 1606 continue; 1607 } 1608 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1609 alloc_bytes += PAGE_SIZE; 1610 pud_populate(&init_mm, pud, new); 1611 } 1612 1613 pmd = pmd_offset(pud, vstart); 1614 if (pmd_none(*pmd)) { 1615 pte_t *new; 1616 1617 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { 1618 vstart = kernel_map_hugepmd(vstart, vend, pmd); 1619 continue; 1620 } 1621 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1622 alloc_bytes += PAGE_SIZE; 1623 pmd_populate_kernel(&init_mm, pmd, new); 1624 } 1625 1626 pte = pte_offset_kernel(pmd, vstart); 1627 this_end = (vstart + PMD_SIZE) & PMD_MASK; 1628 if (this_end > vend) 1629 this_end = vend; 1630 1631 while (vstart < this_end) { 1632 pte_val(*pte) = (paddr | pgprot_val(prot)); 1633 1634 vstart += PAGE_SIZE; 1635 paddr += PAGE_SIZE; 1636 pte++; 1637 } 1638 } 1639 1640 return alloc_bytes; 1641 } 1642 1643 static void __init flush_all_kernel_tsbs(void) 1644 { 1645 int i; 1646 1647 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { 1648 struct tsb *ent = &swapper_tsb[i]; 1649 1650 ent->tag = (1UL << TSB_TAG_INVALID_BIT); 1651 } 1652 #ifndef CONFIG_DEBUG_PAGEALLOC 1653 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { 1654 struct tsb *ent = &swapper_4m_tsb[i]; 1655 1656 ent->tag = (1UL << TSB_TAG_INVALID_BIT); 1657 } 1658 #endif 1659 } 1660 1661 extern unsigned int kvmap_linear_patch[1]; 1662 1663 static void __init kernel_physical_mapping_init(void) 1664 { 1665 unsigned long i, mem_alloced = 0UL; 1666 bool use_huge = true; 1667 1668 #ifdef CONFIG_DEBUG_PAGEALLOC 1669 use_huge = false; 1670 #endif 1671 for (i = 0; i < pall_ents; i++) { 1672 unsigned long phys_start, phys_end; 1673 1674 phys_start = pall[i].phys_addr; 1675 phys_end = phys_start + pall[i].reg_size; 1676 1677 mem_alloced += kernel_map_range(phys_start, phys_end, 1678 PAGE_KERNEL, use_huge); 1679 } 1680 1681 printk("Allocated %ld bytes for kernel page tables.\n", 1682 mem_alloced); 1683 1684 kvmap_linear_patch[0] = 0x01000000; /* nop */ 1685 flushi(&kvmap_linear_patch[0]); 1686 1687 flush_all_kernel_tsbs(); 1688 1689 __flush_tlb_all(); 1690 } 1691 1692 #ifdef CONFIG_DEBUG_PAGEALLOC 1693 void __kernel_map_pages(struct page *page, int numpages, int enable) 1694 { 1695 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; 1696 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); 1697 1698 kernel_map_range(phys_start, phys_end, 1699 (enable ? 
PAGE_KERNEL : __pgprot(0)), false); 1700 1701 flush_tsb_kernel_range(PAGE_OFFSET + phys_start, 1702 PAGE_OFFSET + phys_end); 1703 1704 /* we should perform an IPI and flush all tlbs, 1705 * but that can deadlock->flush only current cpu. 1706 */ 1707 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, 1708 PAGE_OFFSET + phys_end); 1709 } 1710 #endif 1711 1712 unsigned long __init find_ecache_flush_span(unsigned long size) 1713 { 1714 int i; 1715 1716 for (i = 0; i < pavail_ents; i++) { 1717 if (pavail[i].reg_size >= size) 1718 return pavail[i].phys_addr; 1719 } 1720 1721 return ~0UL; 1722 } 1723 1724 unsigned long PAGE_OFFSET; 1725 EXPORT_SYMBOL(PAGE_OFFSET); 1726 1727 unsigned long VMALLOC_END = 0x0000010000000000UL; 1728 EXPORT_SYMBOL(VMALLOC_END); 1729 1730 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; 1731 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; 1732 1733 static void __init setup_page_offset(void) 1734 { 1735 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1736 /* Cheetah/Panther support a full 64-bit virtual 1737 * address, so we can use all that our page tables 1738 * support. 1739 */ 1740 sparc64_va_hole_top = 0xfff0000000000000UL; 1741 sparc64_va_hole_bottom = 0x0010000000000000UL; 1742 1743 max_phys_bits = 42; 1744 } else if (tlb_type == hypervisor) { 1745 switch (sun4v_chip_type) { 1746 case SUN4V_CHIP_NIAGARA1: 1747 case SUN4V_CHIP_NIAGARA2: 1748 /* T1 and T2 support 48-bit virtual addresses. */ 1749 sparc64_va_hole_top = 0xffff800000000000UL; 1750 sparc64_va_hole_bottom = 0x0000800000000000UL; 1751 1752 max_phys_bits = 39; 1753 break; 1754 case SUN4V_CHIP_NIAGARA3: 1755 /* T3 supports 48-bit virtual addresses. */ 1756 sparc64_va_hole_top = 0xffff800000000000UL; 1757 sparc64_va_hole_bottom = 0x0000800000000000UL; 1758 1759 max_phys_bits = 43; 1760 break; 1761 case SUN4V_CHIP_NIAGARA4: 1762 case SUN4V_CHIP_NIAGARA5: 1763 case SUN4V_CHIP_SPARC64X: 1764 case SUN4V_CHIP_SPARC_M6: 1765 /* T4 and later support 52-bit virtual addresses. */ 1766 sparc64_va_hole_top = 0xfff8000000000000UL; 1767 sparc64_va_hole_bottom = 0x0008000000000000UL; 1768 max_phys_bits = 47; 1769 break; 1770 case SUN4V_CHIP_SPARC_M7: 1771 default: 1772 /* M7 and later support 52-bit virtual addresses. 
*/ 1773 sparc64_va_hole_top = 0xfff8000000000000UL; 1774 sparc64_va_hole_bottom = 0x0008000000000000UL; 1775 max_phys_bits = 49; 1776 break; 1777 } 1778 } 1779 1780 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) { 1781 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n", 1782 max_phys_bits); 1783 prom_halt(); 1784 } 1785 1786 PAGE_OFFSET = sparc64_va_hole_top; 1787 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + 1788 (sparc64_va_hole_bottom >> 2)); 1789 1790 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", 1791 PAGE_OFFSET, max_phys_bits); 1792 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", 1793 VMALLOC_START, VMALLOC_END); 1794 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", 1795 VMEMMAP_BASE, VMEMMAP_BASE << 1); 1796 } 1797 1798 static void __init tsb_phys_patch(void) 1799 { 1800 struct tsb_ldquad_phys_patch_entry *pquad; 1801 struct tsb_phys_patch_entry *p; 1802 1803 pquad = &__tsb_ldquad_phys_patch; 1804 while (pquad < &__tsb_ldquad_phys_patch_end) { 1805 unsigned long addr = pquad->addr; 1806 1807 if (tlb_type == hypervisor) 1808 *(unsigned int *) addr = pquad->sun4v_insn; 1809 else 1810 *(unsigned int *) addr = pquad->sun4u_insn; 1811 wmb(); 1812 __asm__ __volatile__("flush %0" 1813 : /* no outputs */ 1814 : "r" (addr)); 1815 1816 pquad++; 1817 } 1818 1819 p = &__tsb_phys_patch; 1820 while (p < &__tsb_phys_patch_end) { 1821 unsigned long addr = p->addr; 1822 1823 *(unsigned int *) addr = p->insn; 1824 wmb(); 1825 __asm__ __volatile__("flush %0" 1826 : /* no outputs */ 1827 : "r" (addr)); 1828 1829 p++; 1830 } 1831 } 1832 1833 /* Don't mark as init, we give this to the Hypervisor. */ 1834 #ifndef CONFIG_DEBUG_PAGEALLOC 1835 #define NUM_KTSB_DESCR 2 1836 #else 1837 #define NUM_KTSB_DESCR 1 1838 #endif 1839 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; 1840 1841 /* The swapper TSBs are loaded with a base sequence of: 1842 * 1843 * sethi %uhi(SYMBOL), REG1 1844 * sethi %hi(SYMBOL), REG2 1845 * or REG1, %ulo(SYMBOL), REG1 1846 * or REG2, %lo(SYMBOL), REG2 1847 * sllx REG1, 32, REG1 1848 * or REG1, REG2, REG1 1849 * 1850 * When we use physical addressing for the TSB accesses, we patch the 1851 * first four instructions in the above sequence. 
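 * sethi carries a 22-bit immediate and or a 13-bit one, so the patcher
 * below splits each 32-bit half of the TSB physical address into a
 * ">> 10" piece for the sethi and an "& 0x3ff" piece for the or.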
1852 */ 1853 1854 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) 1855 { 1856 unsigned long high_bits, low_bits; 1857 1858 high_bits = (pa >> 32) & 0xffffffff; 1859 low_bits = (pa >> 0) & 0xffffffff; 1860 1861 while (start < end) { 1862 unsigned int *ia = (unsigned int *)(unsigned long)*start; 1863 1864 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10); 1865 __asm__ __volatile__("flush %0" : : "r" (ia)); 1866 1867 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10); 1868 __asm__ __volatile__("flush %0" : : "r" (ia + 1)); 1869 1870 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff); 1871 __asm__ __volatile__("flush %0" : : "r" (ia + 2)); 1872 1873 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff); 1874 __asm__ __volatile__("flush %0" : : "r" (ia + 3)); 1875 1876 start++; 1877 } 1878 } 1879 1880 static void ktsb_phys_patch(void) 1881 { 1882 extern unsigned int __swapper_tsb_phys_patch; 1883 extern unsigned int __swapper_tsb_phys_patch_end; 1884 unsigned long ktsb_pa; 1885 1886 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1887 patch_one_ktsb_phys(&__swapper_tsb_phys_patch, 1888 &__swapper_tsb_phys_patch_end, ktsb_pa); 1889 #ifndef CONFIG_DEBUG_PAGEALLOC 1890 { 1891 extern unsigned int __swapper_4m_tsb_phys_patch; 1892 extern unsigned int __swapper_4m_tsb_phys_patch_end; 1893 ktsb_pa = (kern_base + 1894 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1895 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, 1896 &__swapper_4m_tsb_phys_patch_end, ktsb_pa); 1897 } 1898 #endif 1899 } 1900 1901 static void __init sun4v_ktsb_init(void) 1902 { 1903 unsigned long ktsb_pa; 1904 1905 /* First KTSB for PAGE_SIZE mappings. */ 1906 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1907 1908 switch (PAGE_SIZE) { 1909 case 8 * 1024: 1910 default: 1911 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; 1912 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; 1913 break; 1914 1915 case 64 * 1024: 1916 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; 1917 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; 1918 break; 1919 1920 case 512 * 1024: 1921 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; 1922 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; 1923 break; 1924 1925 case 4 * 1024 * 1024: 1926 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; 1927 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; 1928 break; 1929 } 1930 1931 ktsb_descr[0].assoc = 1; 1932 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; 1933 ktsb_descr[0].ctx_idx = 0; 1934 ktsb_descr[0].tsb_base = ktsb_pa; 1935 ktsb_descr[0].resv = 0; 1936 1937 #ifndef CONFIG_DEBUG_PAGEALLOC 1938 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. 
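 * Its page size mask is ANDed with cpu_pgsz_mask below so that only
 * sizes this cpu actually implements are enabled.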
*/ 1939 ktsb_pa = (kern_base + 1940 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1941 1942 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; 1943 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB | 1944 HV_PGSZ_MASK_256MB | 1945 HV_PGSZ_MASK_2GB | 1946 HV_PGSZ_MASK_16GB) & 1947 cpu_pgsz_mask); 1948 ktsb_descr[1].assoc = 1; 1949 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; 1950 ktsb_descr[1].ctx_idx = 0; 1951 ktsb_descr[1].tsb_base = ktsb_pa; 1952 ktsb_descr[1].resv = 0; 1953 #endif 1954 } 1955 1956 void sun4v_ktsb_register(void) 1957 { 1958 unsigned long pa, ret; 1959 1960 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1961 1962 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); 1963 if (ret != 0) { 1964 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " 1965 "errors with %lx\n", pa, ret); 1966 prom_halt(); 1967 } 1968 } 1969 1970 static void __init sun4u_linear_pte_xor_finalize(void) 1971 { 1972 #ifndef CONFIG_DEBUG_PAGEALLOC 1973 /* This is where we would add Panther support for 1974 * 32MB and 256MB pages. 1975 */ 1976 #endif 1977 } 1978 1979 static void __init sun4v_linear_pte_xor_finalize(void) 1980 { 1981 unsigned long pagecv_flag; 1982 1983 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead 1984 * enables MCD error. Do not set bit 9 on M7 processor. 1985 */ 1986 switch (sun4v_chip_type) { 1987 case SUN4V_CHIP_SPARC_M7: 1988 pagecv_flag = 0x00; 1989 break; 1990 default: 1991 pagecv_flag = _PAGE_CV_4V; 1992 break; 1993 } 1994 #ifndef CONFIG_DEBUG_PAGEALLOC 1995 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { 1996 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 1997 PAGE_OFFSET; 1998 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | 1999 _PAGE_P_4V | _PAGE_W_4V); 2000 } else { 2001 kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; 2002 } 2003 2004 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { 2005 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ 2006 PAGE_OFFSET; 2007 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | 2008 _PAGE_P_4V | _PAGE_W_4V); 2009 } else { 2010 kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; 2011 } 2012 2013 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { 2014 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ 2015 PAGE_OFFSET; 2016 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | 2017 _PAGE_P_4V | _PAGE_W_4V); 2018 } else { 2019 kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; 2020 } 2021 #endif 2022 } 2023 2024 /* paging_init() sets up the page tables */ 2025 2026 static unsigned long last_valid_pfn; 2027 2028 static void sun4u_pgprot_init(void); 2029 static void sun4v_pgprot_init(void); 2030 2031 static phys_addr_t __init available_memory(void) 2032 { 2033 phys_addr_t available = 0ULL; 2034 phys_addr_t pa_start, pa_end; 2035 u64 i; 2036 2037 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, 2038 &pa_end, NULL) 2039 available = available + (pa_end - pa_start); 2040 2041 return available; 2042 } 2043 2044 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 2045 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 2046 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 2047 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 2048 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 2049 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 2050 2051 /* We need to exclude reserved regions. This exclusion will include 2052 * vmlinux and initrd. 
To be more precise the initrd size could be used to 2053 * compute a new lower limit because it is freed later during initialization. 2054 */ 2055 static void __init reduce_memory(phys_addr_t limit_ram) 2056 { 2057 phys_addr_t avail_ram = available_memory(); 2058 phys_addr_t pa_start, pa_end; 2059 u64 i; 2060 2061 if (limit_ram >= avail_ram) 2062 return; 2063 2064 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, 2065 &pa_end, NULL) { 2066 phys_addr_t region_size = pa_end - pa_start; 2067 phys_addr_t clip_start = pa_start; 2068 2069 avail_ram = avail_ram - region_size; 2070 /* Are we consuming too much? */ 2071 if (avail_ram < limit_ram) { 2072 phys_addr_t give_back = limit_ram - avail_ram; 2073 2074 region_size = region_size - give_back; 2075 clip_start = clip_start + give_back; 2076 } 2077 2078 memblock_remove(clip_start, region_size); 2079 2080 if (avail_ram <= limit_ram) 2081 break; 2082 i = 0UL; 2083 } 2084 } 2085 2086 void __init paging_init(void) 2087 { 2088 unsigned long end_pfn, shift, phys_base; 2089 unsigned long real_end, i; 2090 int node; 2091 2092 setup_page_offset(); 2093 2094 /* These build time checkes make sure that the dcache_dirty_cpu() 2095 * page->flags usage will work. 2096 * 2097 * When a page gets marked as dcache-dirty, we store the 2098 * cpu number starting at bit 32 in the page->flags. Also, 2099 * functions like clear_dcache_dirty_cpu use the cpu mask 2100 * in 13-bit signed-immediate instruction fields. 2101 */ 2102 2103 /* 2104 * Page flags must not reach into upper 32 bits that are used 2105 * for the cpu number 2106 */ 2107 BUILD_BUG_ON(NR_PAGEFLAGS > 32); 2108 2109 /* 2110 * The bit fields placed in the high range must not reach below 2111 * the 32 bit boundary. Otherwise we cannot place the cpu field 2112 * at the 32 bit boundary. 2113 */ 2114 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + 2115 ilog2(roundup_pow_of_two(NR_CPUS)) > 32); 2116 2117 BUILD_BUG_ON(NR_CPUS > 4096); 2118 2119 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; 2120 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 2121 2122 /* Invalidate both kernel TSBs. */ 2123 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 2124 #ifndef CONFIG_DEBUG_PAGEALLOC 2125 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 2126 #endif 2127 2128 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde 2129 * bit on M7 processor. This is a conflicting usage of the same 2130 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption 2131 * Detection error on all pages and this will lead to problems 2132 * later. Kernel does not run with MCD enabled and hence rest 2133 * of the required steps to fully configure memory corruption 2134 * detection are not taken. We need to ensure TTE.mcde is not 2135 * set on M7 processor. Compute the value of cacheability 2136 * flag for use later taking this into consideration. 2137 */ 2138 switch (sun4v_chip_type) { 2139 case SUN4V_CHIP_SPARC_M7: 2140 page_cache4v_flag = _PAGE_CP_4V; 2141 break; 2142 default: 2143 page_cache4v_flag = _PAGE_CACHE_4V; 2144 break; 2145 } 2146 2147 if (tlb_type == hypervisor) 2148 sun4v_pgprot_init(); 2149 else 2150 sun4u_pgprot_init(); 2151 2152 if (tlb_type == cheetah_plus || 2153 tlb_type == hypervisor) { 2154 tsb_phys_patch(); 2155 ktsb_phys_patch(); 2156 } 2157 2158 if (tlb_type == hypervisor) 2159 sun4v_patch_tlb_handlers(); 2160 2161 /* Find available physical memory... 2162 * 2163 * Read it twice in order to work around a bug in openfirmware. 
2164 * The call to grab this table itself can cause openfirmware to 2165 * allocate memory, which in turn can take away some space from 2166 * the list of available memory. Reading it twice makes sure 2167 * we really do get the final value. 2168 */ 2169 read_obp_translations(); 2170 read_obp_memory("reg", &pall[0], &pall_ents); 2171 read_obp_memory("available", &pavail[0], &pavail_ents); 2172 read_obp_memory("available", &pavail[0], &pavail_ents); 2173 2174 phys_base = 0xffffffffffffffffUL; 2175 for (i = 0; i < pavail_ents; i++) { 2176 phys_base = min(phys_base, pavail[i].phys_addr); 2177 memblock_add(pavail[i].phys_addr, pavail[i].reg_size); 2178 } 2179 2180 memblock_reserve(kern_base, kern_size); 2181 2182 find_ramdisk(phys_base); 2183 2184 if (cmdline_memory_size) 2185 reduce_memory(cmdline_memory_size); 2186 2187 memblock_allow_resize(); 2188 memblock_dump_all(); 2189 2190 set_bit(0, mmu_context_bmap); 2191 2192 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 2193 2194 real_end = (unsigned long)_end; 2195 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); 2196 printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 2197 num_kernel_image_mappings); 2198 2199 /* Set kernel pgd to upper alias so physical page computations 2200 * work. 2201 */ 2202 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 2203 2204 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); 2205 2206 inherit_prom_mappings(); 2207 2208 /* Ok, we can use our TLB miss and window trap handlers safely. */ 2209 setup_tba(); 2210 2211 __flush_tlb_all(); 2212 2213 prom_build_devicetree(); 2214 of_populate_present_mask(); 2215 #ifndef CONFIG_SMP 2216 of_fill_in_cpu_data(); 2217 #endif 2218 2219 if (tlb_type == hypervisor) { 2220 sun4v_mdesc_init(); 2221 mdesc_populate_present_mask(cpu_all_mask); 2222 #ifndef CONFIG_SMP 2223 mdesc_fill_in_cpu_data(cpu_all_mask); 2224 #endif 2225 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask); 2226 2227 sun4v_linear_pte_xor_finalize(); 2228 2229 sun4v_ktsb_init(); 2230 sun4v_ktsb_register(); 2231 } else { 2232 unsigned long impl, ver; 2233 2234 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | 2235 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); 2236 2237 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); 2238 impl = ((ver >> 32) & 0xffff); 2239 if (impl == PANTHER_IMPL) 2240 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB | 2241 HV_PGSZ_MASK_256MB); 2242 2243 sun4u_linear_pte_xor_finalize(); 2244 } 2245 2246 /* Flush the TLBs and the 4M TSB so that the updated linear 2247 * pte XOR settings are realized for all mappings. 2248 */ 2249 __flush_tlb_all(); 2250 #ifndef CONFIG_DEBUG_PAGEALLOC 2251 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 2252 #endif 2253 __flush_tlb_all(); 2254 2255 /* Setup bootmem... */ 2256 last_valid_pfn = end_pfn = bootmem_init(phys_base); 2257 2258 /* Once the OF device tree and MDESC have been setup, we know 2259 * the list of possible cpus. Therefore we can allocate the 2260 * IRQ stacks. 
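* Each possible cpu gets one softirq and one hardirq stack; each stack is
* THREAD_SIZE bytes, THREAD_SIZE aligned, and is allocated from bootmem on
* that cpu's home node.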
2261 */ 2262 for_each_possible_cpu(i) { 2263 node = cpu_to_node(i); 2264 2265 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 2266 THREAD_SIZE, 2267 THREAD_SIZE, 0); 2268 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 2269 THREAD_SIZE, 2270 THREAD_SIZE, 0); 2271 } 2272 2273 kernel_physical_mapping_init(); 2274 2275 { 2276 unsigned long max_zone_pfns[MAX_NR_ZONES]; 2277 2278 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 2279 2280 max_zone_pfns[ZONE_NORMAL] = end_pfn; 2281 2282 free_area_init_nodes(max_zone_pfns); 2283 } 2284 2285 printk("Booting Linux...\n"); 2286 } 2287 2288 int page_in_phys_avail(unsigned long paddr) 2289 { 2290 int i; 2291 2292 paddr &= PAGE_MASK; 2293 2294 for (i = 0; i < pavail_ents; i++) { 2295 unsigned long start, end; 2296 2297 start = pavail[i].phys_addr; 2298 end = start + pavail[i].reg_size; 2299 2300 if (paddr >= start && paddr < end) 2301 return 1; 2302 } 2303 if (paddr >= kern_base && paddr < (kern_base + kern_size)) 2304 return 1; 2305 #ifdef CONFIG_BLK_DEV_INITRD 2306 if (paddr >= __pa(initrd_start) && 2307 paddr < __pa(PAGE_ALIGN(initrd_end))) 2308 return 1; 2309 #endif 2310 2311 return 0; 2312 } 2313 2314 static void __init register_page_bootmem_info(void) 2315 { 2316 #ifdef CONFIG_NEED_MULTIPLE_NODES 2317 int i; 2318 2319 for_each_online_node(i) 2320 if (NODE_DATA(i)->node_spanned_pages) 2321 register_page_bootmem_info_node(NODE_DATA(i)); 2322 #endif 2323 } 2324 void __init mem_init(void) 2325 { 2326 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 2327 2328 register_page_bootmem_info(); 2329 free_all_bootmem(); 2330 2331 /* 2332 * Set up the zero page, mark it reserved, so that page count 2333 * is not manipulated when freeing the page from user ptes. 2334 */ 2335 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); 2336 if (mem_map_zero == NULL) { 2337 prom_printf("paging_init: Cannot alloc zero page.\n"); 2338 prom_halt(); 2339 } 2340 mark_page_reserved(mem_map_zero); 2341 2342 mem_init_print_info(NULL); 2343 2344 if (tlb_type == cheetah || tlb_type == cheetah_plus) 2345 cheetah_ecache_flush_init(); 2346 } 2347 2348 void free_initmem(void) 2349 { 2350 unsigned long addr, initend; 2351 int do_free = 1; 2352 2353 /* If the physical memory maps were trimmed by kernel command 2354 * line options, don't even try freeing this initmem stuff up. 2355 * The kernel image could have been in the trimmed out region 2356 * and if so the freeing below will free invalid page structs. 2357 */ 2358 if (cmdline_memory_size) 2359 do_free = 0; 2360 2361 /* 2362 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 
2363 */ 2364 addr = PAGE_ALIGN((unsigned long)(__init_begin)); 2365 initend = (unsigned long)(__init_end) & PAGE_MASK; 2366 for (; addr < initend; addr += PAGE_SIZE) { 2367 unsigned long page; 2368 2369 page = (addr + 2370 ((unsigned long) __va(kern_base)) - 2371 ((unsigned long) KERNBASE)); 2372 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 2373 2374 if (do_free) 2375 free_reserved_page(virt_to_page(page)); 2376 } 2377 } 2378 2379 #ifdef CONFIG_BLK_DEV_INITRD 2380 void free_initrd_mem(unsigned long start, unsigned long end) 2381 { 2382 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, 2383 "initrd"); 2384 } 2385 #endif 2386 2387 pgprot_t PAGE_KERNEL __read_mostly; 2388 EXPORT_SYMBOL(PAGE_KERNEL); 2389 2390 pgprot_t PAGE_KERNEL_LOCKED __read_mostly; 2391 pgprot_t PAGE_COPY __read_mostly; 2392 2393 pgprot_t PAGE_SHARED __read_mostly; 2394 EXPORT_SYMBOL(PAGE_SHARED); 2395 2396 unsigned long pg_iobits __read_mostly; 2397 2398 unsigned long _PAGE_IE __read_mostly; 2399 EXPORT_SYMBOL(_PAGE_IE); 2400 2401 unsigned long _PAGE_E __read_mostly; 2402 EXPORT_SYMBOL(_PAGE_E); 2403 2404 unsigned long _PAGE_CACHE __read_mostly; 2405 EXPORT_SYMBOL(_PAGE_CACHE); 2406 2407 #ifdef CONFIG_SPARSEMEM_VMEMMAP 2408 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, 2409 int node) 2410 { 2411 unsigned long pte_base; 2412 2413 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2414 _PAGE_CP_4U | _PAGE_CV_4U | 2415 _PAGE_P_4U | _PAGE_W_4U); 2416 if (tlb_type == hypervisor) 2417 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2418 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); 2419 2420 pte_base |= _PAGE_PMD_HUGE; 2421 2422 vstart = vstart & PMD_MASK; 2423 vend = ALIGN(vend, PMD_SIZE); 2424 for (; vstart < vend; vstart += PMD_SIZE) { 2425 pgd_t *pgd = pgd_offset_k(vstart); 2426 unsigned long pte; 2427 pud_t *pud; 2428 pmd_t *pmd; 2429 2430 if (pgd_none(*pgd)) { 2431 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node); 2432 2433 if (!new) 2434 return -ENOMEM; 2435 pgd_populate(&init_mm, pgd, new); 2436 } 2437 2438 pud = pud_offset(pgd, vstart); 2439 if (pud_none(*pud)) { 2440 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node); 2441 2442 if (!new) 2443 return -ENOMEM; 2444 pud_populate(&init_mm, pud, new); 2445 } 2446 2447 pmd = pmd_offset(pud, vstart); 2448 2449 pte = pmd_val(*pmd); 2450 if (!(pte & _PAGE_VALID)) { 2451 void *block = vmemmap_alloc_block(PMD_SIZE, node); 2452 2453 if (!block) 2454 return -ENOMEM; 2455 2456 pmd_val(*pmd) = pte_base | __pa(block); 2457 } 2458 } 2459 2460 return 0; 2461 } 2462 2463 void vmemmap_free(unsigned long start, unsigned long end) 2464 { 2465 } 2466 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 2467 2468 static void prot_init_common(unsigned long page_none, 2469 unsigned long page_shared, 2470 unsigned long page_copy, 2471 unsigned long page_readonly, 2472 unsigned long page_exec_bit) 2473 { 2474 PAGE_COPY = __pgprot(page_copy); 2475 PAGE_SHARED = __pgprot(page_shared); 2476 2477 protection_map[0x0] = __pgprot(page_none); 2478 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); 2479 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); 2480 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); 2481 protection_map[0x4] = __pgprot(page_readonly); 2482 protection_map[0x5] = __pgprot(page_readonly); 2483 protection_map[0x6] = __pgprot(page_copy); 2484 protection_map[0x7] = __pgprot(page_copy); 2485 protection_map[0x8] = __pgprot(page_none); 2486 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); 2487 protection_map[0xa] = 
__pgprot(page_shared & ~page_exec_bit); 2488 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); 2489 protection_map[0xc] = __pgprot(page_readonly); 2490 protection_map[0xd] = __pgprot(page_readonly); 2491 protection_map[0xe] = __pgprot(page_shared); 2492 protection_map[0xf] = __pgprot(page_shared); 2493 } 2494 2495 static void __init sun4u_pgprot_init(void) 2496 { 2497 unsigned long page_none, page_shared, page_copy, page_readonly; 2498 unsigned long page_exec_bit; 2499 int i; 2500 2501 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2502 _PAGE_CACHE_4U | _PAGE_P_4U | 2503 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2504 _PAGE_EXEC_4U); 2505 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2506 _PAGE_CACHE_4U | _PAGE_P_4U | 2507 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2508 _PAGE_EXEC_4U | _PAGE_L_4U); 2509 2510 _PAGE_IE = _PAGE_IE_4U; 2511 _PAGE_E = _PAGE_E_4U; 2512 _PAGE_CACHE = _PAGE_CACHE_4U; 2513 2514 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | 2515 __ACCESS_BITS_4U | _PAGE_E_4U); 2516 2517 #ifdef CONFIG_DEBUG_PAGEALLOC 2518 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; 2519 #else 2520 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 2521 PAGE_OFFSET; 2522 #endif 2523 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | 2524 _PAGE_P_4U | _PAGE_W_4U); 2525 2526 for (i = 1; i < 4; i++) 2527 kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; 2528 2529 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | 2530 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | 2531 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); 2532 2533 2534 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; 2535 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2536 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); 2537 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2538 __ACCESS_BITS_4U | _PAGE_EXEC_4U); 2539 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2540 __ACCESS_BITS_4U | _PAGE_EXEC_4U); 2541 2542 page_exec_bit = _PAGE_EXEC_4U; 2543 2544 prot_init_common(page_none, page_shared, page_copy, page_readonly, 2545 page_exec_bit); 2546 } 2547 2548 static void __init sun4v_pgprot_init(void) 2549 { 2550 unsigned long page_none, page_shared, page_copy, page_readonly; 2551 unsigned long page_exec_bit; 2552 int i; 2553 2554 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | 2555 page_cache4v_flag | _PAGE_P_4V | 2556 __ACCESS_BITS_4V | __DIRTY_BITS_4V | 2557 _PAGE_EXEC_4V); 2558 PAGE_KERNEL_LOCKED = PAGE_KERNEL; 2559 2560 _PAGE_IE = _PAGE_IE_4V; 2561 _PAGE_E = _PAGE_E_4V; 2562 _PAGE_CACHE = page_cache4v_flag; 2563 2564 #ifdef CONFIG_DEBUG_PAGEALLOC 2565 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; 2566 #else 2567 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 2568 PAGE_OFFSET; 2569 #endif 2570 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V | 2571 _PAGE_W_4V); 2572 2573 for (i = 1; i < 4; i++) 2574 kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; 2575 2576 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | 2577 __ACCESS_BITS_4V | _PAGE_E_4V); 2578 2579 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | 2580 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | 2581 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | 2582 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); 2583 2584 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; 2585 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 2586 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); 2587 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V 
| page_cache4v_flag | 2588 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2589 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 2590 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2591 2592 page_exec_bit = _PAGE_EXEC_4V; 2593 2594 prot_init_common(page_none, page_shared, page_copy, page_readonly, 2595 page_exec_bit); 2596 } 2597 2598 unsigned long pte_sz_bits(unsigned long sz) 2599 { 2600 if (tlb_type == hypervisor) { 2601 switch (sz) { 2602 case 8 * 1024: 2603 default: 2604 return _PAGE_SZ8K_4V; 2605 case 64 * 1024: 2606 return _PAGE_SZ64K_4V; 2607 case 512 * 1024: 2608 return _PAGE_SZ512K_4V; 2609 case 4 * 1024 * 1024: 2610 return _PAGE_SZ4MB_4V; 2611 } 2612 } else { 2613 switch (sz) { 2614 case 8 * 1024: 2615 default: 2616 return _PAGE_SZ8K_4U; 2617 case 64 * 1024: 2618 return _PAGE_SZ64K_4U; 2619 case 512 * 1024: 2620 return _PAGE_SZ512K_4U; 2621 case 4 * 1024 * 1024: 2622 return _PAGE_SZ4MB_4U; 2623 } 2624 } 2625 } 2626 2627 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) 2628 { 2629 pte_t pte; 2630 2631 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); 2632 pte_val(pte) |= (((unsigned long)space) << 32); 2633 pte_val(pte) |= pte_sz_bits(page_size); 2634 2635 return pte; 2636 } 2637 2638 static unsigned long kern_large_tte(unsigned long paddr) 2639 { 2640 unsigned long val; 2641 2642 val = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2643 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | 2644 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); 2645 if (tlb_type == hypervisor) 2646 val = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2647 page_cache4v_flag | _PAGE_P_4V | 2648 _PAGE_EXEC_4V | _PAGE_W_4V); 2649 2650 return val | paddr; 2651 } 2652 2653 /* If not locked, zap it. */ 2654 void __flush_tlb_all(void) 2655 { 2656 unsigned long pstate; 2657 int i; 2658 2659 __asm__ __volatile__("flushw\n\t" 2660 "rdpr %%pstate, %0\n\t" 2661 "wrpr %0, %1, %%pstate" 2662 : "=r" (pstate) 2663 : "i" (PSTATE_IE)); 2664 if (tlb_type == hypervisor) { 2665 sun4v_mmu_demap_all(); 2666 } else if (tlb_type == spitfire) { 2667 for (i = 0; i < 64; i++) { 2668 /* Spitfire Errata #32 workaround */ 2669 /* NOTE: Always runs on spitfire, so no 2670 * cheetah+ page size encodings. 2671 */ 2672 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 2673 "flush %%g6" 2674 : /* No outputs */ 2675 : "r" (0), 2676 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); 2677 2678 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { 2679 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 2680 "membar #Sync" 2681 : /* no outputs */ 2682 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); 2683 spitfire_put_dtlb_data(i, 0x0UL); 2684 } 2685 2686 /* Spitfire Errata #32 workaround */ 2687 /* NOTE: Always runs on spitfire, so no 2688 * cheetah+ page size encodings. 
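* The workaround stores context zero into PRIMARY_CONTEXT via ASI_DMMU and
* issues a flush before the diagnostic ITLB read below, mirroring the DTLB
* case above.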
2689 */ 2690 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 2691 "flush %%g6" 2692 : /* No outputs */ 2693 : "r" (0), 2694 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); 2695 2696 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { 2697 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 2698 "membar #Sync" 2699 : /* no outputs */ 2700 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); 2701 spitfire_put_itlb_data(i, 0x0UL); 2702 } 2703 } 2704 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 2705 cheetah_flush_dtlb_all(); 2706 cheetah_flush_itlb_all(); 2707 } 2708 __asm__ __volatile__("wrpr %0, 0, %%pstate" 2709 : : "r" (pstate)); 2710 } 2711 2712 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 2713 unsigned long address) 2714 { 2715 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 2716 __GFP_REPEAT | __GFP_ZERO); 2717 pte_t *pte = NULL; 2718 2719 if (page) 2720 pte = (pte_t *) page_address(page); 2721 2722 return pte; 2723 } 2724 2725 pgtable_t pte_alloc_one(struct mm_struct *mm, 2726 unsigned long address) 2727 { 2728 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 2729 __GFP_REPEAT | __GFP_ZERO); 2730 if (!page) 2731 return NULL; 2732 if (!pgtable_page_ctor(page)) { 2733 free_hot_cold_page(page, 0); 2734 return NULL; 2735 } 2736 return (pte_t *) page_address(page); 2737 } 2738 2739 void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 2740 { 2741 free_page((unsigned long)pte); 2742 } 2743 2744 static void __pte_free(pgtable_t pte) 2745 { 2746 struct page *page = virt_to_page(pte); 2747 2748 pgtable_page_dtor(page); 2749 __free_page(page); 2750 } 2751 2752 void pte_free(struct mm_struct *mm, pgtable_t pte) 2753 { 2754 __pte_free(pte); 2755 } 2756 2757 void pgtable_free(void *table, bool is_page) 2758 { 2759 if (is_page) 2760 __pte_free(table); 2761 else 2762 kmem_cache_free(pgtable_cache, table); 2763 } 2764 2765 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2766 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 2767 pmd_t *pmd) 2768 { 2769 unsigned long pte, flags; 2770 struct mm_struct *mm; 2771 pmd_t entry = *pmd; 2772 2773 if (!pmd_large(entry) || !pmd_young(entry)) 2774 return; 2775 2776 pte = pmd_val(entry); 2777 2778 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ 2779 if (!(pte & _PAGE_VALID)) 2780 return; 2781 2782 /* We are fabricating 8MB pages using 4MB real hw pages. 
*/ 2783 pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); 2784 2785 mm = vma->vm_mm; 2786 2787 spin_lock_irqsave(&mm->context.lock, flags); 2788 2789 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) 2790 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, 2791 addr, pte); 2792 2793 spin_unlock_irqrestore(&mm->context.lock, flags); 2794 } 2795 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2796 2797 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 2798 static void context_reload(void *__data) 2799 { 2800 struct mm_struct *mm = __data; 2801 2802 if (mm == current->mm) 2803 load_secondary_context(mm); 2804 } 2805 2806 void hugetlb_setup(struct pt_regs *regs) 2807 { 2808 struct mm_struct *mm = current->mm; 2809 struct tsb_config *tp; 2810 2811 if (faulthandler_disabled() || !mm) { 2812 const struct exception_table_entry *entry; 2813 2814 entry = search_exception_tables(regs->tpc); 2815 if (entry) { 2816 regs->tpc = entry->fixup; 2817 regs->tnpc = regs->tpc + 4; 2818 return; 2819 } 2820 pr_alert("Unexpected HugeTLB setup in atomic context.\n"); 2821 die_if_kernel("HugeTSB in atomic", regs); 2822 } 2823 2824 tp = &mm->context.tsb_block[MM_TSB_HUGE]; 2825 if (likely(tp->tsb == NULL)) 2826 tsb_grow(mm, MM_TSB_HUGE, 0); 2827 2828 tsb_context_switch(mm); 2829 smp_tsb_sync(mm); 2830 2831 /* On UltraSPARC-III+ and later, configure the second half of 2832 * the Data-TLB for huge pages. 2833 */ 2834 if (tlb_type == cheetah_plus) { 2835 unsigned long ctx; 2836 2837 spin_lock(&ctx_alloc_lock); 2838 ctx = mm->context.sparc64_ctx_val; 2839 ctx &= ~CTX_PGSZ_MASK; 2840 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; 2841 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; 2842 2843 if (ctx != mm->context.sparc64_ctx_val) { 2844 /* When changing the page size fields, we 2845 * must perform a context flush so that no 2846 * stale entries match. This flush must 2847 * occur with the original context register 2848 * settings. 2849 */ 2850 do_flush_tlb_mm(mm); 2851 2852 /* Reload the context register of all processors 2853 * also executing in this address space. 
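* on_each_cpu() runs context_reload() on every cpu; only cpus whose
* current->mm is this mm actually rewrite their secondary context register.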
2854 */ 2855 mm->context.sparc64_ctx_val = ctx; 2856 on_each_cpu(context_reload, mm, 0); 2857 } 2858 spin_unlock(&ctx_alloc_lock); 2859 } 2860 } 2861 #endif 2862 2863 static struct resource code_resource = { 2864 .name = "Kernel code", 2865 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2866 }; 2867 2868 static struct resource data_resource = { 2869 .name = "Kernel data", 2870 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2871 }; 2872 2873 static struct resource bss_resource = { 2874 .name = "Kernel bss", 2875 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2876 }; 2877 2878 static inline resource_size_t compute_kern_paddr(void *addr) 2879 { 2880 return (resource_size_t) (addr - KERNBASE + kern_base); 2881 } 2882 2883 static void __init kernel_lds_init(void) 2884 { 2885 code_resource.start = compute_kern_paddr(_text); 2886 code_resource.end = compute_kern_paddr(_etext - 1); 2887 data_resource.start = compute_kern_paddr(_etext); 2888 data_resource.end = compute_kern_paddr(_edata - 1); 2889 bss_resource.start = compute_kern_paddr(__bss_start); 2890 bss_resource.end = compute_kern_paddr(_end - 1); 2891 } 2892 2893 static int __init report_memory(void) 2894 { 2895 int i; 2896 struct resource *res; 2897 2898 kernel_lds_init(); 2899 2900 for (i = 0; i < pavail_ents; i++) { 2901 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 2902 2903 if (!res) { 2904 pr_warn("Failed to allocate source.\n"); 2905 break; 2906 } 2907 2908 res->name = "System RAM"; 2909 res->start = pavail[i].phys_addr; 2910 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; 2911 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 2912 2913 if (insert_resource(&iomem_resource, res) < 0) { 2914 pr_warn("Resource insertion failed.\n"); 2915 break; 2916 } 2917 2918 insert_resource(res, &code_resource); 2919 insert_resource(res, &data_resource); 2920 insert_resource(res, &bss_resource); 2921 } 2922 2923 return 0; 2924 } 2925 arch_initcall(report_memory); 2926 2927 #ifdef CONFIG_SMP 2928 #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range 2929 #else 2930 #define do_flush_tlb_kernel_range __flush_tlb_kernel_range 2931 #endif 2932 2933 void flush_tlb_kernel_range(unsigned long start, unsigned long end) 2934 { 2935 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { 2936 if (start < LOW_OBP_ADDRESS) { 2937 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); 2938 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); 2939 } 2940 if (end > HI_OBP_ADDRESS) { 2941 flush_tsb_kernel_range(HI_OBP_ADDRESS, end); 2942 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); 2943 } 2944 } else { 2945 flush_tsb_kernel_range(start, end); 2946 do_flush_tlb_kernel_range(start, end); 2947 } 2948 } 2949