/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
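
/* Illustrative sketch, not actual handler code: on a linear-mapping TLB
 * miss at virtual address 'vaddr', the miss path (see ktlb.S and the
 * kvmap_linear_patch site below) conceptually computes
 *
 *	tte = vaddr ^ kern_linear_pte_xor[idx];
 *
 * where 'idx' is the two-bit page-size code recorded for vaddr's 256MB
 * region.  Because each slot is built as (valid/size/protection bits ^
 * PAGE_OFFSET), the single XOR both converts the linear virtual address
 * back into a physical address and merges in the TTE attribute bits.
 */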

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
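
/* The sanitized, sorted banks produced by read_obp_memory() are consumed
 * later in this file; for example paging_init() does roughly
 *
 *	for (i = 0; i < pavail_ents; i++)
 *		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
 *
 * while kernel_physical_mapping_init() walks the pall[] array to build
 * the kernel linear mapping.
 */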

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
	if ((tlb_type == hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
	    (tlb_type != hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
		return true;
	return false;
}
#endif
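
/* Called whenever a user PTE has just been set or updated.  Apart from the
 * D-cache bookkeeping above, this preloads the new translation into the
 * mm's TSB (the per-address-space table the TLB miss handlers probe
 * first), using the same index/tag scheme as __update_mmu_tsb_insert():
 * index is (address >> hash_shift) masked by the TSB size, tag is
 * address >> 22.  Preloading only saves a later TSB miss; the slower miss
 * path would fill the entry anyway.
 */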
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;
487 */ 488 static inline int in_obp_range(unsigned long vaddr) 489 { 490 return (vaddr >= LOW_OBP_ADDRESS && 491 vaddr < HI_OBP_ADDRESS); 492 } 493 494 static int cmp_ptrans(const void *a, const void *b) 495 { 496 const struct linux_prom_translation *x = a, *y = b; 497 498 if (x->virt > y->virt) 499 return 1; 500 if (x->virt < y->virt) 501 return -1; 502 return 0; 503 } 504 505 /* Read OBP translations property into 'prom_trans[]'. */ 506 static void __init read_obp_translations(void) 507 { 508 int n, node, ents, first, last, i; 509 510 node = prom_finddevice("/virtual-memory"); 511 n = prom_getproplen(node, "translations"); 512 if (unlikely(n == 0 || n == -1)) { 513 prom_printf("prom_mappings: Couldn't get size.\n"); 514 prom_halt(); 515 } 516 if (unlikely(n > sizeof(prom_trans))) { 517 prom_printf("prom_mappings: Size %d is too big.\n", n); 518 prom_halt(); 519 } 520 521 if ((n = prom_getproperty(node, "translations", 522 (char *)&prom_trans[0], 523 sizeof(prom_trans))) == -1) { 524 prom_printf("prom_mappings: Couldn't get property.\n"); 525 prom_halt(); 526 } 527 528 n = n / sizeof(struct linux_prom_translation); 529 530 ents = n; 531 532 sort(prom_trans, ents, sizeof(struct linux_prom_translation), 533 cmp_ptrans, NULL); 534 535 /* Now kick out all the non-OBP entries. */ 536 for (i = 0; i < ents; i++) { 537 if (in_obp_range(prom_trans[i].virt)) 538 break; 539 } 540 first = i; 541 for (; i < ents; i++) { 542 if (!in_obp_range(prom_trans[i].virt)) 543 break; 544 } 545 last = i; 546 547 for (i = 0; i < (last - first); i++) { 548 struct linux_prom_translation *src = &prom_trans[i + first]; 549 struct linux_prom_translation *dest = &prom_trans[i]; 550 551 *dest = *src; 552 } 553 for (; i < ents; i++) { 554 struct linux_prom_translation *dest = &prom_trans[i]; 555 dest->virt = dest->size = dest->data = 0x0UL; 556 } 557 558 prom_trans_ents = last - first; 559 560 if (tlb_type == spitfire) { 561 /* Clear diag TTE bits. */ 562 for (i = 0; i < prom_trans_ents; i++) 563 prom_trans[i].data &= ~0x0003fe0000000000UL; 564 } 565 566 /* Force execute bit on. */ 567 for (i = 0; i < prom_trans_ents; i++) 568 prom_trans[i].data |= (tlb_type == hypervisor ? 569 _PAGE_EXEC_4V : _PAGE_EXEC_4U); 570 } 571 572 static void __init hypervisor_tlb_lock(unsigned long vaddr, 573 unsigned long pte, 574 unsigned long mmu) 575 { 576 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 577 578 if (ret != 0) { 579 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: " 580 "errors with %lx\n", vaddr, 0, pte, mmu, ret); 581 prom_halt(); 582 } 583 } 584 585 static unsigned long kern_large_tte(unsigned long paddr); 586 587 static void __init remap_kernel(void) 588 { 589 unsigned long phys_page, tte_vaddr, tte_data; 590 int i, tlb_ent = sparc64_highest_locked_tlbent(); 591 592 tte_vaddr = (unsigned long) KERNBASE; 593 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; 594 tte_data = kern_large_tte(phys_page); 595 596 kern_locked_tte_data = tte_data; 597 598 /* Now lock us into the TLBs via Hypervisor or OBP. 

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);
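
/* Context management note: a SPARC64 MMU context value is split into a
 * context number (the low CTX_NR_BITS bits, tracked in mmu_context_bmap)
 * and a version in the high bits (CTX_VERSION_MASK, carried along in
 * tlb_context_cache).  When the number space is exhausted the version is
 * bumped and the bitmap reset, which is what the new_version /
 * smp_new_mmu_context_version() path below handles.
 */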

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	/* The following condition has been observed on LDOM guests. */
	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
		  " rule. Some physical memory will be owned by node 0.");
	return 0;
}
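
/* Each node_mem_mask describes one NUMA node as a (mask, val) pair over
 * physical addresses: an address belongs to node i when
 *
 *	(ra_to_pa(addr) & node_masks[i].mask) == node_masks[i].val
 *
 * For example (illustrative numbers only), with mask 0xfffffff000000000
 * and val 0x1000000000, every address whose bits 36 and up equal 0x1
 * lands on that node -- which is exactly how numa_parse_jbus() below
 * encodes the node id in bits 36 and higher.
 */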

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((long)addr < 0L) {
		unsigned long pa = __pa(addr);

		if ((addr >> max_phys_bits) != 0UL)
			return false;

		return pfn_valid(pa >> PAGE_SHIFT);
	}

	if (addr >= (unsigned long) KERNBASE &&
	    addr < (unsigned long)&_end)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
					      unsigned long vend,
					      pud_t *pud)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	u64 pte_val = vstart;

	/* Each PUD is 8GB */
	if ((vstart & mask16gb) ||
	    (vend - vstart <= mask16gb)) {
		pte_val ^= kern_linear_pte_xor[2];
		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

		return vstart + PUD_SIZE;
	}

	pte_val ^= kern_linear_pte_xor[3];
	pte_val |= _PAGE_PUD_HUGE;

	vend = vstart + mask16gb + 1UL;
	while (vstart < vend) {
		pud_val(*pud) = pte_val;

		pte_val += PUD_SIZE;
		vstart += PUD_SIZE;
		pud++;
	}
	return vstart;
}

static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
					      unsigned long vend,
					      pmd_t *pmd)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;
	u64 pte_val = vstart;

	/* Each PMD is 8MB */
	if ((vstart & mask256mb) ||
	    (vend - vstart <= mask256mb)) {
		pte_val ^= kern_linear_pte_xor[0];
		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

		return vstart + PMD_SIZE;
	}

	if ((vstart & mask2gb) ||
	    (vend - vstart <= mask2gb)) {
		pte_val ^= kern_linear_pte_xor[1];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask256mb + 1UL;
	} else {
		pte_val ^= kern_linear_pte_xor[2];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask2gb + 1UL;
	}

	while (vstart < vend) {
		pmd_val(*pmd) = pte_val;

		pte_val += PMD_SIZE;
		vstart += PMD_SIZE;
		pmd++;
	}

	return vstart;
}

static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
		return true;

	return false;
}
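
/* Map the physical range [pstart, pend) into the kernel linear area at
 * PAGE_OFFSET + pstart.  The walk below allocates any missing PUD/PMD/PTE
 * tables from bootmem and, when 'use_huge' allows it and the region is
 * suitably aligned, lets kernel_map_hugepud()/kernel_map_hugepmd() above
 * install large mappings instead of individual PTEs.  Returns the number
 * of bytes allocated for page tables.
 */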
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot,
					    bool use_huge)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd)) {
			pud_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pgd_populate(&init_mm, pgd, new);
		}
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepud(vstart, vend, pud);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (pmd_none(*pmd)) {
			pte_t *new;

			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepmd(vstart, vend, pmd);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

static void __init flush_all_kernel_tsbs(void)
{
	int i;

	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
		struct tsb *ent = &swapper_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
		struct tsb *ent = &swapper_4m_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#endif
}

extern unsigned int kvmap_linear_patch[1];

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i, mem_alloced = 0UL;
	bool use_huge = true;

#ifdef CONFIG_DEBUG_PAGEALLOC
	use_huge = false;
#endif
	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL, use_huge);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	flush_all_kernel_tsbs();

	__flush_tlb_all();
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)), false);

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);

unsigned long VMALLOC_END = 0x0000010000000000UL;
EXPORT_SYMBOL(VMALLOC_END);

unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;

static void __init setup_page_offset(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah/Panther support a full 64-bit virtual
		 * address, so we can use all that our page tables
		 * support.
		 */
		sparc64_va_hole_top =    0xfff0000000000000UL;
		sparc64_va_hole_bottom = 0x0010000000000000UL;

		max_phys_bits = 42;
	} else if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
		case SUN4V_CHIP_NIAGARA2:
			/* T1 and T2 support 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 39;
			break;
		case SUN4V_CHIP_NIAGARA3:
			/* T3 supports 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 43;
			break;
		case SUN4V_CHIP_NIAGARA4:
		case SUN4V_CHIP_NIAGARA5:
		case SUN4V_CHIP_SPARC64X:
		case SUN4V_CHIP_SPARC_M6:
			/* T4 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 47;
			break;
		case SUN4V_CHIP_SPARC_M7:
		default:
			/* M7 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 49;
			break;
		}
	}

	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
			    max_phys_bits);
		prom_halt();
	}

	PAGE_OFFSET = sparc64_va_hole_top;
	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
		       (sparc64_va_hole_bottom >> 2));

	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
		PAGE_OFFSET, max_phys_bits);
	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
		VMALLOC_START, VMALLOC_END);
	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
		VMEMMAP_BASE, VMEMMAP_BASE << 1);
}
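
/* Resulting layout, stated loosely: the address space is split by the
 * hardware VA hole.  Everything from sparc64_va_hole_top upward is kernel
 * space, with PAGE_OFFSET (the base of the linear mapping) at the top of
 * the hole so that physical address pa is reachable at PAGE_OFFSET + pa.
 * VMALLOC_END is placed at three quarters of sparc64_va_hole_bottom,
 * i.e. below the hole, and the chosen max_phys_bits bounds the physical
 * addresses the linear mapping and kern_addr_valid() will accept.
 */
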
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];

/* The swapper TSBs are loaded with a base sequence of:
 *
 *	sethi	%uhi(SYMBOL), REG1
 *	sethi	%hi(SYMBOL), REG2
 *	or	REG1, %ulo(SYMBOL), REG1
 *	or	REG2, %lo(SYMBOL), REG2
 *	sllx	REG1, 32, REG1
 *	or	REG1, REG2, REG1
 *
 * When we use physical addressing for the TSB accesses, we patch the
 * first four instructions in the above sequence.
 */
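
/* Concretely, patch_one_ktsb_phys() below rewrites the immediates in that
 * sequence so REG1:REG2 ends up holding the TSB's physical address 'pa':
 * the two sethi instructions receive the upper 22 bits of the high and
 * low 32-bit halves of pa (hence the ">> 10"), and the two or
 * instructions receive the remaining low 10 bits of each half.
 */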
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));

		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
				    HV_PGSZ_MASK_256MB |
				    HV_PGSZ_MASK_2GB |
				    HV_PGSZ_MASK_16GB) &
				   cpu_pgsz_mask);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

static void __init sun4u_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	/* This is where we would add Panther support for
	 * 32MB and 256MB pages.
	 */
#endif
}

static void __init sun4v_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
	}
#endif
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

static phys_addr_t __init available_memory(void)
{
	phys_addr_t available = 0ULL;
	phys_addr_t pa_start, pa_end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
		available = available + (pa_end - pa_start);

	return available;
}

/* We need to exclude reserved regions. This exclusion will include
 * vmlinux and initrd. To be more precise the initrd size could be used to
 * compute a new lower limit because it is freed later during initialization.
 */
static void __init reduce_memory(phys_addr_t limit_ram)
{
	phys_addr_t avail_ram = available_memory();
	phys_addr_t pa_start, pa_end;
	u64 i;

	if (limit_ram >= avail_ram)
		return;

	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
		phys_addr_t region_size = pa_end - pa_start;
		phys_addr_t clip_start = pa_start;

		avail_ram = avail_ram - region_size;
		/* Are we consuming too much? */
		if (avail_ram < limit_ram) {
			phys_addr_t give_back = limit_ram - avail_ram;

			region_size = region_size - give_back;
			clip_start = clip_start + give_back;
		}

		memblock_remove(clip_start, region_size);

		if (avail_ram <= limit_ram)
			break;
		i = 0UL;
	}
}
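
/* Boot-time MM bring-up, roughly in order: pick the VA layout
 * (setup_page_offset), select sun4u/sun4v protection bits, patch the TSB
 * access code for physical addressing where needed, read the firmware
 * memory and translation tables, hand the result to memblock (honoring
 * any command line memory limit via reduce_memory), remap and lock the
 * kernel image TLB entries, build the device tree / MDESC state, finalize
 * the linear-mapping XOR values, and finish with bootmem_init() and the
 * zone setup.
 */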
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;
	int node;

	setup_page_offset();

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor)
		sun4v_patch_tlb_handlers();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	if (cmdline_memory_size)
		reduce_memory(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);

		sun4v_linear_pte_xor_finalize();

		sun4v_ktsb_init();
		sun4v_ktsb_register();
	} else {
		unsigned long impl, ver;

		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		impl = ((ver >> 32) & 0xffff);
		if (impl == PANTHER_IMPL)
			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
					  HV_PGSZ_MASK_256MB);

		sun4u_linear_pte_xor_finalize();
	}

	/* Flush the TLBs and the 4M TSB so that the updated linear
	 * pte XOR settings are realized for all mappings.
	 */
	__flush_tlb_all();
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif
	__flush_tlb_all();

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;

	for_each_online_node(i)
		if (NODE_DATA(i)->node_spanned_pages)
			register_page_bootmem_info_node(NODE_DATA(i));
#endif
}
void __init mem_init(void)
{
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	free_all_bootmem();

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */

void __init mem_init(void)
{
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	free_all_bootmem();

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	mark_page_reserved(mem_map_zero);

	mem_init_print_info(NULL);

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds.  Page align
	 * for >8k page sizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
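
/* A minimal illustrative sketch, not used by the rest of this file:
 * free_initmem() above converts a KERNBASE-relative image address to
 * its linear-map alias before looking up the struct page.  The same
 * translation, written out as a stand-alone helper with a hypothetical
 * name, assuming vaddr lies inside the kernel image:
 */
static unsigned long __maybe_unused example_kernel_image_alias(unsigned long vaddr)
{
	/* The image is mapped at KERNBASE but physically starts at
	 * kern_base, so the linear-map alias differs by that offset.
	 */
	return vaddr - ((unsigned long) KERNBASE) +
	       ((unsigned long) __va(kern_base));
}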

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = pgd_offset_k(vstart);
		unsigned long pte;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd)) {
			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pgd_populate(&init_mm, pgd, new);
		}

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);

		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);

			if (!block)
				return -ENOMEM;

			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
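
/* A minimal illustrative sketch, not used by the rest of this file:
 * prot_init_common() above fills protection_map[] so that the low four
 * bits of a vma's flags (read, write, exec, shared in the generic mm
 * code) select a pgprot; private writable mappings land on the
 * copy-on-write entries while shared ones get the truly writable
 * entries.  The lookup itself is just an array index:
 */
static pgprot_t __maybe_unused example_prot_lookup(unsigned long vm_flags_low_bits)
{
	return protection_map[vm_flags_low_bits & 0xf];
}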

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
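
/* A minimal illustrative sketch with hypothetical values, not used by
 * the rest of this file: mk_pte_io() above composes an I/O PTE from a
 * physical address, an uncached variant of the given protection, the
 * bus "space" shifted above bit 32, and the TTE size field returned by
 * pte_sz_bits().
 */
static pte_t __maybe_unused example_io_pte(unsigned long io_paddr, int space)
{
	/* One 8K uncacheable page, using the kernel protections set up
	 * by the pgprot init code above.
	 */
	return mk_pte_io(io_paddr, PAGE_KERNEL, space, 8 * 1024);
}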

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}
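
/* A minimal illustrative sketch, not used by the rest of this file:
 * the spitfire loop in __flush_tlb_all() above preserves locked
 * translations and zaps the rest.  Expressed as a predicate over a
 * TLB data word, an entry is removed only when its lock bit is clear.
 */
static bool __maybe_unused example_should_zap_tte(unsigned long tlb_data)
{
	return (tlb_data & _PAGE_L_4U) == 0UL;
}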

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages.  */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
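
/* A minimal illustrative sketch, not used by the rest of this file:
 * update_mmu_cache_pmd() above backs one 8MB huge page with two real
 * 4MB hardware pages and selects the correct half by folding one bit
 * of the faulting address into the TTE.  With the shift passed in as
 * a hypothetical parameter, the selection is:
 */
static unsigned long __maybe_unused example_real_hpage_half(unsigned long addr,
							    unsigned int real_hpage_shift)
{
	/* Zero selects the lower real page, non-zero the upper one. */
	return addr & (1UL << real_hpage_shift);
}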

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (in_atomic() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
#endif

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end   = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end   = compute_kern_paddr(_edata - 1);
	bss_resource.start  = compute_kern_paddr(__bss_start);
	bss_resource.end    = compute_kern_paddr(_end - 1);
}

static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
device_initcall(report_memory);

#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}
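
/* A minimal illustrative sketch with a hypothetical helper, not used
 * by the rest of this file: flush_tlb_kernel_range() above splits any
 * range that overlaps [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) so that the
 * firmware's own mappings are never flushed.  The overlap test it
 * relies on is simply:
 */
static bool __maybe_unused example_range_hits_obp(unsigned long start,
						  unsigned long end)
{
	return start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS;
}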