/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory. If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
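
/* A minimal, purely illustrative sketch (hypothetical helper, not used
 * anywhere): assuming the bitmap semantics described in the comment above,
 * this is how a physical address would pick its kern_linear_pte_xor[] entry.
 * The 28-bit shift (256MB granularity) matches what mark_kpte_bitmap()
 * does further down in this file.
 */
static inline unsigned long kpte_linear_xor_example(unsigned long paddr)
{
	unsigned long bit = paddr >> 28;	/* one bit per 256MB chunk */

	return test_bit(bit, kpte_linear_bitmap) ?
		kern_linear_pte_xor[1] :	/* use a 256MB page */
		kern_linear_pte_xor[0];		/* use a 4MB page */
}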

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS 32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask \
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("!
test_and_clear_dcache_dirty\n" 231 "1:\n\t" 232 "ldx [%2], %%g7\n\t" 233 "srlx %%g7, %4, %%g1\n\t" 234 "and %%g1, %3, %%g1\n\t" 235 "cmp %%g1, %0\n\t" 236 "bne,pn %%icc, 2f\n\t" 237 " andn %%g7, %1, %%g1\n\t" 238 "casx [%2], %%g7, %%g1\n\t" 239 "cmp %%g7, %%g1\n\t" 240 "bne,pn %%xcc, 1b\n\t" 241 " nop\n" 242 "2:" 243 : /* no outputs */ 244 : "r" (cpu), "r" (mask), "r" (&page->flags), 245 "i" (PG_dcache_cpu_mask), 246 "i" (PG_dcache_cpu_shift) 247 : "g1", "g7"); 248 } 249 250 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) 251 { 252 unsigned long tsb_addr = (unsigned long) ent; 253 254 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 255 tsb_addr = __pa(tsb_addr); 256 257 __tsb_insert(tsb_addr, tag, pte); 258 } 259 260 unsigned long _PAGE_ALL_SZ_BITS __read_mostly; 261 unsigned long _PAGE_SZBITS __read_mostly; 262 263 static void flush_dcache(unsigned long pfn) 264 { 265 struct page *page; 266 267 page = pfn_to_page(pfn); 268 if (page) { 269 unsigned long pg_flags; 270 271 pg_flags = page->flags; 272 if (pg_flags & (1UL << PG_dcache_dirty)) { 273 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 274 PG_dcache_cpu_mask); 275 int this_cpu = get_cpu(); 276 277 /* This is just to optimize away some function calls 278 * in the SMP case. 279 */ 280 if (cpu == this_cpu) 281 flush_dcache_page_impl(page); 282 else 283 smp_flush_dcache_page_impl(page, cpu); 284 285 clear_dcache_dirty_cpu(page, cpu); 286 287 put_cpu(); 288 } 289 } 290 } 291 292 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 293 { 294 struct mm_struct *mm; 295 struct tsb *tsb; 296 unsigned long tag, flags; 297 unsigned long tsb_index, tsb_hash_shift; 298 pte_t pte = *ptep; 299 300 if (tlb_type != hypervisor) { 301 unsigned long pfn = pte_pfn(pte); 302 303 if (pfn_valid(pfn)) 304 flush_dcache(pfn); 305 } 306 307 mm = vma->vm_mm; 308 309 tsb_index = MM_TSB_BASE; 310 tsb_hash_shift = PAGE_SHIFT; 311 312 spin_lock_irqsave(&mm->context.lock, flags); 313 314 #ifdef CONFIG_HUGETLB_PAGE 315 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { 316 if ((tlb_type == hypervisor && 317 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || 318 (tlb_type != hypervisor && 319 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { 320 tsb_index = MM_TSB_HUGE; 321 tsb_hash_shift = HPAGE_SHIFT; 322 } 323 } 324 #endif 325 326 tsb = mm->context.tsb_block[tsb_index].tsb; 327 tsb += ((address >> tsb_hash_shift) & 328 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); 329 tag = (address >> 22UL); 330 tsb_insert(tsb, tag, pte_val(pte)); 331 332 spin_unlock_irqrestore(&mm->context.lock, flags); 333 } 334 335 void flush_dcache_page(struct page *page) 336 { 337 struct address_space *mapping; 338 int this_cpu; 339 340 if (tlb_type == hypervisor) 341 return; 342 343 /* Do not bother with the expensive D-cache flush if it 344 * is merely the zero page. The 'bigcore' testcase in GDB 345 * causes this case to run millions of times. 346 */ 347 if (page == ZERO_PAGE(0)) 348 return; 349 350 this_cpu = get_cpu(); 351 352 mapping = page_mapping(page); 353 if (mapping && !mapping_mapped(mapping)) { 354 int dirty = test_bit(PG_dcache_dirty, &page->flags); 355 if (dirty) { 356 int dirty_cpu = dcache_dirty_cpu(page); 357 358 if (dirty_cpu == this_cpu) 359 goto out; 360 smp_flush_dcache_page_impl(page, dirty_cpu); 361 } 362 set_dcache_dirty(page, this_cpu); 363 } else { 364 /* We could delay the flush for the !page_mapping 365 * case too. 
But that case is for exec env/arg 366 * pages and those are %99 certainly going to get 367 * faulted into the tlb (and thus flushed) anyways. 368 */ 369 flush_dcache_page_impl(page); 370 } 371 372 out: 373 put_cpu(); 374 } 375 EXPORT_SYMBOL(flush_dcache_page); 376 377 void __kprobes flush_icache_range(unsigned long start, unsigned long end) 378 { 379 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ 380 if (tlb_type == spitfire) { 381 unsigned long kaddr; 382 383 /* This code only runs on Spitfire cpus so this is 384 * why we can assume _PAGE_PADDR_4U. 385 */ 386 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { 387 unsigned long paddr, mask = _PAGE_PADDR_4U; 388 389 if (kaddr >= PAGE_OFFSET) 390 paddr = kaddr & mask; 391 else { 392 pgd_t *pgdp = pgd_offset_k(kaddr); 393 pud_t *pudp = pud_offset(pgdp, kaddr); 394 pmd_t *pmdp = pmd_offset(pudp, kaddr); 395 pte_t *ptep = pte_offset_kernel(pmdp, kaddr); 396 397 paddr = pte_val(*ptep) & mask; 398 } 399 __flush_icache_page(paddr); 400 } 401 } 402 } 403 EXPORT_SYMBOL(flush_icache_range); 404 405 void mmu_info(struct seq_file *m) 406 { 407 if (tlb_type == cheetah) 408 seq_printf(m, "MMU Type\t: Cheetah\n"); 409 else if (tlb_type == cheetah_plus) 410 seq_printf(m, "MMU Type\t: Cheetah+\n"); 411 else if (tlb_type == spitfire) 412 seq_printf(m, "MMU Type\t: Spitfire\n"); 413 else if (tlb_type == hypervisor) 414 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); 415 else 416 seq_printf(m, "MMU Type\t: ???\n"); 417 418 #ifdef CONFIG_DEBUG_DCFLUSH 419 seq_printf(m, "DCPageFlushes\t: %d\n", 420 atomic_read(&dcpage_flushes)); 421 #ifdef CONFIG_SMP 422 seq_printf(m, "DCPageFlushesXC\t: %d\n", 423 atomic_read(&dcpage_flushes_xcall)); 424 #endif /* CONFIG_SMP */ 425 #endif /* CONFIG_DEBUG_DCFLUSH */ 426 } 427 428 struct linux_prom_translation prom_trans[512] __read_mostly; 429 unsigned int prom_trans_ents __read_mostly; 430 431 unsigned long kern_locked_tte_data; 432 433 /* The obp translations are saved based on 8k pagesize, since obp can 434 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 435 * HI_OBP_ADDRESS range are handled in ktlb.S. 436 */ 437 static inline int in_obp_range(unsigned long vaddr) 438 { 439 return (vaddr >= LOW_OBP_ADDRESS && 440 vaddr < HI_OBP_ADDRESS); 441 } 442 443 static int cmp_ptrans(const void *a, const void *b) 444 { 445 const struct linux_prom_translation *x = a, *y = b; 446 447 if (x->virt > y->virt) 448 return 1; 449 if (x->virt < y->virt) 450 return -1; 451 return 0; 452 } 453 454 /* Read OBP translations property into 'prom_trans[]'. */ 455 static void __init read_obp_translations(void) 456 { 457 int n, node, ents, first, last, i; 458 459 node = prom_finddevice("/virtual-memory"); 460 n = prom_getproplen(node, "translations"); 461 if (unlikely(n == 0 || n == -1)) { 462 prom_printf("prom_mappings: Couldn't get size.\n"); 463 prom_halt(); 464 } 465 if (unlikely(n > sizeof(prom_trans))) { 466 prom_printf("prom_mappings: Size %Zd is too big.\n", n); 467 prom_halt(); 468 } 469 470 if ((n = prom_getproperty(node, "translations", 471 (char *)&prom_trans[0], 472 sizeof(prom_trans))) == -1) { 473 prom_printf("prom_mappings: Couldn't get property.\n"); 474 prom_halt(); 475 } 476 477 n = n / sizeof(struct linux_prom_translation); 478 479 ents = n; 480 481 sort(prom_trans, ents, sizeof(struct linux_prom_translation), 482 cmp_ptrans, NULL); 483 484 /* Now kick out all the non-OBP entries. 
*/ 485 for (i = 0; i < ents; i++) { 486 if (in_obp_range(prom_trans[i].virt)) 487 break; 488 } 489 first = i; 490 for (; i < ents; i++) { 491 if (!in_obp_range(prom_trans[i].virt)) 492 break; 493 } 494 last = i; 495 496 for (i = 0; i < (last - first); i++) { 497 struct linux_prom_translation *src = &prom_trans[i + first]; 498 struct linux_prom_translation *dest = &prom_trans[i]; 499 500 *dest = *src; 501 } 502 for (; i < ents; i++) { 503 struct linux_prom_translation *dest = &prom_trans[i]; 504 dest->virt = dest->size = dest->data = 0x0UL; 505 } 506 507 prom_trans_ents = last - first; 508 509 if (tlb_type == spitfire) { 510 /* Clear diag TTE bits. */ 511 for (i = 0; i < prom_trans_ents; i++) 512 prom_trans[i].data &= ~0x0003fe0000000000UL; 513 } 514 } 515 516 static void __init hypervisor_tlb_lock(unsigned long vaddr, 517 unsigned long pte, 518 unsigned long mmu) 519 { 520 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 521 522 if (ret != 0) { 523 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 524 "errors with %lx\n", vaddr, 0, pte, mmu, ret); 525 prom_halt(); 526 } 527 } 528 529 static unsigned long kern_large_tte(unsigned long paddr); 530 531 static void __init remap_kernel(void) 532 { 533 unsigned long phys_page, tte_vaddr, tte_data; 534 int i, tlb_ent = sparc64_highest_locked_tlbent(); 535 536 tte_vaddr = (unsigned long) KERNBASE; 537 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 538 tte_data = kern_large_tte(phys_page); 539 540 kern_locked_tte_data = tte_data; 541 542 /* Now lock us into the TLBs via Hypervisor or OBP. */ 543 if (tlb_type == hypervisor) { 544 for (i = 0; i < num_kernel_image_mappings; i++) { 545 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 546 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 547 tte_vaddr += 0x400000; 548 tte_data += 0x400000; 549 } 550 } else { 551 for (i = 0; i < num_kernel_image_mappings; i++) { 552 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); 553 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); 554 tte_vaddr += 0x400000; 555 tte_data += 0x400000; 556 } 557 sparc64_highest_unlocked_tlb_ent = tlb_ent - i; 558 } 559 if (tlb_type == cheetah_plus) { 560 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | 561 CTX_CHEETAH_PLUS_NUC); 562 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; 563 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; 564 } 565 } 566 567 568 static void __init inherit_prom_mappings(void) 569 { 570 /* Now fixup OBP's idea about where we really are mapped. */ 571 printk("Remapping the kernel... "); 572 remap_kernel(); 573 printk("done.\n"); 574 } 575 576 void prom_world(int enter) 577 { 578 if (!enter) 579 set_fs((mm_segment_t) { get_thread_current_ds() }); 580 581 __asm__ __volatile__("flushw"); 582 } 583 584 void __flush_dcache_range(unsigned long start, unsigned long end) 585 { 586 unsigned long va; 587 588 if (tlb_type == spitfire) { 589 int n = 0; 590 591 for (va = start; va < end; va += 32) { 592 spitfire_put_dcache_tag(va & 0x3fe0, 0x0); 593 if (++n >= 512) 594 break; 595 } 596 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 597 start = __pa(start); 598 end = __pa(end); 599 for (va = start; va < end; va += 32) 600 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 601 "membar #Sync" 602 : /* no outputs */ 603 : "r" (va), 604 "i" (ASI_DCACHE_INVALIDATE)); 605 } 606 } 607 EXPORT_SYMBOL(__flush_dcache_range); 608 609 /* get_new_mmu_context() uses "cache + 1". 
*/ 610 DEFINE_SPINLOCK(ctx_alloc_lock); 611 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 612 #define MAX_CTX_NR (1UL << CTX_NR_BITS) 613 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 614 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 615 616 /* Caller does TLB context flushing on local CPU if necessary. 617 * The caller also ensures that CTX_VALID(mm->context) is false. 618 * 619 * We must be careful about boundary cases so that we never 620 * let the user have CTX 0 (nucleus) or we ever use a CTX 621 * version of zero (and thus NO_CONTEXT would not be caught 622 * by version mis-match tests in mmu_context.h). 623 * 624 * Always invoked with interrupts disabled. 625 */ 626 void get_new_mmu_context(struct mm_struct *mm) 627 { 628 unsigned long ctx, new_ctx; 629 unsigned long orig_pgsz_bits; 630 unsigned long flags; 631 int new_version; 632 633 spin_lock_irqsave(&ctx_alloc_lock, flags); 634 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 635 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 636 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 637 new_version = 0; 638 if (new_ctx >= (1 << CTX_NR_BITS)) { 639 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 640 if (new_ctx >= ctx) { 641 int i; 642 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 643 CTX_FIRST_VERSION; 644 if (new_ctx == 1) 645 new_ctx = CTX_FIRST_VERSION; 646 647 /* Don't call memset, for 16 entries that's just 648 * plain silly... 649 */ 650 mmu_context_bmap[0] = 3; 651 mmu_context_bmap[1] = 0; 652 mmu_context_bmap[2] = 0; 653 mmu_context_bmap[3] = 0; 654 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { 655 mmu_context_bmap[i + 0] = 0; 656 mmu_context_bmap[i + 1] = 0; 657 mmu_context_bmap[i + 2] = 0; 658 mmu_context_bmap[i + 3] = 0; 659 } 660 new_version = 1; 661 goto out; 662 } 663 } 664 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 665 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 666 out: 667 tlb_context_cache = new_ctx; 668 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 669 spin_unlock_irqrestore(&ctx_alloc_lock, flags); 670 671 if (unlikely(new_version)) 672 smp_new_mmu_context_version(); 673 } 674 675 static int numa_enabled = 1; 676 static int numa_debug; 677 678 static int __init early_numa(char *p) 679 { 680 if (!p) 681 return 0; 682 683 if (strstr(p, "off")) 684 numa_enabled = 0; 685 686 if (strstr(p, "debug")) 687 numa_debug = 1; 688 689 return 0; 690 } 691 early_param("numa", early_numa); 692 693 #define numadbg(f, a...) \ 694 do { if (numa_debug) \ 695 printk(KERN_INFO f, ## a); \ 696 } while (0) 697 698 static void __init find_ramdisk(unsigned long phys_base) 699 { 700 #ifdef CONFIG_BLK_DEV_INITRD 701 if (sparc_ramdisk_image || sparc_ramdisk_image64) { 702 unsigned long ramdisk_image; 703 704 /* Older versions of the bootloader only supported a 705 * 32-bit physical address for the ramdisk image 706 * location, stored at sparc_ramdisk_image. Newer 707 * SILO versions set sparc_ramdisk_image to zero and 708 * provide a full 64-bit physical address at 709 * sparc_ramdisk_image64. 710 */ 711 ramdisk_image = sparc_ramdisk_image; 712 if (!ramdisk_image) 713 ramdisk_image = sparc_ramdisk_image64; 714 715 /* Another bootloader quirk. The bootloader normalizes 716 * the physical address to KERNBASE, so we have to 717 * factor that back out and add in the lowest valid 718 * physical page address to get the true physical address. 
719 */ 720 ramdisk_image -= KERNBASE; 721 ramdisk_image += phys_base; 722 723 numadbg("Found ramdisk at physical address 0x%lx, size %u\n", 724 ramdisk_image, sparc_ramdisk_size); 725 726 initrd_start = ramdisk_image; 727 initrd_end = ramdisk_image + sparc_ramdisk_size; 728 729 lmb_reserve(initrd_start, sparc_ramdisk_size); 730 731 initrd_start += PAGE_OFFSET; 732 initrd_end += PAGE_OFFSET; 733 } 734 #endif 735 } 736 737 struct node_mem_mask { 738 unsigned long mask; 739 unsigned long val; 740 unsigned long bootmem_paddr; 741 }; 742 static struct node_mem_mask node_masks[MAX_NUMNODES]; 743 static int num_node_masks; 744 745 int numa_cpu_lookup_table[NR_CPUS]; 746 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 747 748 #ifdef CONFIG_NEED_MULTIPLE_NODES 749 750 struct mdesc_mblock { 751 u64 base; 752 u64 size; 753 u64 offset; /* RA-to-PA */ 754 }; 755 static struct mdesc_mblock *mblocks; 756 static int num_mblocks; 757 758 static unsigned long ra_to_pa(unsigned long addr) 759 { 760 int i; 761 762 for (i = 0; i < num_mblocks; i++) { 763 struct mdesc_mblock *m = &mblocks[i]; 764 765 if (addr >= m->base && 766 addr < (m->base + m->size)) { 767 addr += m->offset; 768 break; 769 } 770 } 771 return addr; 772 } 773 774 static int find_node(unsigned long addr) 775 { 776 int i; 777 778 addr = ra_to_pa(addr); 779 for (i = 0; i < num_node_masks; i++) { 780 struct node_mem_mask *p = &node_masks[i]; 781 782 if ((addr & p->mask) == p->val) 783 return i; 784 } 785 return -1; 786 } 787 788 static unsigned long long nid_range(unsigned long long start, 789 unsigned long long end, int *nid) 790 { 791 *nid = find_node(start); 792 start += PAGE_SIZE; 793 while (start < end) { 794 int n = find_node(start); 795 796 if (n != *nid) 797 break; 798 start += PAGE_SIZE; 799 } 800 801 if (start > end) 802 start = end; 803 804 return start; 805 } 806 #else 807 static unsigned long long nid_range(unsigned long long start, 808 unsigned long long end, int *nid) 809 { 810 *nid = 0; 811 return end; 812 } 813 #endif 814 815 /* This must be invoked after performing all of the necessary 816 * add_active_range() calls for 'nid'. We need to be able to get 817 * correct data from get_pfn_range_for_nid(). 
818 */ 819 static void __init allocate_node_data(int nid) 820 { 821 unsigned long paddr, num_pages, start_pfn, end_pfn; 822 struct pglist_data *p; 823 824 #ifdef CONFIG_NEED_MULTIPLE_NODES 825 paddr = lmb_alloc_nid(sizeof(struct pglist_data), 826 SMP_CACHE_BYTES, nid, nid_range); 827 if (!paddr) { 828 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 829 prom_halt(); 830 } 831 NODE_DATA(nid) = __va(paddr); 832 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 833 834 NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; 835 #endif 836 837 p = NODE_DATA(nid); 838 839 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 840 p->node_start_pfn = start_pfn; 841 p->node_spanned_pages = end_pfn - start_pfn; 842 843 if (p->node_spanned_pages) { 844 num_pages = bootmem_bootmap_pages(p->node_spanned_pages); 845 846 paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, 847 nid_range); 848 if (!paddr) { 849 prom_printf("Cannot allocate bootmap for nid[%d]\n", 850 nid); 851 prom_halt(); 852 } 853 node_masks[nid].bootmem_paddr = paddr; 854 } 855 } 856 857 static void init_node_masks_nonnuma(void) 858 { 859 int i; 860 861 numadbg("Initializing tables for non-numa.\n"); 862 863 node_masks[0].mask = node_masks[0].val = 0; 864 num_node_masks = 1; 865 866 for (i = 0; i < NR_CPUS; i++) 867 numa_cpu_lookup_table[i] = 0; 868 869 numa_cpumask_lookup_table[0] = CPU_MASK_ALL; 870 } 871 872 #ifdef CONFIG_NEED_MULTIPLE_NODES 873 struct pglist_data *node_data[MAX_NUMNODES]; 874 875 EXPORT_SYMBOL(numa_cpu_lookup_table); 876 EXPORT_SYMBOL(numa_cpumask_lookup_table); 877 EXPORT_SYMBOL(node_data); 878 879 struct mdesc_mlgroup { 880 u64 node; 881 u64 latency; 882 u64 match; 883 u64 mask; 884 }; 885 static struct mdesc_mlgroup *mlgroups; 886 static int num_mlgroups; 887 888 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, 889 u32 cfg_handle) 890 { 891 u64 arc; 892 893 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { 894 u64 target = mdesc_arc_target(md, arc); 895 const u64 *val; 896 897 val = mdesc_get_property(md, target, 898 "cfg-handle", NULL); 899 if (val && *val == cfg_handle) 900 return 0; 901 } 902 return -ENODEV; 903 } 904 905 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, 906 u32 cfg_handle) 907 { 908 u64 arc, candidate, best_latency = ~(u64)0; 909 910 candidate = MDESC_NODE_NULL; 911 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 912 u64 target = mdesc_arc_target(md, arc); 913 const char *name = mdesc_node_name(md, target); 914 const u64 *val; 915 916 if (strcmp(name, "pio-latency-group")) 917 continue; 918 919 val = mdesc_get_property(md, target, "latency", NULL); 920 if (!val) 921 continue; 922 923 if (*val < best_latency) { 924 candidate = target; 925 best_latency = *val; 926 } 927 } 928 929 if (candidate == MDESC_NODE_NULL) 930 return -ENODEV; 931 932 return scan_pio_for_cfg_handle(md, candidate, cfg_handle); 933 } 934 935 int of_node_to_nid(struct device_node *dp) 936 { 937 const struct linux_prom64_registers *regs; 938 struct mdesc_handle *md; 939 u32 cfg_handle; 940 int count, nid; 941 u64 grp; 942 943 /* This is the right thing to do on currently supported 944 * SUN4U NUMA platforms as well, as the PCI controller does 945 * not sit behind any particular memory controller. 
946 */ 947 if (!mlgroups) 948 return -1; 949 950 regs = of_get_property(dp, "reg", NULL); 951 if (!regs) 952 return -1; 953 954 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; 955 956 md = mdesc_grab(); 957 958 count = 0; 959 nid = -1; 960 mdesc_for_each_node_by_name(md, grp, "group") { 961 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { 962 nid = count; 963 break; 964 } 965 count++; 966 } 967 968 mdesc_release(md); 969 970 return nid; 971 } 972 973 static void __init add_node_ranges(void) 974 { 975 int i; 976 977 for (i = 0; i < lmb.memory.cnt; i++) { 978 unsigned long size = lmb_size_bytes(&lmb.memory, i); 979 unsigned long start, end; 980 981 start = lmb.memory.region[i].base; 982 end = start + size; 983 while (start < end) { 984 unsigned long this_end; 985 int nid; 986 987 this_end = nid_range(start, end, &nid); 988 989 numadbg("Adding active range nid[%d] " 990 "start[%lx] end[%lx]\n", 991 nid, start, this_end); 992 993 add_active_range(nid, 994 start >> PAGE_SHIFT, 995 this_end >> PAGE_SHIFT); 996 997 start = this_end; 998 } 999 } 1000 } 1001 1002 static int __init grab_mlgroups(struct mdesc_handle *md) 1003 { 1004 unsigned long paddr; 1005 int count = 0; 1006 u64 node; 1007 1008 mdesc_for_each_node_by_name(md, node, "memory-latency-group") 1009 count++; 1010 if (!count) 1011 return -ENOENT; 1012 1013 paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup), 1014 SMP_CACHE_BYTES); 1015 if (!paddr) 1016 return -ENOMEM; 1017 1018 mlgroups = __va(paddr); 1019 num_mlgroups = count; 1020 1021 count = 0; 1022 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { 1023 struct mdesc_mlgroup *m = &mlgroups[count++]; 1024 const u64 *val; 1025 1026 m->node = node; 1027 1028 val = mdesc_get_property(md, node, "latency", NULL); 1029 m->latency = *val; 1030 val = mdesc_get_property(md, node, "address-match", NULL); 1031 m->match = *val; 1032 val = mdesc_get_property(md, node, "address-mask", NULL); 1033 m->mask = *val; 1034 1035 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] " 1036 "match[%llx] mask[%llx]\n", 1037 count - 1, m->node, m->latency, m->match, m->mask); 1038 } 1039 1040 return 0; 1041 } 1042 1043 static int __init grab_mblocks(struct mdesc_handle *md) 1044 { 1045 unsigned long paddr; 1046 int count = 0; 1047 u64 node; 1048 1049 mdesc_for_each_node_by_name(md, node, "mblock") 1050 count++; 1051 if (!count) 1052 return -ENOENT; 1053 1054 paddr = lmb_alloc(count * sizeof(struct mdesc_mblock), 1055 SMP_CACHE_BYTES); 1056 if (!paddr) 1057 return -ENOMEM; 1058 1059 mblocks = __va(paddr); 1060 num_mblocks = count; 1061 1062 count = 0; 1063 mdesc_for_each_node_by_name(md, node, "mblock") { 1064 struct mdesc_mblock *m = &mblocks[count++]; 1065 const u64 *val; 1066 1067 val = mdesc_get_property(md, node, "base", NULL); 1068 m->base = *val; 1069 val = mdesc_get_property(md, node, "size", NULL); 1070 m->size = *val; 1071 val = mdesc_get_property(md, node, 1072 "address-congruence-offset", NULL); 1073 m->offset = *val; 1074 1075 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1076 count - 1, m->base, m->size, m->offset); 1077 } 1078 1079 return 0; 1080 } 1081 1082 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, 1083 u64 grp, cpumask_t *mask) 1084 { 1085 u64 arc; 1086 1087 cpus_clear(*mask); 1088 1089 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { 1090 u64 target = mdesc_arc_target(md, arc); 1091 const char *name = mdesc_node_name(md, target); 1092 const u64 *id; 1093 1094 if (strcmp(name, "cpu")) 1095 continue; 1096 id = 
mdesc_get_property(md, target, "id", NULL); 1097 if (*id < nr_cpu_ids) 1098 cpu_set(*id, *mask); 1099 } 1100 } 1101 1102 static struct mdesc_mlgroup * __init find_mlgroup(u64 node) 1103 { 1104 int i; 1105 1106 for (i = 0; i < num_mlgroups; i++) { 1107 struct mdesc_mlgroup *m = &mlgroups[i]; 1108 if (m->node == node) 1109 return m; 1110 } 1111 return NULL; 1112 } 1113 1114 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, 1115 int index) 1116 { 1117 struct mdesc_mlgroup *candidate = NULL; 1118 u64 arc, best_latency = ~(u64)0; 1119 struct node_mem_mask *n; 1120 1121 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 1122 u64 target = mdesc_arc_target(md, arc); 1123 struct mdesc_mlgroup *m = find_mlgroup(target); 1124 if (!m) 1125 continue; 1126 if (m->latency < best_latency) { 1127 candidate = m; 1128 best_latency = m->latency; 1129 } 1130 } 1131 if (!candidate) 1132 return -ENOENT; 1133 1134 if (num_node_masks != index) { 1135 printk(KERN_ERR "Inconsistent NUMA state, " 1136 "index[%d] != num_node_masks[%d]\n", 1137 index, num_node_masks); 1138 return -EINVAL; 1139 } 1140 1141 n = &node_masks[num_node_masks++]; 1142 1143 n->mask = candidate->mask; 1144 n->val = candidate->match; 1145 1146 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n", 1147 index, n->mask, n->val, candidate->latency); 1148 1149 return 0; 1150 } 1151 1152 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, 1153 int index) 1154 { 1155 cpumask_t mask; 1156 int cpu; 1157 1158 numa_parse_mdesc_group_cpus(md, grp, &mask); 1159 1160 for_each_cpu_mask(cpu, mask) 1161 numa_cpu_lookup_table[cpu] = index; 1162 numa_cpumask_lookup_table[index] = mask; 1163 1164 if (numa_debug) { 1165 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); 1166 for_each_cpu_mask(cpu, mask) 1167 printk("%d ", cpu); 1168 printk("]\n"); 1169 } 1170 1171 return numa_attach_mlgroup(md, grp, index); 1172 } 1173 1174 static int __init numa_parse_mdesc(void) 1175 { 1176 struct mdesc_handle *md = mdesc_grab(); 1177 int i, err, count; 1178 u64 node; 1179 1180 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1181 if (node == MDESC_NODE_NULL) { 1182 mdesc_release(md); 1183 return -ENOENT; 1184 } 1185 1186 err = grab_mblocks(md); 1187 if (err < 0) 1188 goto out; 1189 1190 err = grab_mlgroups(md); 1191 if (err < 0) 1192 goto out; 1193 1194 count = 0; 1195 mdesc_for_each_node_by_name(md, node, "group") { 1196 err = numa_parse_mdesc_group(md, node, count); 1197 if (err < 0) 1198 break; 1199 count++; 1200 } 1201 1202 add_node_ranges(); 1203 1204 for (i = 0; i < num_node_masks; i++) { 1205 allocate_node_data(i); 1206 node_set_online(i); 1207 } 1208 1209 err = 0; 1210 out: 1211 mdesc_release(md); 1212 return err; 1213 } 1214 1215 static int __init numa_parse_jbus(void) 1216 { 1217 unsigned long cpu, index; 1218 1219 /* NUMA node id is encoded in bits 36 and higher, and there is 1220 * a 1-to-1 mapping from CPU ID to NUMA node ID. 
1221 */ 1222 index = 0; 1223 for_each_present_cpu(cpu) { 1224 numa_cpu_lookup_table[cpu] = index; 1225 numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu); 1226 node_masks[index].mask = ~((1UL << 36UL) - 1UL); 1227 node_masks[index].val = cpu << 36UL; 1228 1229 index++; 1230 } 1231 num_node_masks = index; 1232 1233 add_node_ranges(); 1234 1235 for (index = 0; index < num_node_masks; index++) { 1236 allocate_node_data(index); 1237 node_set_online(index); 1238 } 1239 1240 return 0; 1241 } 1242 1243 static int __init numa_parse_sun4u(void) 1244 { 1245 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1246 unsigned long ver; 1247 1248 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 1249 if ((ver >> 32UL) == __JALAPENO_ID || 1250 (ver >> 32UL) == __SERRANO_ID) 1251 return numa_parse_jbus(); 1252 } 1253 return -1; 1254 } 1255 1256 static int __init bootmem_init_numa(void) 1257 { 1258 int err = -1; 1259 1260 numadbg("bootmem_init_numa()\n"); 1261 1262 if (numa_enabled) { 1263 if (tlb_type == hypervisor) 1264 err = numa_parse_mdesc(); 1265 else 1266 err = numa_parse_sun4u(); 1267 } 1268 return err; 1269 } 1270 1271 #else 1272 1273 static int bootmem_init_numa(void) 1274 { 1275 return -1; 1276 } 1277 1278 #endif 1279 1280 static void __init bootmem_init_nonnuma(void) 1281 { 1282 unsigned long top_of_ram = lmb_end_of_DRAM(); 1283 unsigned long total_ram = lmb_phys_mem_size(); 1284 unsigned int i; 1285 1286 numadbg("bootmem_init_nonnuma()\n"); 1287 1288 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", 1289 top_of_ram, total_ram); 1290 printk(KERN_INFO "Memory hole size: %ldMB\n", 1291 (top_of_ram - total_ram) >> 20); 1292 1293 init_node_masks_nonnuma(); 1294 1295 for (i = 0; i < lmb.memory.cnt; i++) { 1296 unsigned long size = lmb_size_bytes(&lmb.memory, i); 1297 unsigned long start_pfn, end_pfn; 1298 1299 if (!size) 1300 continue; 1301 1302 start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; 1303 end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); 1304 add_active_range(0, start_pfn, end_pfn); 1305 } 1306 1307 allocate_node_data(0); 1308 1309 node_set_online(0); 1310 } 1311 1312 static void __init reserve_range_in_node(int nid, unsigned long start, 1313 unsigned long end) 1314 { 1315 numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n", 1316 nid, start, end); 1317 while (start < end) { 1318 unsigned long this_end; 1319 int n; 1320 1321 this_end = nid_range(start, end, &n); 1322 if (n == nid) { 1323 numadbg(" MATCH reserving range [%lx:%lx]\n", 1324 start, this_end); 1325 reserve_bootmem_node(NODE_DATA(nid), start, 1326 (this_end - start), BOOTMEM_DEFAULT); 1327 } else 1328 numadbg(" NO MATCH, advancing start to %lx\n", 1329 this_end); 1330 1331 start = this_end; 1332 } 1333 } 1334 1335 static void __init trim_reserved_in_node(int nid) 1336 { 1337 int i; 1338 1339 numadbg(" trim_reserved_in_node(%d)\n", nid); 1340 1341 for (i = 0; i < lmb.reserved.cnt; i++) { 1342 unsigned long start = lmb.reserved.region[i].base; 1343 unsigned long size = lmb_size_bytes(&lmb.reserved, i); 1344 unsigned long end = start + size; 1345 1346 reserve_range_in_node(nid, start, end); 1347 } 1348 } 1349 1350 static void __init bootmem_init_one_node(int nid) 1351 { 1352 struct pglist_data *p; 1353 1354 numadbg("bootmem_init_one_node(%d)\n", nid); 1355 1356 p = NODE_DATA(nid); 1357 1358 if (p->node_spanned_pages) { 1359 unsigned long paddr = node_masks[nid].bootmem_paddr; 1360 unsigned long end_pfn; 1361 1362 end_pfn = p->node_start_pfn + p->node_spanned_pages; 1363 1364 numadbg(" init_bootmem_node(%d, 
%lx, %lx, %lx)\n", 1365 nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); 1366 1367 init_bootmem_node(p, paddr >> PAGE_SHIFT, 1368 p->node_start_pfn, end_pfn); 1369 1370 numadbg(" free_bootmem_with_active_regions(%d, %lx)\n", 1371 nid, end_pfn); 1372 free_bootmem_with_active_regions(nid, end_pfn); 1373 1374 trim_reserved_in_node(nid); 1375 1376 numadbg(" sparse_memory_present_with_active_regions(%d)\n", 1377 nid); 1378 sparse_memory_present_with_active_regions(nid); 1379 } 1380 } 1381 1382 static unsigned long __init bootmem_init(unsigned long phys_base) 1383 { 1384 unsigned long end_pfn; 1385 int nid; 1386 1387 end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 1388 max_pfn = max_low_pfn = end_pfn; 1389 min_low_pfn = (phys_base >> PAGE_SHIFT); 1390 1391 if (bootmem_init_numa() < 0) 1392 bootmem_init_nonnuma(); 1393 1394 /* XXX cpu notifier XXX */ 1395 1396 for_each_online_node(nid) 1397 bootmem_init_one_node(nid); 1398 1399 sparse_init(); 1400 1401 return end_pfn; 1402 } 1403 1404 static struct linux_prom64_registers pall[MAX_BANKS] __initdata; 1405 static int pall_ents __initdata; 1406 1407 #ifdef CONFIG_DEBUG_PAGEALLOC 1408 static unsigned long __ref kernel_map_range(unsigned long pstart, 1409 unsigned long pend, pgprot_t prot) 1410 { 1411 unsigned long vstart = PAGE_OFFSET + pstart; 1412 unsigned long vend = PAGE_OFFSET + pend; 1413 unsigned long alloc_bytes = 0UL; 1414 1415 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { 1416 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", 1417 vstart, vend); 1418 prom_halt(); 1419 } 1420 1421 while (vstart < vend) { 1422 unsigned long this_end, paddr = __pa(vstart); 1423 pgd_t *pgd = pgd_offset_k(vstart); 1424 pud_t *pud; 1425 pmd_t *pmd; 1426 pte_t *pte; 1427 1428 pud = pud_offset(pgd, vstart); 1429 if (pud_none(*pud)) { 1430 pmd_t *new; 1431 1432 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1433 alloc_bytes += PAGE_SIZE; 1434 pud_populate(&init_mm, pud, new); 1435 } 1436 1437 pmd = pmd_offset(pud, vstart); 1438 if (!pmd_present(*pmd)) { 1439 pte_t *new; 1440 1441 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1442 alloc_bytes += PAGE_SIZE; 1443 pmd_populate_kernel(&init_mm, pmd, new); 1444 } 1445 1446 pte = pte_offset_kernel(pmd, vstart); 1447 this_end = (vstart + PMD_SIZE) & PMD_MASK; 1448 if (this_end > vend) 1449 this_end = vend; 1450 1451 while (vstart < this_end) { 1452 pte_val(*pte) = (paddr | pgprot_val(prot)); 1453 1454 vstart += PAGE_SIZE; 1455 paddr += PAGE_SIZE; 1456 pte++; 1457 } 1458 } 1459 1460 return alloc_bytes; 1461 } 1462 1463 extern unsigned int kvmap_linear_patch[1]; 1464 #endif /* CONFIG_DEBUG_PAGEALLOC */ 1465 1466 static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) 1467 { 1468 const unsigned long shift_256MB = 28; 1469 const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); 1470 const unsigned long size_256MB = (1UL << shift_256MB); 1471 1472 while (start < end) { 1473 long remains; 1474 1475 remains = end - start; 1476 if (remains < size_256MB) 1477 break; 1478 1479 if (start & mask_256MB) { 1480 start = (start + size_256MB) & ~mask_256MB; 1481 continue; 1482 } 1483 1484 while (remains >= size_256MB) { 1485 unsigned long index = start >> shift_256MB; 1486 1487 __set_bit(index, kpte_linear_bitmap); 1488 1489 start += size_256MB; 1490 remains -= size_256MB; 1491 } 1492 } 1493 } 1494 1495 static void __init init_kpte_bitmap(void) 1496 { 1497 unsigned long i; 1498 1499 for (i = 0; i < pall_ents; i++) { 1500 unsigned long phys_start, phys_end; 1501 1502 phys_start = 
pall[i].phys_addr; 1503 phys_end = phys_start + pall[i].reg_size; 1504 1505 mark_kpte_bitmap(phys_start, phys_end); 1506 } 1507 } 1508 1509 static void __init kernel_physical_mapping_init(void) 1510 { 1511 #ifdef CONFIG_DEBUG_PAGEALLOC 1512 unsigned long i, mem_alloced = 0UL; 1513 1514 for (i = 0; i < pall_ents; i++) { 1515 unsigned long phys_start, phys_end; 1516 1517 phys_start = pall[i].phys_addr; 1518 phys_end = phys_start + pall[i].reg_size; 1519 1520 mem_alloced += kernel_map_range(phys_start, phys_end, 1521 PAGE_KERNEL); 1522 } 1523 1524 printk("Allocated %ld bytes for kernel page tables.\n", 1525 mem_alloced); 1526 1527 kvmap_linear_patch[0] = 0x01000000; /* nop */ 1528 flushi(&kvmap_linear_patch[0]); 1529 1530 __flush_tlb_all(); 1531 #endif 1532 } 1533 1534 #ifdef CONFIG_DEBUG_PAGEALLOC 1535 void kernel_map_pages(struct page *page, int numpages, int enable) 1536 { 1537 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; 1538 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); 1539 1540 kernel_map_range(phys_start, phys_end, 1541 (enable ? PAGE_KERNEL : __pgprot(0))); 1542 1543 flush_tsb_kernel_range(PAGE_OFFSET + phys_start, 1544 PAGE_OFFSET + phys_end); 1545 1546 /* we should perform an IPI and flush all tlbs, 1547 * but that can deadlock->flush only current cpu. 1548 */ 1549 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, 1550 PAGE_OFFSET + phys_end); 1551 } 1552 #endif 1553 1554 unsigned long __init find_ecache_flush_span(unsigned long size) 1555 { 1556 int i; 1557 1558 for (i = 0; i < pavail_ents; i++) { 1559 if (pavail[i].reg_size >= size) 1560 return pavail[i].phys_addr; 1561 } 1562 1563 return ~0UL; 1564 } 1565 1566 static void __init tsb_phys_patch(void) 1567 { 1568 struct tsb_ldquad_phys_patch_entry *pquad; 1569 struct tsb_phys_patch_entry *p; 1570 1571 pquad = &__tsb_ldquad_phys_patch; 1572 while (pquad < &__tsb_ldquad_phys_patch_end) { 1573 unsigned long addr = pquad->addr; 1574 1575 if (tlb_type == hypervisor) 1576 *(unsigned int *) addr = pquad->sun4v_insn; 1577 else 1578 *(unsigned int *) addr = pquad->sun4u_insn; 1579 wmb(); 1580 __asm__ __volatile__("flush %0" 1581 : /* no outputs */ 1582 : "r" (addr)); 1583 1584 pquad++; 1585 } 1586 1587 p = &__tsb_phys_patch; 1588 while (p < &__tsb_phys_patch_end) { 1589 unsigned long addr = p->addr; 1590 1591 *(unsigned int *) addr = p->insn; 1592 wmb(); 1593 __asm__ __volatile__("flush %0" 1594 : /* no outputs */ 1595 : "r" (addr)); 1596 1597 p++; 1598 } 1599 } 1600 1601 /* Don't mark as init, we give this to the Hypervisor. */ 1602 #ifndef CONFIG_DEBUG_PAGEALLOC 1603 #define NUM_KTSB_DESCR 2 1604 #else 1605 #define NUM_KTSB_DESCR 1 1606 #endif 1607 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; 1608 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; 1609 1610 static void __init sun4v_ktsb_init(void) 1611 { 1612 unsigned long ktsb_pa; 1613 1614 /* First KTSB for PAGE_SIZE mappings. 
	 */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings. */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags. Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.
*/ 1718 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1719 #ifndef CONFIG_DEBUG_PAGEALLOC 1720 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 1721 #endif 1722 1723 if (tlb_type == hypervisor) 1724 sun4v_pgprot_init(); 1725 else 1726 sun4u_pgprot_init(); 1727 1728 if (tlb_type == cheetah_plus || 1729 tlb_type == hypervisor) 1730 tsb_phys_patch(); 1731 1732 if (tlb_type == hypervisor) { 1733 sun4v_patch_tlb_handlers(); 1734 sun4v_ktsb_init(); 1735 } 1736 1737 lmb_init(); 1738 1739 /* Find available physical memory... 1740 * 1741 * Read it twice in order to work around a bug in openfirmware. 1742 * The call to grab this table itself can cause openfirmware to 1743 * allocate memory, which in turn can take away some space from 1744 * the list of available memory. Reading it twice makes sure 1745 * we really do get the final value. 1746 */ 1747 read_obp_translations(); 1748 read_obp_memory("reg", &pall[0], &pall_ents); 1749 read_obp_memory("available", &pavail[0], &pavail_ents); 1750 read_obp_memory("available", &pavail[0], &pavail_ents); 1751 1752 phys_base = 0xffffffffffffffffUL; 1753 for (i = 0; i < pavail_ents; i++) { 1754 phys_base = min(phys_base, pavail[i].phys_addr); 1755 lmb_add(pavail[i].phys_addr, pavail[i].reg_size); 1756 } 1757 1758 lmb_reserve(kern_base, kern_size); 1759 1760 find_ramdisk(phys_base); 1761 1762 lmb_enforce_memory_limit(cmdline_memory_size); 1763 1764 lmb_analyze(); 1765 lmb_dump_all(); 1766 1767 set_bit(0, mmu_context_bmap); 1768 1769 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1770 1771 real_end = (unsigned long)_end; 1772 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); 1773 printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 1774 num_kernel_image_mappings); 1775 1776 /* Set kernel pgd to upper alias so physical page computations 1777 * work. 1778 */ 1779 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1780 1781 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); 1782 1783 /* Now can init the kernel/bad page tables. */ 1784 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1785 swapper_low_pmd_dir + (shift / sizeof(pgd_t))); 1786 1787 inherit_prom_mappings(); 1788 1789 init_kpte_bitmap(); 1790 1791 /* Ok, we can use our TLB miss and window trap handlers safely. */ 1792 setup_tba(); 1793 1794 __flush_tlb_all(); 1795 1796 if (tlb_type == hypervisor) 1797 sun4v_ktsb_register(); 1798 1799 prom_build_devicetree(); 1800 of_populate_present_mask(); 1801 #ifndef CONFIG_SMP 1802 of_fill_in_cpu_data(); 1803 #endif 1804 1805 if (tlb_type == hypervisor) { 1806 sun4v_mdesc_init(); 1807 mdesc_populate_present_mask(cpu_all_mask); 1808 #ifndef CONFIG_SMP 1809 mdesc_fill_in_cpu_data(cpu_all_mask); 1810 #endif 1811 } 1812 1813 /* Once the OF device tree and MDESC have been setup, we know 1814 * the list of possible cpus. Therefore we can allocate the 1815 * IRQ stacks. 1816 */ 1817 for_each_possible_cpu(i) { 1818 /* XXX Use node local allocations... XXX */ 1819 softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); 1820 hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); 1821 } 1822 1823 /* Setup bootmem... 
	 */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int __devinit page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory. So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22, bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

static void __init patch_tlb_miss_handler_bitmap(void)
{
	extern unsigned int valid_addr_bitmap_insn[];
	extern unsigned int valid_addr_bitmap_patch[];

	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
	mb();
	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
	flushi(&valid_addr_bitmap_insn[0]);
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
	patch_tlb_miss_handler_bitmap();

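	/* Note on the bitmap granularity: the loop above and
	 * setup_valid_addr_bitmap_from_pavail() both index the bitmap
	 * with "physical address >> 22", i.e. one bit per 4MB chunk of
	 * physical memory. For example, a valid page at physical address
	 * 0x42800000 sets bit 0x42800000 >> 22 == 0x10a in
	 * sparc64_valid_addr_bitmap.
	 */
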
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	{
		int i;
		for_each_online_node(i) {
			if (NODE_DATA(i)->node_spanned_pages != 0) {
				totalram_pages +=
					free_all_bootmem_node(NODE_DATA(i));
			}
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2015 */ 2016 addr = PAGE_ALIGN((unsigned long)(__init_begin)); 2017 initend = (unsigned long)(__init_end) & PAGE_MASK; 2018 for (; addr < initend; addr += PAGE_SIZE) { 2019 unsigned long page; 2020 struct page *p; 2021 2022 page = (addr + 2023 ((unsigned long) __va(kern_base)) - 2024 ((unsigned long) KERNBASE)); 2025 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 2026 2027 if (do_free) { 2028 p = virt_to_page(page); 2029 2030 ClearPageReserved(p); 2031 init_page_count(p); 2032 __free_page(p); 2033 num_physpages++; 2034 totalram_pages++; 2035 } 2036 } 2037 } 2038 2039 #ifdef CONFIG_BLK_DEV_INITRD 2040 void free_initrd_mem(unsigned long start, unsigned long end) 2041 { 2042 if (start < end) 2043 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 2044 for (; start < end; start += PAGE_SIZE) { 2045 struct page *p = virt_to_page(start); 2046 2047 ClearPageReserved(p); 2048 init_page_count(p); 2049 __free_page(p); 2050 num_physpages++; 2051 totalram_pages++; 2052 } 2053 } 2054 #endif 2055 2056 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 2057 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 2058 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 2059 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 2060 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 2061 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 2062 2063 pgprot_t PAGE_KERNEL __read_mostly; 2064 EXPORT_SYMBOL(PAGE_KERNEL); 2065 2066 pgprot_t PAGE_KERNEL_LOCKED __read_mostly; 2067 pgprot_t PAGE_COPY __read_mostly; 2068 2069 pgprot_t PAGE_SHARED __read_mostly; 2070 EXPORT_SYMBOL(PAGE_SHARED); 2071 2072 unsigned long pg_iobits __read_mostly; 2073 2074 unsigned long _PAGE_IE __read_mostly; 2075 EXPORT_SYMBOL(_PAGE_IE); 2076 2077 unsigned long _PAGE_E __read_mostly; 2078 EXPORT_SYMBOL(_PAGE_E); 2079 2080 unsigned long _PAGE_CACHE __read_mostly; 2081 EXPORT_SYMBOL(_PAGE_CACHE); 2082 2083 #ifdef CONFIG_SPARSEMEM_VMEMMAP 2084 unsigned long vmemmap_table[VMEMMAP_SIZE]; 2085 2086 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 2087 { 2088 unsigned long vstart = (unsigned long) start; 2089 unsigned long vend = (unsigned long) (start + nr); 2090 unsigned long phys_start = (vstart - VMEMMAP_BASE); 2091 unsigned long phys_end = (vend - VMEMMAP_BASE); 2092 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; 2093 unsigned long end = VMEMMAP_ALIGN(phys_end); 2094 unsigned long pte_base; 2095 2096 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2097 _PAGE_CP_4U | _PAGE_CV_4U | 2098 _PAGE_P_4U | _PAGE_W_4U); 2099 if (tlb_type == hypervisor) 2100 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2101 _PAGE_CP_4V | _PAGE_CV_4V | 2102 _PAGE_P_4V | _PAGE_W_4V); 2103 2104 for (; addr < end; addr += VMEMMAP_CHUNK) { 2105 unsigned long *vmem_pp = 2106 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); 2107 void *block; 2108 2109 if (!(*vmem_pp & _PAGE_VALID)) { 2110 block = vmemmap_alloc_block(1UL << 22, node); 2111 if (!block) 2112 return -ENOMEM; 2113 2114 *vmem_pp = pte_base | __pa(block); 2115 2116 printk(KERN_INFO "[%p-%p] page_structs=%lu " 2117 "node=%d entry=%lu/%lu\n", start, block, nr, 2118 node, 2119 addr >> VMEMMAP_CHUNK_SHIFT, 2120 VMEMMAP_SIZE); 2121 } 2122 } 2123 return 0; 2124 } 2125 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 2126 2127 static void prot_init_common(unsigned long page_none, 2128 unsigned long page_shared, 2129 unsigned long page_copy, 2130 unsigned long page_readonly, 2131 unsigned long 

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
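
/* protection_map[] above is indexed by the low four vm_flags bits of
 * a VMA: bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
 * bit 3 = VM_SHARED.  For example:
 *
 *	index 0x3 (VM_READ|VM_WRITE, private)   -> page_copy, non-exec
 *	index 0x7 (VM_READ|VM_WRITE|VM_EXEC)    -> page_copy, exec
 *	index 0xb (VM_READ|VM_WRITE|VM_SHARED)  -> page_shared, non-exec
 *
 * Private writable mappings get the copy-on-write protection (writes
 * fault and copy), while shared writable mappings get a directly
 * writable PTE.
 */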

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
			       _PAGE_CACHE_4U | _PAGE_P_4U |
			       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
			       _PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
				      _PAGE_CACHE_4U | _PAGE_P_4U |
				      __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				      _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

	/* The TLB miss handlers build a linear-mapping TTE by XOR-ing
	 * the (suitably masked) virtual address with
	 * kern_linear_pte_xor[N].  The 0xfffff80000000000UL constant
	 * matches the high bits of a linear (PAGE_OFFSET-based)
	 * address, so the XOR simultaneously strips those bits to
	 * recover the physical address and sets the PTE control bits
	 * folded into the constant.
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
			       _PAGE_CACHE_4V | _PAGE_P_4V |
			       __ACCESS_BITS_4V | __DIRTY_BITS_4V |
			       _PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space,
		unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
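
/* A usage sketch for mk_pte_io()/pte_sz_bits() above (the physical
 * address and space id are hypothetical, for illustration only):
 *
 *	// An uncached 8K mapping of an I/O page in bus space 0x7f:
 *	pte_t pte = mk_pte_io(0x1f000000000UL, PAGE_KERNEL,
 *			      0x7f, 8 * 1024);
 *
 * pgprot_noncached() strips the cacheability bits and marks the page
 * as having side effects, the space id is folded into bits 32 and up
 * of the physical address, and pte_sz_bits() supplies the TTE
 * page-size field (_PAGE_SZ8K_4U or _PAGE_SZ8K_4V here).
 */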

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	/* Flush the register windows, then disable interrupts
	 * (PSTATE.IE) for the duration of the scan, saving the old
	 * %pstate for restoration below.
	 */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		/* Spitfire has 64-entry instruction and data TLBs,
		 * walked one entry at a time.
		 */
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT),
					       "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS),
						       "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT),
					       "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS),
						       "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	/* Restore the saved %pstate, re-enabling interrupts if they
	 * were enabled on entry.
	 */
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
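
/* Note that the scan above deliberately skips TLB entries with the
 * lock bit (_PAGE_L_4U) set, so the locked translations covering the
 * kernel image and firmware survive the flush.  On sun4v the
 * hypervisor demap-all service is specified to remove only
 * non-permanent mappings, which gives the same effect without a
 * per-entry scan.
 */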