/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
static int pavail_ents __devinitdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

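/* For example, a page that is dcache-dirty on cpu 5 carries both facts
 * in page->flags using the definitions above:
 *
 *	page->flags |= (1UL << PG_dcache_dirty);
 *	page->flags |= (5UL << PG_dcache_cpu_shift);
 *
 * set_dcache_dirty() builds exactly that value with a casx loop, and
 * clear_dcache_dirty_cpu() below compares the stored cpu field against
 * its 'cpu' argument before atomically clearing the dirty bit.
 */
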
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start, nid);
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

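/* For example (illustrative values only, the real numbers come from the
 * machine description): with a single mblock read above as
 * { base = 0x80000000, size = 4GB, offset = 0x400000000 }, ra_to_pa()
 * translates the real address 0x80001000 seen in the MD into the
 * physical address 0x480001000, which is what find_node() then matches
 * against the per-node mask/val pairs.
 */
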
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = memblock_nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	struct memblock_region *reg;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for_each_memblock(reserved, reg)
		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

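/* Worked example (illustrative only, the real bits come from the 'reg'
 * banks scanned above): a fully populated, 256MB-aligned bank starting
 * at physical 0x70000000 sets bit (0x70000000 >> 28) == 7 in
 * kpte_linear_bitmap, so the linear mapping uses a 256MB page via
 * kern_linear_pte_xor[1] for that chunk; chunks whose bit stays clear
 * keep using 4MB pages via kern_linear_pte_xor[0], as described at the
 * top of this file.
 */
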
static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

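/* patch_one_ktsb_phys() below does the same kind of run-time code
 * patching as tsb_phys_patch() above: judging from the 22-bit and
 * 10-bit immediate masks, it rewrites the %hi/%lo instruction pair
 * that forms the kernel TSB base address so it holds a physical
 * rather than virtual address, flushing each patched instruction so
 * the I-cache observes the new code.
 */
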
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	memblock_enforce_memory_limit(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
	}

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int __devinit page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22, bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

static void __init patch_tlb_miss_handler_bitmap(void)
{
	extern unsigned int valid_addr_bitmap_insn[];
	extern unsigned int valid_addr_bitmap_patch[];

	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
	mb();
	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
	flushi(&valid_addr_bitmap_insn[0]);
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

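	/* The loop above marks the kernel image itself: one bit in
	 * sparc64_valid_addr_bitmap covers a 4MB chunk of physical
	 * memory (hence the ">> 22"); e.g. a kernel page at physical
	 * 0x00c00000 sets bit 3.  The rescan of the OBP "available"
	 * list right below fills in the bits for the rest of RAM.
	 */
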
	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
	patch_tlb_miss_handler_bitmap();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	{
		int i;
		for_each_online_node(i) {
			if (NODE_DATA(i)->node_spanned_pages != 0) {
				totalram_pages +=
					free_all_bootmem_node(NODE_DATA(i));
			}
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];

int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

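/* prot_init_common() below fills protection_map[], which is indexed by
 * the vma's shared/exec/write/read protection bits.  For example, a
 * private PROT_READ|PROT_WRITE mapping (index 0x3) gets the
 * copy-on-write protection with the execute bit masked off, while a
 * shared PROT_READ|PROT_WRITE|PROT_EXEC mapping (index 0xf) gets the
 * writable, executable shared protection.
 */
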
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
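/* Both pgprot-init routines below also compute kern_linear_pte_xor[].
 * The idea (roughly) is that XOR-ing a linear-map virtual address with
 * one of these values yields a ready-made TTE: the 0xfffff80000000000UL
 * term cancels the PAGE_OFFSET bits of the address, leaving the
 * physical offset, while the remaining bits contribute the page-size,
 * cacheability, privileged and writable attributes.
 */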
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
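/* mk_pte_io() below builds a pte for a physical I/O mapping: 'page'
 * carries the low 32 bits of the bus address, 'space' supplies the
 * upper physical address bits (shifted up to bit 32), the protection
 * is forced uncacheable with pgprot_noncached(), and pte_sz_bits()
 * selects the page-size field.  A hypothetical caller mapping a 512K
 * region at physical address 'phys' might do:
 *
 *	pte = mk_pte_io(phys & 0xffffffffUL, prot, phys >> 32, 512 * 1024);
 */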
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space,
		unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 * cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}