/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the pages are never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
        unsigned int order;
        unsigned long size;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
                SetPageReserved(page);
                page++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}
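
/*
 * Illustrative sketch (not part of this file): with zero_page_mask set up
 * as above, a ZERO_PAGE()-style lookup can pick the zero page whose
 * virtual colour matches a given user address, e.g.
 *
 *	static struct page *coloured_zero_page(unsigned long vaddr)
 *	{
 *		return virt_to_page((void *)(empty_zero_page +
 *					     (vaddr & zero_page_mask)));
 *	}
 *
 * The MIPS ZERO_PAGE() macro in pgtable.h follows this pattern; the helper
 * name above is hypothetical and shown only to document the mask's use.
 */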

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr)					\
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        inc_preempt_count();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id();
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_val(pte) >> 6;
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void *)vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        dec_preempt_count();
        preempt_check_resched();
}
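
/*
 * Usage sketch (illustrative only): kmap_coherent() maps 'page' at a
 * kernel virtual address whose cache colour matches the user virtual
 * address 'addr', so accesses through the mapping do not alias the user
 * view of the page.  A hypothetical caller copying into such a page
 * might do:
 *
 *	void *kaddr = kmap_coherent(page, addr);
 *	memcpy(kaddr + (addr & ~PAGE_MASK), src, len);
 *	kunmap_coherent();
 *
 * copy_user_highpage() and copy_to_user_page() below follow exactly this
 * pattern.
 */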

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                /* Reading from the page does not dirty its dcache lines. */
                memcpy(dst, src, len);
        }
}

#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}
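
/*
 * Illustrative sketch of a caller (the real ones live in pgtable-32.c /
 * pgtable-64.c and may differ in detail): pagetable_init() typically
 * covers the fixmap region at the top of the address space, e.g.
 *
 *	unsigned long vaddr;
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, 0, pgd_base);
 *
 * where an 'end' of 0 means "walk until vaddr wraps around", which is why
 * the loops above compare with (vaddr != end) rather than (vaddr < end).
 */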

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        /* not usable memory */
                        continue;

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}
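
/*
 * Worked example (illustrative numbers only): with 4 kB pages and 256 MB
 * of RAM starting at physical address 0, max_low_pfn = 0x10000, so
 * max_zone_pfns[ZONE_NORMAL] = 0x10000 and free_area_init_nodes() sizes
 * ZONE_NORMAL to cover PFNs [0, 0x10000).  Any PFN above max_low_pfn is
 * either placed in ZONE_HIGHMEM or, on dcache-aliasing CPUs, ignored as
 * warned about above.
 */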

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();	/* Set up zeroed pages. */

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }
        num_physpages = ram;

#ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                totalhigh_pages++;
        }
        totalram_pages += totalhigh_pages;
        num_physpages += totalhigh_pages;
#endif

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow. */
                kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               ram << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                ClearPageReserved(page);
                init_page_count(page);
                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory",
                        virt_to_phys((void *)start),
                        virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        free_init_pages("unused kernel memory",
                        __pa_symbol(&__init_begin),
                        __pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
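
/*
 * Expansion example (illustrative numbers): with PAGE_SIZE = 4096 and
 * _PGD_ORDER = 0, __page_aligned(_PGD_ORDER) expands to
 * __attribute__((__aligned__(4096))), i.e. swapper_pg_dir is aligned to
 * exactly one page, as the TLB refill handlers require.
 */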