/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after the initialization
 * we don't have to care about aliases on other CPUs.
 */
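/*
 * Editorial note (not in the original source): zero_page_mask selects the
 * colour bits inside the zeroed area, so callers can pick the page whose
 * virtual colour matches a given address, roughly:
 *
 *	zero = empty_zero_page + (vaddr & zero_page_mask);
 *
 * which is what the MIPS ZERO_PAGE() macro in <asm/pgtable.h> does.
 */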
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
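/*
 * Usage sketch (editorial, not from the original source): kmap_coherent()
 * and kunmap_coherent() bracket a short copy through a mapping whose
 * virtual colour matches the user mapping of the page:
 *
 *	void *p = kmap_coherent(page, uaddr);
 *	memcpy(p, buf, len);		// stores hit the same cache lines
 *	kunmap_coherent();		// the user mapping uses
 *
 * On non-SMTC kernels the mapping is pinned with a wired TLB entry, which
 * kunmap_coherent() overwrites with a unique dummy EntryHi before
 * unwiring it.
 */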
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}
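/*
 * Editorial note (not in the original source): PFN_UP() above rounds the
 * region start up and PFN_DOWN() rounds the end down, so a partial page at
 * either edge of a BOOT_MEM_RAM entry is deliberately not counted as RAM.
 */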
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
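/*
 * Editorial overview (not in the original source): mem_init() below
 * 1) releases bootmem to the buddy allocator (free_all_bootmem) and
 *    subtracts the reserved zero-page colour array (setup_zero_pages),
 * 2) counts RAM and reserved pages in low memory,
 * 3) frees highmem page by page when configured, and
 * 4) registers the /proc/kcore regions and prints the memory banner.
 */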
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level page tables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
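/*
 * Editorial note (not in the original source): _PTRS_PER_PGD, _PGD_ORDER
 * and friends are numeric mirrors of the corresponding page-table
 * constants, generated into <asm/asm-offsets.h> by
 * arch/mips/kernel/asm-offsets.c, so the old compilers mentioned above
 * see plain integer literals in the array sizes and alignment attributes.
 */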