/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>


#define DEBUG 0

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/* Protects the temporary DTLB mappings used for the cache-alias handling below. */
static DEFINE_SPINLOCK(tlb_lock);
#endif

/*
 * This flag is used to indicate that the page was mapped and modified in
 * kernel space, so the cache is probably dirty at that address.
 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
 * synchronizes the caches if this bit is set.
 */

#define PG_cache_clean	PG_arch_1

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve some memory from the memory pool.
 *
 * Parameters:
 *	start		Start of region,
 *	end		End of region,
 *	must_exist	Must exist in memory pool.
 *
 * Returns:
 *	0	(the region does not overlap any memory bank; nothing was reserved)
 *	-1	(the region was removed from the memory pool)
 */

int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
        int i;

        if (start == end)
                return 0;

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (i = 0; i < sysmem.nr_banks; i++)
                if (start < sysmem.bank[i].end
                    && end >= sysmem.bank[i].start)
                        break;

        if (i == sysmem.nr_banks) {
                if (must_exist)
                        printk(KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
                               "not in any region!\n", start, end);
                return 0;
        }

        if (start > sysmem.bank[i].start) {
                if (end < sysmem.bank[i].end) {
                        /* split entry */
                        if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
                                panic("meminfo overflow\n");
                        sysmem.bank[sysmem.nr_banks].start = end;
                        sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
                        sysmem.nr_banks++;
                }
                sysmem.bank[i].end = start;
        } else {
                if (end < sysmem.bank[i].end)
                        sysmem.bank[i].start = end;
                else {
                        /* remove entry */
                        sysmem.nr_banks--;
                        sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
                        sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
                }
        }
        return -1;
}
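
/*
 * Typical use, illustrative only (the actual call sites live in the platform
 * setup code, e.g. setup_arch(), not in this file): the kernel image and a
 * loaded initrd are carved out of the memory banks before bootmem_init()
 * hands the remainder to the bootmem allocator.  The truthy -1 return value
 * is convenient for recording whether the initrd was found in a bank:
 *
 *	mem_reserve(__pa(&_ftext), __pa(&_etext), 0);
 *	mem_reserve(__pa(&_fdata), __pa(&_edata), 0);
 *	initrd_is_mapped = mem_reserve(__pa(initrd_start),
 *				       __pa(initrd_end), 0);
 */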

/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
        unsigned long pfn;
        unsigned long bootmap_start, bootmap_size;
        int i;

        max_low_pfn = max_pfn = 0;
        min_low_pfn = ~0;

        for (i = 0; i < sysmem.nr_banks; i++) {
                pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
                if (pfn < min_low_pfn)
                        min_low_pfn = pfn;
                pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
                if (pfn > max_pfn)
                        max_pfn = pfn;
        }

        if (min_low_pfn > max_pfn)
                panic("No memory found!\n");

        max_low_pfn = max_pfn < (MAX_LOW_MEMORY >> PAGE_SHIFT) ?
                max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;

        /* Find an area to use for the bootmem bitmap. */

        bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
        bootmap_start = ~0;

        for (i = 0; i < sysmem.nr_banks; i++)
                if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
                        bootmap_start = sysmem.bank[i].start;
                        break;
                }

        if (bootmap_start == ~0UL)
                panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

        /* Reserve the bootmem bitmap area.  Note that the first pfn argument
         * of init_bootmem_node() is where the bitmap itself is placed; the
         * managed range follows as start/end pfns. */

        mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
        bootmap_size = init_bootmem_node(NODE_DATA(0),
                                         bootmap_start >> PAGE_SHIFT,
                                         min_low_pfn,
                                         max_low_pfn);

        /* Add all remaining memory pieces into the bootmem map. */

        for (i = 0; i < sysmem.nr_banks; i++)
                free_bootmem(sysmem.bank[i].start,
                             sysmem.bank[i].end - sysmem.bank[i].start);

}


void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        int i;

        /* All pages are DMA-able, so we put them all in the DMA zone. */

        zones_size[ZONE_DMA] = max_low_pfn;
        for (i = 1; i < MAX_NR_ZONES; i++)
                zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

        /* Initialize the kernel's page tables. */

        memset(swapper_pg_dir, 0, PAGE_SIZE);

        free_area_init(zones_size);
}
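
/*
 * Worked example (assumed figures, for illustration only): with 4 KB pages
 * and 128 MB of low memory, max_low_pfn is 32768 (0x8000), so ZONE_DMA
 * covers all 32768 page frames and every other zone stays empty; only a
 * CONFIG_HIGHMEM configuration would add the frames above max_low_pfn to
 * ZONE_HIGHMEM.
 */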

/*
 * Flush the MMU and reset the associated registers to default values.
 */

void __init init_mmu(void)
{
        /* Writing zeros to the <t>TLBCFG special registers ensures
         * that valid values exist in the register.  For existing
         * PGSZID<w> fields, zero selects the first element of the
         * page-size array.  For nonexistent PGSZID<w> fields, zero is
         * the best value to write.  Also, when changing PGSZID<w>
         * fields, the corresponding TLB must be flushed.
         */
        set_itlbcfg_register(0);
        set_dtlbcfg_register(0);
        flush_tlb_all();

        /* Set rasid register to a known value. */

        set_rasid_register(ASID_ALL_RESERVED);

        /* Set PTEVADDR special register to the start of the page
         * table, which is in kernel-mappable space (i.e. not
         * statically mapped).  This register's value is undefined on
         * reset.
         */
        set_ptevaddr_register(PGTABLE_START);
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long highmemsize, tmp, ram;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *)__va(max_mapnr << PAGE_SHIFT);
        highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHMEM not implemented in init.c
#endif

        totalram_pages += free_all_bootmem();

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++) {
                ram++;
                if (PageReserved(mem_map + tmp))
                        reservedpages++;
        }

        codesize = (unsigned long)&_etext - (unsigned long)&_ftext;
        datasize = (unsigned long)&_edata - (unsigned long)&_fdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
               "%ldk data, %ldk init, %ldk highmem)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
               ram << (PAGE_SHIFT - 10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT - 10),
               datasize >> 10,
               initsize >> 10,
               highmemsize >> 10);
}

void
free_reserved_mem(void *start, void *end)
{
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page((unsigned long)start);
                totalram_pages++;
        }
}

#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (initrd_is_mapped) {
                free_reserved_mem((void *)start, (void *)end);
                printk("Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        }
}
#endif

void free_initmem(void)
{
        free_reserved_mem(&__init_begin, &__init_end);
        printk("Freeing unused kernel memory: %dk freed\n",
               (&__init_end - &__init_begin) >> 10);
}

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map + i))
                        reserved++;
                else if (PageSwapCache(mem_map + i))
                        cached++;
                else if (!page_count(mem_map + i))
                        free++;
                else
                        shared += page_count(mem_map + i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%d free pages\n", free);
}

/* ------------------------------------------------------------------------- */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * With cache aliasing, the page color of the page in kernel space and user
 * space might mismatch. We temporarily map the page to a different virtual
 * address with the same color and clear the page there.
 */
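
/*
 * Worked example (assumed cache geometry, for illustration only): with a
 * D-cache way size of 8 KB and 4 KB pages there are two page colors,
 * selected by virtual address bit 12.  A physical page that the kernel maps
 * at an address of one color while user space maps it at an address of the
 * other color can end up with two incoherent cache copies; the helpers
 * below avoid that by temporarily remapping to a matching color.
 */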

void clear_user_page(void *kaddr, unsigned long vaddr, struct page *page)
{
        /* There shouldn't be any entries for this page. */

        __flush_invalidate_dcache_page_phys(__pa(page_address(page)));

        if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
                unsigned long v, p;

                /* Temporarily map the page to DTLB_WAY_DCACHE_ALIAS0. */

                spin_lock(&tlb_lock);

                p = (unsigned long)pte_val((mk_pte(page, PAGE_KERNEL)));
                kaddr = (void *)PAGE_COLOR_MAP0(vaddr);
                v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
                __asm__ __volatile__("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));

                clear_page(kaddr);

                spin_unlock(&tlb_lock);
        } else {
                clear_page(kaddr);
        }

        /* We need to make sure that i$ and d$ are coherent. */

        clear_bit(PG_cache_clean, &page->flags);
}

/*
 * With cache aliasing, we have to make sure that the page color of the page
 * in kernel space matches that of the user-space virtual address before we
 * read the page. If the page colors differ, we create a temporary DTLB entry
 * with the correct page color and use this 'temporary' address as the source.
 * We then use the same approach as in clear_user_page, copy the data to
 * kernel space, and clear the PG_cache_clean bit so the caches are
 * synchronized later.
 *
 * Note:
 * Instead of using another 'way' for the temporary DTLB entry, we could
 * probably use the same entry that points to the kernel address (after
 * saving the original value and restoring it when we are done).
 */

void copy_user_page(void *to, void *from, unsigned long vaddr,
                    struct page *to_page)
{
        /* There shouldn't be any entries for the new page. */

        __flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

        spin_lock(&tlb_lock);

        if (!PAGE_COLOR_EQ(vaddr, from)) {
                unsigned long v, p, t;

                __asm__ __volatile__("pdtlb %1,%2; rdtlb1 %0,%1"
                                     : "=a"(p), "=a"(t) : "a"(from));
                from = (void *)PAGE_COLOR_MAP0(vaddr);
                v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
                __asm__ __volatile__("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
        }

        if (!PAGE_COLOR_EQ(vaddr, to)) {
                unsigned long v, p;

                p = (unsigned long)pte_val((mk_pte(to_page, PAGE_KERNEL)));
                to = (void *)PAGE_COLOR_MAP1(vaddr);
                v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
                __asm__ __volatile__("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
        }
        copy_page(to, from);

        spin_unlock(&tlb_lock);

        /* We need to make sure that i$ and d$ are coherent. */

        clear_bit(PG_cache_clean, &to_page->flags);
}



/*
 * This routine is called any time the kernel writes to a page cache page,
 * or is about to read from one.
 *
 * Note:
 * The kernel currently only provides one architecture bit in the page
 * flags that we use for I$/D$ coherency. Maybe, in the future, we can
 * use a separate bit for deferred dcache aliasing:
 * if the page is not mapped yet, we only need to set a flag;
 * if it is mapped, we need to invalidate the page.
 */
/* FIXME: we probably need this for write-back caches, not only for page coloring. */
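
/*
 * As a rough guide (see Documentation/cachetlb.txt), the generic kernel
 * calls flush_dcache_page() after writing into a page-cache page that may
 * also be mapped into user space, and before reading data that user space
 * may have written there, so this is where deferred D$/I$ maintenance has
 * to happen.
 */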

void flush_dcache_page(struct page *page)
{
        unsigned long addr = __pa(page_address(page));
        struct address_space *mapping = page_mapping(page);

        __flush_invalidate_dcache_page_phys(addr);

        if (!test_bit(PG_cache_clean, &page->flags))
                return;

        /* If this page hasn't been mapped yet, handle I$/D$ coherency later. */
#if 0
        if (mapping && !mapping_mapped(mapping))
                clear_bit(PG_cache_clean, &page->flags);
        else
#endif
                __invalidate_icache_page_phys(addr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long s,
                       unsigned long e)
{
        __flush_invalidate_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                      unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);

        /* Remove any entry for the old mapping. */

        if (current->active_mm == vma->vm_mm) {
                unsigned long addr = __pa(page_address(page));
                __flush_invalidate_dcache_page_phys(addr);
                if ((vma->vm_flags & VM_EXEC) != 0)
                        __invalidate_icache_page_phys(addr);
        } else {
                BUG();
        }
}

#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */


pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 0);

        if (likely(pte)) {
                /* Clear all 1024 (PTRS_PER_PTE) entries of the new table. */
                pte_t *ptep = pte;
                int i;

                for (i = 0; i < 1024; i++, ptep++)
                        pte_clear(mm, addr, ptep);
        }
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);

        if (likely(page)) {
                pte_t *ptep = kmap_atomic(page, KM_USER0);
                pte_t *tmp = ptep;
                int i;

                for (i = 0; i < 1024; i++, tmp++)
                        pte_clear(mm, addr, tmp);

                kunmap_atomic(ptep, KM_USER0);
        }
        return page;
}


/*
 * Handle D$/I$ coherency.
 *
 * Note:
 * We only have one architecture bit for the page flags, so we cannot handle
 * cache aliasing, yet.
 */

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;
        unsigned long vaddr = addr & PAGE_MASK;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);

        invalidate_itlb_mapping(addr);
        invalidate_dtlb_mapping(addr);

        /* We have a new mapping. Use it. */

        write_dtlb_entry(pte, dtlb_probe(addr));

        /* If the processor can execute from this page, synchronize D$/I$. */

        if ((vma->vm_flags & VM_EXEC) != 0) {

                write_itlb_entry(pte, itlb_probe(addr));

                /* Synchronize caches, if not clean. */

                if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
                        __flush_dcache_page(vaddr);
                        __invalidate_icache_page(vaddr);
                }
        }
}
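
/*
 * Summary of the PG_cache_clean protocol used above (descriptive only, no
 * new behavior): clear_user_page() and copy_user_page() clear the bit after
 * writing a page through a kernel mapping, marking the caches as possibly
 * incoherent; flush_dcache_page() flushes the D-cache and, if the bit is
 * set, also invalidates the I-cache; update_mmu_cache() synchronizes D$/I$
 * and sets the bit the first time an executable user mapping is installed
 * while the bit is still clear.
 */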