1 /* 2 * arch/sh/mm/cache-sh4.c 3 * 4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka 5 * Copyright (C) 2001 - 2007 Paul Mundt 6 * Copyright (C) 2003 Richard Curnow 7 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. 8 * 9 * This file is subject to the terms and conditions of the GNU General Public 10 * License. See the file "COPYING" in the main directory of this archive 11 * for more details. 12 */ 13 #include <linux/init.h> 14 #include <linux/mm.h> 15 #include <linux/io.h> 16 #include <linux/mutex.h> 17 #include <linux/fs.h> 18 #include <asm/mmu_context.h> 19 #include <asm/cacheflush.h> 20 21 /* 22 * The maximum number of pages we support up to when doing ranged dcache 23 * flushing. Anything exceeding this will simply flush the dcache in its 24 * entirety. 25 */ 26 #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ 27 #define MAX_ICACHE_PAGES 32 28 29 static void __flush_cache_4096(unsigned long addr, unsigned long phys, 30 unsigned long exec_offset); 31 32 /* 33 * This is initialised here to ensure that it is not placed in the BSS. If 34 * that were to happen, note that cache_init gets called before the BSS is 35 * cleared, so this would get nulled out which would be hopeless. 36 */ 37 static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = 38 (void (*)(unsigned long, unsigned long))0xdeadbeef; 39 40 /* 41 * Write back the range of D-cache, and purge the I-cache. 42 * 43 * Called from kernel/module.c:sys_init_module and routine for a.out format, 44 * signal handler code and kprobes code 45 */ 46 static void sh4_flush_icache_range(void *args) 47 { 48 struct flusher_data *data = args; 49 unsigned long start, end; 50 unsigned long flags, v; 51 int i; 52 53 start = data->addr1; 54 end = data->addr2; 55 56 /* If there are too many pages then just blow away the caches */ 57 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { 58 local_flush_cache_all(NULL); 59 return; 60 } 61 62 /* 63 * Selectively flush d-cache then invalidate the i-cache. 64 * This is inefficient, so only use this for small ranges. 65 */ 66 start &= ~(L1_CACHE_BYTES-1); 67 end += L1_CACHE_BYTES-1; 68 end &= ~(L1_CACHE_BYTES-1); 69 70 local_irq_save(flags); 71 jump_to_uncached(); 72 73 for (v = start; v < end; v += L1_CACHE_BYTES) { 74 unsigned long icacheaddr; 75 76 __ocbwb(v); 77 78 icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v & 79 cpu_data->icache.entry_mask); 80 81 /* Clear i-cache line valid-bit */ 82 for (i = 0; i < cpu_data->icache.ways; i++) { 83 __raw_writel(0, icacheaddr); 84 icacheaddr += cpu_data->icache.way_incr; 85 } 86 } 87 88 back_to_cached(); 89 local_irq_restore(flags); 90 } 91 92 static inline void flush_cache_4096(unsigned long start, 93 unsigned long phys) 94 { 95 unsigned long flags, exec_offset = 0; 96 97 /* 98 * All types of SH-4 require PC to be in P2 to operate on the I-cache. 99 * Some types of SH-4 require PC to be in P2 to operate on the D-cache. 100 */ 101 if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || 102 (start < CACHE_OC_ADDRESS_ARRAY)) 103 exec_offset = 0x20000000; 104 105 local_irq_save(flags); 106 __flush_cache_4096(start | SH_CACHE_ASSOC, 107 P1SEGADDR(phys), exec_offset); 108 local_irq_restore(flags); 109 } 110 111 /* 112 * Write back & invalidate the D-cache of the page. 113 * (To avoid "alias" issues) 114 */ 115 static void sh4_flush_dcache_page(void *arg) 116 { 117 struct page *page = arg; 118 #ifndef CONFIG_SMP 119 struct address_space *mapping = page_mapping(page); 120 121 if (mapping && !mapping_mapped(mapping)) 122 set_bit(PG_dcache_dirty, &page->flags); 123 else 124 #endif 125 { 126 unsigned long phys = PHYSADDR(page_address(page)); 127 unsigned long addr = CACHE_OC_ADDRESS_ARRAY; 128 int i, n; 129 130 /* Loop all the D-cache */ 131 n = boot_cpu_data.dcache.n_aliases; 132 for (i = 0; i < n; i++, addr += 4096) 133 flush_cache_4096(addr, phys); 134 } 135 136 wmb(); 137 } 138 139 /* TODO: Selective icache invalidation through IC address array.. */ 140 static void __uses_jump_to_uncached flush_icache_all(void) 141 { 142 unsigned long flags, ccr; 143 144 local_irq_save(flags); 145 jump_to_uncached(); 146 147 /* Flush I-cache */ 148 ccr = ctrl_inl(CCR); 149 ccr |= CCR_CACHE_ICI; 150 ctrl_outl(ccr, CCR); 151 152 /* 153 * back_to_cached() will take care of the barrier for us, don't add 154 * another one! 155 */ 156 157 back_to_cached(); 158 local_irq_restore(flags); 159 } 160 161 static inline void flush_dcache_all(void) 162 { 163 (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); 164 wmb(); 165 } 166 167 static void sh4_flush_cache_all(void *unused) 168 { 169 flush_dcache_all(); 170 flush_icache_all(); 171 } 172 173 static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, 174 unsigned long end) 175 { 176 unsigned long d = 0, p = start & PAGE_MASK; 177 unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; 178 unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; 179 unsigned long select_bit; 180 unsigned long all_aliases_mask; 181 unsigned long addr_offset; 182 pgd_t *dir; 183 pmd_t *pmd; 184 pud_t *pud; 185 pte_t *pte; 186 int i; 187 188 dir = pgd_offset(mm, p); 189 pud = pud_offset(dir, p); 190 pmd = pmd_offset(pud, p); 191 end = PAGE_ALIGN(end); 192 193 all_aliases_mask = (1 << n_aliases) - 1; 194 195 do { 196 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { 197 p &= PMD_MASK; 198 p += PMD_SIZE; 199 pmd++; 200 201 continue; 202 } 203 204 pte = pte_offset_kernel(pmd, p); 205 206 do { 207 unsigned long phys; 208 pte_t entry = *pte; 209 210 if (!(pte_val(entry) & _PAGE_PRESENT)) { 211 pte++; 212 p += PAGE_SIZE; 213 continue; 214 } 215 216 phys = pte_val(entry) & PTE_PHYS_MASK; 217 218 if ((p ^ phys) & alias_mask) { 219 d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); 220 d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); 221 222 if (d == all_aliases_mask) 223 goto loop_exit; 224 } 225 226 pte++; 227 p += PAGE_SIZE; 228 } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); 229 pmd++; 230 } while (p < end); 231 232 loop_exit: 233 addr_offset = 0; 234 select_bit = 1; 235 236 for (i = 0; i < n_aliases; i++) { 237 if (d & select_bit) { 238 (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); 239 wmb(); 240 } 241 242 select_bit <<= 1; 243 addr_offset += PAGE_SIZE; 244 } 245 } 246 247 /* 248 * Note : (RPC) since the caches are physically tagged, the only point 249 * of flush_cache_mm for SH-4 is to get rid of aliases from the 250 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that 251 * lines can stay resident so long as the virtual address they were 252 * accessed with (hence cache set) is in accord with the physical 253 * address (i.e. tag). It's no different here. So I reckon we don't 254 * need to flush the I-cache, since aliases don't matter for that. We 255 * should try that. 256 * 257 * Caller takes mm->mmap_sem. 258 */ 259 static void sh4_flush_cache_mm(void *arg) 260 { 261 struct mm_struct *mm = arg; 262 263 if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) 264 return; 265 266 /* 267 * If cache is only 4k-per-way, there are never any 'aliases'. Since 268 * the cache is physically tagged, the data can just be left in there. 269 */ 270 if (boot_cpu_data.dcache.n_aliases == 0) 271 return; 272 273 /* 274 * Don't bother groveling around the dcache for the VMA ranges 275 * if there are too many PTEs to make it worthwhile. 276 */ 277 if (mm->nr_ptes >= MAX_DCACHE_PAGES) 278 flush_dcache_all(); 279 else { 280 struct vm_area_struct *vma; 281 282 /* 283 * In this case there are reasonably sized ranges to flush, 284 * iterate through the VMA list and take care of any aliases. 285 */ 286 for (vma = mm->mmap; vma; vma = vma->vm_next) 287 __flush_cache_mm(mm, vma->vm_start, vma->vm_end); 288 } 289 290 /* Only touch the icache if one of the VMAs has VM_EXEC set. */ 291 if (mm->exec_vm) 292 flush_icache_all(); 293 } 294 295 /* 296 * Write back and invalidate I/D-caches for the page. 297 * 298 * ADDR: Virtual Address (U0 address) 299 * PFN: Physical page number 300 */ 301 static void sh4_flush_cache_page(void *args) 302 { 303 struct flusher_data *data = args; 304 struct vm_area_struct *vma; 305 unsigned long address, pfn, phys; 306 unsigned int alias_mask; 307 308 vma = data->vma; 309 address = data->addr1; 310 pfn = data->addr2; 311 phys = pfn << PAGE_SHIFT; 312 313 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) 314 return; 315 316 alias_mask = boot_cpu_data.dcache.alias_mask; 317 318 /* We only need to flush D-cache when we have alias */ 319 if ((address^phys) & alias_mask) { 320 /* Loop 4K of the D-cache */ 321 flush_cache_4096( 322 CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), 323 phys); 324 /* Loop another 4K of the D-cache */ 325 flush_cache_4096( 326 CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), 327 phys); 328 } 329 330 alias_mask = boot_cpu_data.icache.alias_mask; 331 if (vma->vm_flags & VM_EXEC) { 332 /* 333 * Evict entries from the portion of the cache from which code 334 * may have been executed at this address (virtual). There's 335 * no need to evict from the portion corresponding to the 336 * physical address as for the D-cache, because we know the 337 * kernel has never executed the code through its identity 338 * translation. 339 */ 340 flush_cache_4096( 341 CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), 342 phys); 343 } 344 } 345 346 /* 347 * Write back and invalidate D-caches. 348 * 349 * START, END: Virtual Address (U0 address) 350 * 351 * NOTE: We need to flush the _physical_ page entry. 352 * Flushing the cache lines for U0 only isn't enough. 353 * We need to flush for P1 too, which may contain aliases. 354 */ 355 static void sh4_flush_cache_range(void *args) 356 { 357 struct flusher_data *data = args; 358 struct vm_area_struct *vma; 359 unsigned long start, end; 360 361 vma = data->vma; 362 start = data->addr1; 363 end = data->addr2; 364 365 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) 366 return; 367 368 /* 369 * If cache is only 4k-per-way, there are never any 'aliases'. Since 370 * the cache is physically tagged, the data can just be left in there. 371 */ 372 if (boot_cpu_data.dcache.n_aliases == 0) 373 return; 374 375 /* 376 * Don't bother with the lookup and alias check if we have a 377 * wide range to cover, just blow away the dcache in its 378 * entirety instead. -- PFM. 379 */ 380 if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) 381 flush_dcache_all(); 382 else 383 __flush_cache_mm(vma->vm_mm, start, end); 384 385 if (vma->vm_flags & VM_EXEC) { 386 /* 387 * TODO: Is this required??? Need to look at how I-cache 388 * coherency is assured when new programs are loaded to see if 389 * this matters. 390 */ 391 flush_icache_all(); 392 } 393 } 394 395 /** 396 * __flush_cache_4096 397 * 398 * @addr: address in memory mapped cache array 399 * @phys: P1 address to flush (has to match tags if addr has 'A' bit 400 * set i.e. associative write) 401 * @exec_offset: set to 0x20000000 if flush has to be executed from P2 402 * region else 0x0 403 * 404 * The offset into the cache array implied by 'addr' selects the 405 * 'colour' of the virtual address range that will be flushed. The 406 * operation (purge/write-back) is selected by the lower 2 bits of 407 * 'phys'. 408 */ 409 static void __flush_cache_4096(unsigned long addr, unsigned long phys, 410 unsigned long exec_offset) 411 { 412 int way_count; 413 unsigned long base_addr = addr; 414 struct cache_info *dcache; 415 unsigned long way_incr; 416 unsigned long a, ea, p; 417 unsigned long temp_pc; 418 419 dcache = &boot_cpu_data.dcache; 420 /* Write this way for better assembly. */ 421 way_count = dcache->ways; 422 way_incr = dcache->way_incr; 423 424 /* 425 * Apply exec_offset (i.e. branch to P2 if required.). 426 * 427 * FIXME: 428 * 429 * If I write "=r" for the (temp_pc), it puts this in r6 hence 430 * trashing exec_offset before it's been added on - why? Hence 431 * "=&r" as a 'workaround' 432 */ 433 asm volatile("mov.l 1f, %0\n\t" 434 "add %1, %0\n\t" 435 "jmp @%0\n\t" 436 "nop\n\t" 437 ".balign 4\n\t" 438 "1: .long 2f\n\t" 439 "2:\n" : "=&r" (temp_pc) : "r" (exec_offset)); 440 441 /* 442 * We know there will be >=1 iteration, so write as do-while to avoid 443 * pointless nead-of-loop check for 0 iterations. 444 */ 445 do { 446 ea = base_addr + PAGE_SIZE; 447 a = base_addr; 448 p = phys; 449 450 do { 451 *(volatile unsigned long *)a = p; 452 /* 453 * Next line: intentionally not p+32, saves an add, p 454 * will do since only the cache tag bits need to 455 * match. 456 */ 457 *(volatile unsigned long *)(a+32) = p; 458 a += 64; 459 p += 64; 460 } while (a < ea); 461 462 base_addr += way_incr; 463 } while (--way_count != 0); 464 } 465 466 /* 467 * Break the 1, 2 and 4 way variants of this out into separate functions to 468 * avoid nearly all the overhead of having the conditional stuff in the function 469 * bodies (+ the 1 and 2 way cases avoid saving any registers too). 470 * 471 * We want to eliminate unnecessary bus transactions, so this code uses 472 * a non-obvious technique. 473 * 474 * Loop over a cache way sized block of, one cache line at a time. For each 475 * line, use movca.a to cause the current cache line contents to be written 476 * back, but without reading anything from main memory. However this has the 477 * side effect that the cache is now caching that memory location. So follow 478 * this with a cache invalidate to mark the cache line invalid. And do all 479 * this with interrupts disabled, to avoid the cache line being accidently 480 * evicted while it is holding garbage. 481 * 482 * This also breaks in a number of circumstances: 483 * - if there are modifications to the region of memory just above 484 * empty_zero_page (for example because a breakpoint has been placed 485 * there), then these can be lost. 486 * 487 * This is because the the memory address which the cache temporarily 488 * caches in the above description is empty_zero_page. So the 489 * movca.l hits the cache (it is assumed that it misses, or at least 490 * isn't dirty), modifies the line and then invalidates it, losing the 491 * required change. 492 * 493 * - If caches are disabled or configured in write-through mode, then 494 * the movca.l writes garbage directly into memory. 495 */ 496 static void __flush_dcache_segment_writethrough(unsigned long start, 497 unsigned long extent_per_way) 498 { 499 unsigned long addr; 500 int i; 501 502 addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); 503 504 while (extent_per_way) { 505 for (i = 0; i < cpu_data->dcache.ways; i++) 506 __raw_writel(0, addr + cpu_data->dcache.way_incr * i); 507 508 addr += cpu_data->dcache.linesz; 509 extent_per_way -= cpu_data->dcache.linesz; 510 } 511 } 512 513 static void __flush_dcache_segment_1way(unsigned long start, 514 unsigned long extent_per_way) 515 { 516 unsigned long orig_sr, sr_with_bl; 517 unsigned long base_addr; 518 unsigned long way_incr, linesz, way_size; 519 struct cache_info *dcache; 520 register unsigned long a0, a0e; 521 522 asm volatile("stc sr, %0" : "=r" (orig_sr)); 523 sr_with_bl = orig_sr | (1<<28); 524 base_addr = ((unsigned long)&empty_zero_page[0]); 525 526 /* 527 * The previous code aligned base_addr to 16k, i.e. the way_size of all 528 * existing SH-4 D-caches. Whilst I don't see a need to have this 529 * aligned to any better than the cache line size (which it will be 530 * anyway by construction), let's align it to at least the way_size of 531 * any existing or conceivable SH-4 D-cache. -- RPC 532 */ 533 base_addr = ((base_addr >> 16) << 16); 534 base_addr |= start; 535 536 dcache = &boot_cpu_data.dcache; 537 linesz = dcache->linesz; 538 way_incr = dcache->way_incr; 539 way_size = dcache->way_size; 540 541 a0 = base_addr; 542 a0e = base_addr + extent_per_way; 543 do { 544 asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); 545 asm volatile("movca.l r0, @%0\n\t" 546 "ocbi @%0" : : "r" (a0)); 547 a0 += linesz; 548 asm volatile("movca.l r0, @%0\n\t" 549 "ocbi @%0" : : "r" (a0)); 550 a0 += linesz; 551 asm volatile("movca.l r0, @%0\n\t" 552 "ocbi @%0" : : "r" (a0)); 553 a0 += linesz; 554 asm volatile("movca.l r0, @%0\n\t" 555 "ocbi @%0" : : "r" (a0)); 556 asm volatile("ldc %0, sr" : : "r" (orig_sr)); 557 a0 += linesz; 558 } while (a0 < a0e); 559 } 560 561 static void __flush_dcache_segment_2way(unsigned long start, 562 unsigned long extent_per_way) 563 { 564 unsigned long orig_sr, sr_with_bl; 565 unsigned long base_addr; 566 unsigned long way_incr, linesz, way_size; 567 struct cache_info *dcache; 568 register unsigned long a0, a1, a0e; 569 570 asm volatile("stc sr, %0" : "=r" (orig_sr)); 571 sr_with_bl = orig_sr | (1<<28); 572 base_addr = ((unsigned long)&empty_zero_page[0]); 573 574 /* See comment under 1-way above */ 575 base_addr = ((base_addr >> 16) << 16); 576 base_addr |= start; 577 578 dcache = &boot_cpu_data.dcache; 579 linesz = dcache->linesz; 580 way_incr = dcache->way_incr; 581 way_size = dcache->way_size; 582 583 a0 = base_addr; 584 a1 = a0 + way_incr; 585 a0e = base_addr + extent_per_way; 586 do { 587 asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); 588 asm volatile("movca.l r0, @%0\n\t" 589 "movca.l r0, @%1\n\t" 590 "ocbi @%0\n\t" 591 "ocbi @%1" : : 592 "r" (a0), "r" (a1)); 593 a0 += linesz; 594 a1 += linesz; 595 asm volatile("movca.l r0, @%0\n\t" 596 "movca.l r0, @%1\n\t" 597 "ocbi @%0\n\t" 598 "ocbi @%1" : : 599 "r" (a0), "r" (a1)); 600 a0 += linesz; 601 a1 += linesz; 602 asm volatile("movca.l r0, @%0\n\t" 603 "movca.l r0, @%1\n\t" 604 "ocbi @%0\n\t" 605 "ocbi @%1" : : 606 "r" (a0), "r" (a1)); 607 a0 += linesz; 608 a1 += linesz; 609 asm volatile("movca.l r0, @%0\n\t" 610 "movca.l r0, @%1\n\t" 611 "ocbi @%0\n\t" 612 "ocbi @%1" : : 613 "r" (a0), "r" (a1)); 614 asm volatile("ldc %0, sr" : : "r" (orig_sr)); 615 a0 += linesz; 616 a1 += linesz; 617 } while (a0 < a0e); 618 } 619 620 static void __flush_dcache_segment_4way(unsigned long start, 621 unsigned long extent_per_way) 622 { 623 unsigned long orig_sr, sr_with_bl; 624 unsigned long base_addr; 625 unsigned long way_incr, linesz, way_size; 626 struct cache_info *dcache; 627 register unsigned long a0, a1, a2, a3, a0e; 628 629 asm volatile("stc sr, %0" : "=r" (orig_sr)); 630 sr_with_bl = orig_sr | (1<<28); 631 base_addr = ((unsigned long)&empty_zero_page[0]); 632 633 /* See comment under 1-way above */ 634 base_addr = ((base_addr >> 16) << 16); 635 base_addr |= start; 636 637 dcache = &boot_cpu_data.dcache; 638 linesz = dcache->linesz; 639 way_incr = dcache->way_incr; 640 way_size = dcache->way_size; 641 642 a0 = base_addr; 643 a1 = a0 + way_incr; 644 a2 = a1 + way_incr; 645 a3 = a2 + way_incr; 646 a0e = base_addr + extent_per_way; 647 do { 648 asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); 649 asm volatile("movca.l r0, @%0\n\t" 650 "movca.l r0, @%1\n\t" 651 "movca.l r0, @%2\n\t" 652 "movca.l r0, @%3\n\t" 653 "ocbi @%0\n\t" 654 "ocbi @%1\n\t" 655 "ocbi @%2\n\t" 656 "ocbi @%3\n\t" : : 657 "r" (a0), "r" (a1), "r" (a2), "r" (a3)); 658 a0 += linesz; 659 a1 += linesz; 660 a2 += linesz; 661 a3 += linesz; 662 asm volatile("movca.l r0, @%0\n\t" 663 "movca.l r0, @%1\n\t" 664 "movca.l r0, @%2\n\t" 665 "movca.l r0, @%3\n\t" 666 "ocbi @%0\n\t" 667 "ocbi @%1\n\t" 668 "ocbi @%2\n\t" 669 "ocbi @%3\n\t" : : 670 "r" (a0), "r" (a1), "r" (a2), "r" (a3)); 671 a0 += linesz; 672 a1 += linesz; 673 a2 += linesz; 674 a3 += linesz; 675 asm volatile("movca.l r0, @%0\n\t" 676 "movca.l r0, @%1\n\t" 677 "movca.l r0, @%2\n\t" 678 "movca.l r0, @%3\n\t" 679 "ocbi @%0\n\t" 680 "ocbi @%1\n\t" 681 "ocbi @%2\n\t" 682 "ocbi @%3\n\t" : : 683 "r" (a0), "r" (a1), "r" (a2), "r" (a3)); 684 a0 += linesz; 685 a1 += linesz; 686 a2 += linesz; 687 a3 += linesz; 688 asm volatile("movca.l r0, @%0\n\t" 689 "movca.l r0, @%1\n\t" 690 "movca.l r0, @%2\n\t" 691 "movca.l r0, @%3\n\t" 692 "ocbi @%0\n\t" 693 "ocbi @%1\n\t" 694 "ocbi @%2\n\t" 695 "ocbi @%3\n\t" : : 696 "r" (a0), "r" (a1), "r" (a2), "r" (a3)); 697 asm volatile("ldc %0, sr" : : "r" (orig_sr)); 698 a0 += linesz; 699 a1 += linesz; 700 a2 += linesz; 701 a3 += linesz; 702 } while (a0 < a0e); 703 } 704 705 extern void __weak sh4__flush_region_init(void); 706 707 /* 708 * SH-4 has virtually indexed and physically tagged cache. 709 */ 710 void __init sh4_cache_init(void) 711 { 712 unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); 713 714 printk("PVR=%08x CVR=%08x PRR=%08x\n", 715 ctrl_inl(CCN_PVR), 716 ctrl_inl(CCN_CVR), 717 ctrl_inl(CCN_PRR)); 718 719 if (wt_enabled) 720 __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; 721 else { 722 switch (boot_cpu_data.dcache.ways) { 723 case 1: 724 __flush_dcache_segment_fn = __flush_dcache_segment_1way; 725 break; 726 case 2: 727 __flush_dcache_segment_fn = __flush_dcache_segment_2way; 728 break; 729 case 4: 730 __flush_dcache_segment_fn = __flush_dcache_segment_4way; 731 break; 732 default: 733 panic("unknown number of cache ways\n"); 734 break; 735 } 736 } 737 738 local_flush_icache_range = sh4_flush_icache_range; 739 local_flush_dcache_page = sh4_flush_dcache_page; 740 local_flush_cache_all = sh4_flush_cache_all; 741 local_flush_cache_mm = sh4_flush_cache_mm; 742 local_flush_cache_dup_mm = sh4_flush_cache_mm; 743 local_flush_cache_page = sh4_flush_cache_page; 744 local_flush_cache_range = sh4_flush_cache_range; 745 746 sh4__flush_region_init(); 747 } 748