/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}

#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
499 */ 500 map_coherent = (cpu_has_dc_aliases && 501 page_mapped(page) && !Page_dcache_dirty(page)); 502 if (map_coherent) 503 vaddr = kmap_coherent(page, addr); 504 else 505 vaddr = kmap_atomic(page); 506 addr = (unsigned long)vaddr; 507 } 508 509 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { 510 r4k_blast_dcache_page(addr); 511 if (exec && !cpu_icache_snoops_remote_store) 512 r4k_blast_scache_page(addr); 513 } 514 if (exec) { 515 if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { 516 int cpu = smp_processor_id(); 517 518 if (cpu_context(cpu, mm) != 0) 519 drop_mmu_context(mm, cpu); 520 } else 521 r4k_blast_icache_page(addr); 522 } 523 524 if (vaddr) { 525 if (map_coherent) 526 kunmap_coherent(); 527 else 528 kunmap_atomic(vaddr); 529 } 530 } 531 532 static void r4k_flush_cache_page(struct vm_area_struct *vma, 533 unsigned long addr, unsigned long pfn) 534 { 535 struct flush_cache_page_args args; 536 537 args.vma = vma; 538 args.addr = addr; 539 args.pfn = pfn; 540 541 r4k_on_each_cpu(local_r4k_flush_cache_page, &args); 542 } 543 544 static inline void local_r4k_flush_data_cache_page(void * addr) 545 { 546 r4k_blast_dcache_page((unsigned long) addr); 547 } 548 549 static void r4k_flush_data_cache_page(unsigned long addr) 550 { 551 if (in_atomic()) 552 local_r4k_flush_data_cache_page((void *)addr); 553 else 554 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); 555 } 556 557 struct flush_icache_range_args { 558 unsigned long start; 559 unsigned long end; 560 }; 561 562 static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) 563 { 564 if (!cpu_has_ic_fills_f_dc) { 565 if (end - start >= dcache_size) { 566 r4k_blast_dcache(); 567 } else { 568 R4600_HIT_CACHEOP_WAR_IMPL; 569 protected_blast_dcache_range(start, end); 570 } 571 } 572 573 if (end - start > icache_size) 574 r4k_blast_icache(); 575 else 576 protected_blast_icache_range(start, end); 577 } 578 579 static inline void local_r4k_flush_icache_range_ipi(void *args) 580 { 581 struct flush_icache_range_args *fir_args = args; 582 unsigned long start = fir_args->start; 583 unsigned long end = fir_args->end; 584 585 local_r4k_flush_icache_range(start, end); 586 } 587 588 static void r4k_flush_icache_range(unsigned long start, unsigned long end) 589 { 590 struct flush_icache_range_args args; 591 592 args.start = start; 593 args.end = end; 594 595 r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); 596 instruction_hazard(); 597 } 598 599 #ifdef CONFIG_DMA_NONCOHERENT 600 601 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) 602 { 603 /* Catch bad driver code */ 604 BUG_ON(size == 0); 605 606 preempt_disable(); 607 if (cpu_has_inclusive_pcaches) { 608 if (size >= scache_size) 609 r4k_blast_scache(); 610 else 611 blast_scache_range(addr, addr + size); 612 __sync(); 613 return; 614 } 615 616 /* 617 * Either no secondary cache or the available caches don't have the 618 * subset property so we have to flush the primary caches 619 * explicitly 620 */ 621 if (cpu_has_safe_index_cacheops && size >= dcache_size) { 622 r4k_blast_dcache(); 623 } else { 624 R4600_HIT_CACHEOP_WAR_IMPL; 625 blast_dcache_range(addr, addr + size); 626 } 627 preempt_enable(); 628 629 bc_wback_inv(addr, size); 630 __sync(); 631 } 632 633 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) 634 { 635 /* Catch bad driver code */ 636 BUG_ON(size == 0); 637 638 preempt_disable(); 639 if (cpu_has_inclusive_pcaches) { 640 if (size >= scache_size) 641 
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors will throw an address error for cache
			 * hit ops with insufficient alignment.  Solved by
			 * aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long vaddr;
	int size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
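	/*
	 * Rough sketch of the workaround implemented below: with TagLo/TagHi
	 * cleared, walk the first 4 KB of indexes and, at each of four
	 * 4 KB-spaced offsets, store the (zero, hence invalid) tag, force a
	 * Fill from memory, then store the tag again.
	 */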
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			c->dcache.flags |= MIPS_CACHE_VTAG;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			c->dcache.flags |= MIPS_CACHE_VTAG;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz= lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so normally they'd suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (current_cpu_type() == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
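	/*
	 * The index-type cache op below selects a line purely by the low
	 * address bits, so once the probe address is a whole cache size away
	 * from 'begin' it wraps onto the line whose tag was zeroed above;
	 * reading back a zero TagLo therefore marks the S-cache size.
	 */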
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init ()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

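	/*
	 * The S-caches that reach this point are assumed to be inclusive of
	 * the primary caches, which is what lets the DMA cache routines
	 * operate on the S-cache alone (cpu_has_inclusive_pcaches).
	 */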
	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	__asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}

void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE-1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}