1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 */ 10 #include <linux/hardirq.h> 11 #include <linux/init.h> 12 #include <linux/highmem.h> 13 #include <linux/kernel.h> 14 #include <linux/linkage.h> 15 #include <linux/sched.h> 16 #include <linux/smp.h> 17 #include <linux/mm.h> 18 #include <linux/module.h> 19 #include <linux/bitops.h> 20 21 #include <asm/bcache.h> 22 #include <asm/bootinfo.h> 23 #include <asm/cache.h> 24 #include <asm/cacheops.h> 25 #include <asm/cpu.h> 26 #include <asm/cpu-features.h> 27 #include <asm/io.h> 28 #include <asm/page.h> 29 #include <asm/pgtable.h> 30 #include <asm/r4kcache.h> 31 #include <asm/sections.h> 32 #include <asm/mmu_context.h> 33 #include <asm/war.h> 34 #include <asm/cacheflush.h> /* for run_uncached() */ 35 #include <asm/traps.h> 36 37 /* 38 * Special Variant of smp_call_function for use by cache functions: 39 * 40 * o No return value 41 * o collapses to normal function call on UP kernels 42 * o collapses to normal function call on systems with a single shared 43 * primary cache. 44 * o doesn't disable interrupts on the local CPU 45 */ 46 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) 47 { 48 preempt_disable(); 49 50 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 51 smp_call_function(func, info, 1); 52 #endif 53 func(info); 54 preempt_enable(); 55 } 56 57 #if defined(CONFIG_MIPS_CMP) 58 #define cpu_has_safe_index_cacheops 0 59 #else 60 #define cpu_has_safe_index_cacheops 1 61 #endif 62 63 /* 64 * Must die. 65 */ 66 static unsigned long icache_size __read_mostly; 67 static unsigned long dcache_size __read_mostly; 68 static unsigned long scache_size __read_mostly; 69 70 /* 71 * Dummy cache handling routines for machines without boardcaches 72 */ 73 static void cache_noop(void) {} 74 75 static struct bcache_ops no_sc_ops = { 76 .bc_enable = (void *)cache_noop, 77 .bc_disable = (void *)cache_noop, 78 .bc_wback_inv = (void *)cache_noop, 79 .bc_inv = (void *)cache_noop 80 }; 81 82 struct bcache_ops *bcops = &no_sc_ops; 83 84 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 85 #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 86 87 #define R4600_HIT_CACHEOP_WAR_IMPL \ 88 do { \ 89 if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \ 90 *(volatile unsigned long *)CKSEG1; \ 91 if (R4600_V1_HIT_CACHEOP_WAR) \ 92 __asm__ __volatile__("nop;nop;nop;nop"); \ 93 } while (0) 94 95 static void (*r4k_blast_dcache_page)(unsigned long addr); 96 97 static inline void r4k_blast_dcache_page_dc32(unsigned long addr) 98 { 99 R4600_HIT_CACHEOP_WAR_IMPL; 100 blast_dcache32_page(addr); 101 } 102 103 static inline void r4k_blast_dcache_page_dc64(unsigned long addr) 104 { 105 R4600_HIT_CACHEOP_WAR_IMPL; 106 blast_dcache64_page(addr); 107 } 108 109 static void __cpuinit r4k_blast_dcache_page_setup(void) 110 { 111 unsigned long dc_lsize = cpu_dcache_line_size(); 112 113 if (dc_lsize == 0) 114 r4k_blast_dcache_page = (void *)cache_noop; 115 else if (dc_lsize == 16) 116 r4k_blast_dcache_page = blast_dcache16_page; 117 else if (dc_lsize == 32) 118 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; 119 else if (dc_lsize == 64) 120 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; 121 } 122 123 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); 124 125 static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) 126 { 127 unsigned long dc_lsize = cpu_dcache_line_size(); 128 129 if (dc_lsize == 0) 130 r4k_blast_dcache_page_indexed = (void *)cache_noop; 131 else if (dc_lsize == 16) 132 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; 133 else if (dc_lsize == 32) 134 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; 135 else if (dc_lsize == 64) 136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 137 } 138 139 static void (* r4k_blast_dcache)(void); 140 141 static void __cpuinit r4k_blast_dcache_setup(void) 142 { 143 unsigned long dc_lsize = cpu_dcache_line_size(); 144 145 if (dc_lsize == 0) 146 r4k_blast_dcache = (void *)cache_noop; 147 else if (dc_lsize == 16) 148 r4k_blast_dcache = blast_dcache16; 149 else if (dc_lsize == 32) 150 r4k_blast_dcache = blast_dcache32; 151 else if (dc_lsize == 64) 152 r4k_blast_dcache = blast_dcache64; 153 } 154 155 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ 156 #define JUMP_TO_ALIGN(order) \ 157 __asm__ __volatile__( \ 158 "b\t1f\n\t" \ 159 ".align\t" #order "\n\t" \ 160 "1:\n\t" \ 161 ) 162 #define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */ 163 #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11) 164 165 static inline void blast_r4600_v1_icache32(void) 166 { 167 unsigned long flags; 168 169 local_irq_save(flags); 170 blast_icache32(); 171 local_irq_restore(flags); 172 } 173 174 static inline void tx49_blast_icache32(void) 175 { 176 unsigned long start = INDEX_BASE; 177 unsigned long end = start + current_cpu_data.icache.waysize; 178 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; 179 unsigned long ws_end = current_cpu_data.icache.ways << 180 current_cpu_data.icache.waybit; 181 unsigned long ws, addr; 182 183 CACHE32_UNROLL32_ALIGN2; 184 /* I'm in even chunk. blast odd chunks */ 185 for (ws = 0; ws < ws_end; ws += ws_inc) 186 for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 187 cache32_unroll32(addr|ws, Index_Invalidate_I); 188 CACHE32_UNROLL32_ALIGN; 189 /* I'm in odd chunk. blast even chunks */ 190 for (ws = 0; ws < ws_end; ws += ws_inc) 191 for (addr = start; addr < end; addr += 0x400 * 2) 192 cache32_unroll32(addr|ws, Index_Invalidate_I); 193 } 194 195 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page) 196 { 197 unsigned long flags; 198 199 local_irq_save(flags); 200 blast_icache32_page_indexed(page); 201 local_irq_restore(flags); 202 } 203 204 static inline void tx49_blast_icache32_page_indexed(unsigned long page) 205 { 206 unsigned long indexmask = current_cpu_data.icache.waysize - 1; 207 unsigned long start = INDEX_BASE + (page & indexmask); 208 unsigned long end = start + PAGE_SIZE; 209 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; 210 unsigned long ws_end = current_cpu_data.icache.ways << 211 current_cpu_data.icache.waybit; 212 unsigned long ws, addr; 213 214 CACHE32_UNROLL32_ALIGN2; 215 /* I'm in even chunk. blast odd chunks */ 216 for (ws = 0; ws < ws_end; ws += ws_inc) 217 for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 218 cache32_unroll32(addr|ws, Index_Invalidate_I); 219 CACHE32_UNROLL32_ALIGN; 220 /* I'm in odd chunk. blast even chunks */ 221 for (ws = 0; ws < ws_end; ws += ws_inc) 222 for (addr = start; addr < end; addr += 0x400 * 2) 223 cache32_unroll32(addr|ws, Index_Invalidate_I); 224 } 225 226 static void (* r4k_blast_icache_page)(unsigned long addr); 227 228 static void __cpuinit r4k_blast_icache_page_setup(void) 229 { 230 unsigned long ic_lsize = cpu_icache_line_size(); 231 232 if (ic_lsize == 0) 233 r4k_blast_icache_page = (void *)cache_noop; 234 else if (ic_lsize == 16) 235 r4k_blast_icache_page = blast_icache16_page; 236 else if (ic_lsize == 32) 237 r4k_blast_icache_page = blast_icache32_page; 238 else if (ic_lsize == 64) 239 r4k_blast_icache_page = blast_icache64_page; 240 } 241 242 243 static void (* r4k_blast_icache_page_indexed)(unsigned long addr); 244 245 static void __cpuinit r4k_blast_icache_page_indexed_setup(void) 246 { 247 unsigned long ic_lsize = cpu_icache_line_size(); 248 249 if (ic_lsize == 0) 250 r4k_blast_icache_page_indexed = (void *)cache_noop; 251 else if (ic_lsize == 16) 252 r4k_blast_icache_page_indexed = blast_icache16_page_indexed; 253 else if (ic_lsize == 32) { 254 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) 255 r4k_blast_icache_page_indexed = 256 blast_icache32_r4600_v1_page_indexed; 257 else if (TX49XX_ICACHE_INDEX_INV_WAR) 258 r4k_blast_icache_page_indexed = 259 tx49_blast_icache32_page_indexed; 260 else 261 r4k_blast_icache_page_indexed = 262 blast_icache32_page_indexed; 263 } else if (ic_lsize == 64) 264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed; 265 } 266 267 static void (* r4k_blast_icache)(void); 268 269 static void __cpuinit r4k_blast_icache_setup(void) 270 { 271 unsigned long ic_lsize = cpu_icache_line_size(); 272 273 if (ic_lsize == 0) 274 r4k_blast_icache = (void *)cache_noop; 275 else if (ic_lsize == 16) 276 r4k_blast_icache = blast_icache16; 277 else if (ic_lsize == 32) { 278 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) 279 r4k_blast_icache = blast_r4600_v1_icache32; 280 else if (TX49XX_ICACHE_INDEX_INV_WAR) 281 r4k_blast_icache = tx49_blast_icache32; 282 else 283 r4k_blast_icache = blast_icache32; 284 } else if (ic_lsize == 64) 285 r4k_blast_icache = blast_icache64; 286 } 287 288 static void (* r4k_blast_scache_page)(unsigned long addr); 289 290 static void __cpuinit r4k_blast_scache_page_setup(void) 291 { 292 unsigned long sc_lsize = cpu_scache_line_size(); 293 294 if (scache_size == 0) 295 r4k_blast_scache_page = (void *)cache_noop; 296 else if (sc_lsize == 16) 297 r4k_blast_scache_page = blast_scache16_page; 298 else if (sc_lsize == 32) 299 r4k_blast_scache_page = blast_scache32_page; 300 else if (sc_lsize == 64) 301 r4k_blast_scache_page = blast_scache64_page; 302 else if (sc_lsize == 128) 303 r4k_blast_scache_page = blast_scache128_page; 304 } 305 306 static void (* r4k_blast_scache_page_indexed)(unsigned long addr); 307 308 static void __cpuinit r4k_blast_scache_page_indexed_setup(void) 309 { 310 unsigned long sc_lsize = cpu_scache_line_size(); 311 312 if (scache_size == 0) 313 r4k_blast_scache_page_indexed = (void *)cache_noop; 314 else if (sc_lsize == 16) 315 r4k_blast_scache_page_indexed = blast_scache16_page_indexed; 316 else if (sc_lsize == 32) 317 r4k_blast_scache_page_indexed = blast_scache32_page_indexed; 318 else if (sc_lsize == 64) 319 r4k_blast_scache_page_indexed = blast_scache64_page_indexed; 320 else if (sc_lsize == 128) 321 r4k_blast_scache_page_indexed = blast_scache128_page_indexed; 322 } 323 324 static void (* r4k_blast_scache)(void); 325 326 static void __cpuinit r4k_blast_scache_setup(void) 327 { 328 unsigned long sc_lsize = cpu_scache_line_size(); 329 330 if (scache_size == 0) 331 r4k_blast_scache = (void *)cache_noop; 332 else if (sc_lsize == 16) 333 r4k_blast_scache = blast_scache16; 334 else if (sc_lsize == 32) 335 r4k_blast_scache = blast_scache32; 336 else if (sc_lsize == 64) 337 r4k_blast_scache = blast_scache64; 338 else if (sc_lsize == 128) 339 r4k_blast_scache = blast_scache128; 340 } 341 342 static inline void local_r4k___flush_cache_all(void * args) 343 { 344 #if defined(CONFIG_CPU_LOONGSON2) 345 r4k_blast_scache(); 346 return; 347 #endif 348 r4k_blast_dcache(); 349 r4k_blast_icache(); 350 351 switch (current_cpu_type()) { 352 case CPU_R4000SC: 353 case CPU_R4000MC: 354 case CPU_R4400SC: 355 case CPU_R4400MC: 356 case CPU_R10000: 357 case CPU_R12000: 358 case CPU_R14000: 359 r4k_blast_scache(); 360 } 361 } 362 363 static void r4k___flush_cache_all(void) 364 { 365 r4k_on_each_cpu(local_r4k___flush_cache_all, NULL); 366 } 367 368 static inline int has_valid_asid(const struct mm_struct *mm) 369 { 370 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 371 int i; 372 373 for_each_online_cpu(i) 374 if (cpu_context(i, mm)) 375 return 1; 376 377 return 0; 378 #else 379 return cpu_context(smp_processor_id(), mm); 380 #endif 381 } 382 383 static void r4k__flush_cache_vmap(void) 384 { 385 r4k_blast_dcache(); 386 } 387 388 static void r4k__flush_cache_vunmap(void) 389 { 390 r4k_blast_dcache(); 391 } 392 393 static inline void local_r4k_flush_cache_range(void * args) 394 { 395 struct vm_area_struct *vma = args; 396 int exec = vma->vm_flags & VM_EXEC; 397 398 if (!(has_valid_asid(vma->vm_mm))) 399 return; 400 401 r4k_blast_dcache(); 402 if (exec) 403 r4k_blast_icache(); 404 } 405 406 static void r4k_flush_cache_range(struct vm_area_struct *vma, 407 unsigned long start, unsigned long end) 408 { 409 int exec = vma->vm_flags & VM_EXEC; 410 411 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) 412 r4k_on_each_cpu(local_r4k_flush_cache_range, vma); 413 } 414 415 static inline void local_r4k_flush_cache_mm(void * args) 416 { 417 struct mm_struct *mm = args; 418 419 if (!has_valid_asid(mm)) 420 return; 421 422 /* 423 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we 424 * only flush the primary caches but R10000 and R12000 behave sane ... 425 * R4000SC and R4400SC indexed S-cache ops also invalidate primary 426 * caches, so we can bail out early. 427 */ 428 if (current_cpu_type() == CPU_R4000SC || 429 current_cpu_type() == CPU_R4000MC || 430 current_cpu_type() == CPU_R4400SC || 431 current_cpu_type() == CPU_R4400MC) { 432 r4k_blast_scache(); 433 return; 434 } 435 436 r4k_blast_dcache(); 437 } 438 439 static void r4k_flush_cache_mm(struct mm_struct *mm) 440 { 441 if (!cpu_has_dc_aliases) 442 return; 443 444 r4k_on_each_cpu(local_r4k_flush_cache_mm, mm); 445 } 446 447 struct flush_cache_page_args { 448 struct vm_area_struct *vma; 449 unsigned long addr; 450 unsigned long pfn; 451 }; 452 453 static inline void local_r4k_flush_cache_page(void *args) 454 { 455 struct flush_cache_page_args *fcp_args = args; 456 struct vm_area_struct *vma = fcp_args->vma; 457 unsigned long addr = fcp_args->addr; 458 struct page *page = pfn_to_page(fcp_args->pfn); 459 int exec = vma->vm_flags & VM_EXEC; 460 struct mm_struct *mm = vma->vm_mm; 461 int map_coherent = 0; 462 pgd_t *pgdp; 463 pud_t *pudp; 464 pmd_t *pmdp; 465 pte_t *ptep; 466 void *vaddr; 467 468 /* 469 * If ownes no valid ASID yet, cannot possibly have gotten 470 * this page into the cache. 471 */ 472 if (!has_valid_asid(mm)) 473 return; 474 475 addr &= PAGE_MASK; 476 pgdp = pgd_offset(mm, addr); 477 pudp = pud_offset(pgdp, addr); 478 pmdp = pmd_offset(pudp, addr); 479 ptep = pte_offset(pmdp, addr); 480 481 /* 482 * If the page isn't marked valid, the page cannot possibly be 483 * in the cache. 484 */ 485 if (!(pte_present(*ptep))) 486 return; 487 488 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) 489 vaddr = NULL; 490 else { 491 /* 492 * Use kmap_coherent or kmap_atomic to do flushes for 493 * another ASID than the current one. 494 */ 495 map_coherent = (cpu_has_dc_aliases && 496 page_mapped(page) && !Page_dcache_dirty(page)); 497 if (map_coherent) 498 vaddr = kmap_coherent(page, addr); 499 else 500 vaddr = kmap_atomic(page); 501 addr = (unsigned long)vaddr; 502 } 503 504 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { 505 r4k_blast_dcache_page(addr); 506 if (exec && !cpu_icache_snoops_remote_store) 507 r4k_blast_scache_page(addr); 508 } 509 if (exec) { 510 if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { 511 int cpu = smp_processor_id(); 512 513 if (cpu_context(cpu, mm) != 0) 514 drop_mmu_context(mm, cpu); 515 } else 516 r4k_blast_icache_page(addr); 517 } 518 519 if (vaddr) { 520 if (map_coherent) 521 kunmap_coherent(); 522 else 523 kunmap_atomic(vaddr); 524 } 525 } 526 527 static void r4k_flush_cache_page(struct vm_area_struct *vma, 528 unsigned long addr, unsigned long pfn) 529 { 530 struct flush_cache_page_args args; 531 532 args.vma = vma; 533 args.addr = addr; 534 args.pfn = pfn; 535 536 r4k_on_each_cpu(local_r4k_flush_cache_page, &args); 537 } 538 539 static inline void local_r4k_flush_data_cache_page(void * addr) 540 { 541 r4k_blast_dcache_page((unsigned long) addr); 542 } 543 544 static void r4k_flush_data_cache_page(unsigned long addr) 545 { 546 if (in_atomic()) 547 local_r4k_flush_data_cache_page((void *)addr); 548 else 549 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); 550 } 551 552 struct flush_icache_range_args { 553 unsigned long start; 554 unsigned long end; 555 }; 556 557 static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) 558 { 559 if (!cpu_has_ic_fills_f_dc) { 560 if (end - start >= dcache_size) { 561 r4k_blast_dcache(); 562 } else { 563 R4600_HIT_CACHEOP_WAR_IMPL; 564 protected_blast_dcache_range(start, end); 565 } 566 } 567 568 if (end - start > icache_size) 569 r4k_blast_icache(); 570 else 571 protected_blast_icache_range(start, end); 572 } 573 574 static inline void local_r4k_flush_icache_range_ipi(void *args) 575 { 576 struct flush_icache_range_args *fir_args = args; 577 unsigned long start = fir_args->start; 578 unsigned long end = fir_args->end; 579 580 local_r4k_flush_icache_range(start, end); 581 } 582 583 static void r4k_flush_icache_range(unsigned long start, unsigned long end) 584 { 585 struct flush_icache_range_args args; 586 587 args.start = start; 588 args.end = end; 589 590 r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); 591 instruction_hazard(); 592 } 593 594 #ifdef CONFIG_DMA_NONCOHERENT 595 596 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) 597 { 598 /* Catch bad driver code */ 599 BUG_ON(size == 0); 600 601 if (cpu_has_inclusive_pcaches) { 602 if (size >= scache_size) 603 r4k_blast_scache(); 604 else 605 blast_scache_range(addr, addr + size); 606 __sync(); 607 return; 608 } 609 610 /* 611 * Either no secondary cache or the available caches don't have the 612 * subset property so we have to flush the primary caches 613 * explicitly 614 */ 615 if (cpu_has_safe_index_cacheops && size >= dcache_size) { 616 r4k_blast_dcache(); 617 } else { 618 R4600_HIT_CACHEOP_WAR_IMPL; 619 blast_dcache_range(addr, addr + size); 620 } 621 622 bc_wback_inv(addr, size); 623 __sync(); 624 } 625 626 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) 627 { 628 /* Catch bad driver code */ 629 BUG_ON(size == 0); 630 631 if (cpu_has_inclusive_pcaches) { 632 if (size >= scache_size) 633 r4k_blast_scache(); 634 else { 635 unsigned long lsize = cpu_scache_line_size(); 636 unsigned long almask = ~(lsize - 1); 637 638 /* 639 * There is no clearly documented alignment requirement 640 * for the cache instruction on MIPS processors and 641 * some processors, among them the RM5200 and RM7000 642 * QED processors will throw an address error for cache 643 * hit ops with insufficient alignment. Solved by 644 * aligning the address to cache line size. 645 */ 646 cache_op(Hit_Writeback_Inv_SD, addr & almask); 647 cache_op(Hit_Writeback_Inv_SD, 648 (addr + size - 1) & almask); 649 blast_inv_scache_range(addr, addr + size); 650 } 651 __sync(); 652 return; 653 } 654 655 if (cpu_has_safe_index_cacheops && size >= dcache_size) { 656 r4k_blast_dcache(); 657 } else { 658 unsigned long lsize = cpu_dcache_line_size(); 659 unsigned long almask = ~(lsize - 1); 660 661 R4600_HIT_CACHEOP_WAR_IMPL; 662 cache_op(Hit_Writeback_Inv_D, addr & almask); 663 cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); 664 blast_inv_dcache_range(addr, addr + size); 665 } 666 667 bc_inv(addr, size); 668 __sync(); 669 } 670 #endif /* CONFIG_DMA_NONCOHERENT */ 671 672 /* 673 * While we're protected against bad userland addresses we don't care 674 * very much about what happens in that case. Usually a segmentation 675 * fault will dump the process later on anyway ... 676 */ 677 static void local_r4k_flush_cache_sigtramp(void * arg) 678 { 679 unsigned long ic_lsize = cpu_icache_line_size(); 680 unsigned long dc_lsize = cpu_dcache_line_size(); 681 unsigned long sc_lsize = cpu_scache_line_size(); 682 unsigned long addr = (unsigned long) arg; 683 684 R4600_HIT_CACHEOP_WAR_IMPL; 685 if (dc_lsize) 686 protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); 687 if (!cpu_icache_snoops_remote_store && scache_size) 688 protected_writeback_scache_line(addr & ~(sc_lsize - 1)); 689 if (ic_lsize) 690 protected_flush_icache_line(addr & ~(ic_lsize - 1)); 691 if (MIPS4K_ICACHE_REFILL_WAR) { 692 __asm__ __volatile__ ( 693 ".set push\n\t" 694 ".set noat\n\t" 695 ".set mips3\n\t" 696 #ifdef CONFIG_32BIT 697 "la $at,1f\n\t" 698 #endif 699 #ifdef CONFIG_64BIT 700 "dla $at,1f\n\t" 701 #endif 702 "cache %0,($at)\n\t" 703 "nop; nop; nop\n" 704 "1:\n\t" 705 ".set pop" 706 : 707 : "i" (Hit_Invalidate_I)); 708 } 709 if (MIPS_CACHE_SYNC_WAR) 710 __asm__ __volatile__ ("sync"); 711 } 712 713 static void r4k_flush_cache_sigtramp(unsigned long addr) 714 { 715 r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr); 716 } 717 718 static void r4k_flush_icache_all(void) 719 { 720 if (cpu_has_vtag_icache) 721 r4k_blast_icache(); 722 } 723 724 struct flush_kernel_vmap_range_args { 725 unsigned long vaddr; 726 int size; 727 }; 728 729 static inline void local_r4k_flush_kernel_vmap_range(void *args) 730 { 731 struct flush_kernel_vmap_range_args *vmra = args; 732 unsigned long vaddr = vmra->vaddr; 733 int size = vmra->size; 734 735 /* 736 * Aliases only affect the primary caches so don't bother with 737 * S-caches or T-caches. 738 */ 739 if (cpu_has_safe_index_cacheops && size >= dcache_size) 740 r4k_blast_dcache(); 741 else { 742 R4600_HIT_CACHEOP_WAR_IMPL; 743 blast_dcache_range(vaddr, vaddr + size); 744 } 745 } 746 747 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) 748 { 749 struct flush_kernel_vmap_range_args args; 750 751 args.vaddr = (unsigned long) vaddr; 752 args.size = size; 753 754 r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); 755 } 756 757 static inline void rm7k_erratum31(void) 758 { 759 const unsigned long ic_lsize = 32; 760 unsigned long addr; 761 762 /* RM7000 erratum #31. The icache is screwed at startup. */ 763 write_c0_taglo(0); 764 write_c0_taghi(0); 765 766 for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) { 767 __asm__ __volatile__ ( 768 ".set push\n\t" 769 ".set noreorder\n\t" 770 ".set mips3\n\t" 771 "cache\t%1, 0(%0)\n\t" 772 "cache\t%1, 0x1000(%0)\n\t" 773 "cache\t%1, 0x2000(%0)\n\t" 774 "cache\t%1, 0x3000(%0)\n\t" 775 "cache\t%2, 0(%0)\n\t" 776 "cache\t%2, 0x1000(%0)\n\t" 777 "cache\t%2, 0x2000(%0)\n\t" 778 "cache\t%2, 0x3000(%0)\n\t" 779 "cache\t%1, 0(%0)\n\t" 780 "cache\t%1, 0x1000(%0)\n\t" 781 "cache\t%1, 0x2000(%0)\n\t" 782 "cache\t%1, 0x3000(%0)\n\t" 783 ".set pop\n" 784 : 785 : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill)); 786 } 787 } 788 789 static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", 790 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 791 }; 792 793 static void __cpuinit probe_pcache(void) 794 { 795 struct cpuinfo_mips *c = ¤t_cpu_data; 796 unsigned int config = read_c0_config(); 797 unsigned int prid = read_c0_prid(); 798 unsigned long config1; 799 unsigned int lsize; 800 801 switch (c->cputype) { 802 case CPU_R4600: /* QED style two way caches? */ 803 case CPU_R4700: 804 case CPU_R5000: 805 case CPU_NEVADA: 806 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 807 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 808 c->icache.ways = 2; 809 c->icache.waybit = __ffs(icache_size/2); 810 811 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 812 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 813 c->dcache.ways = 2; 814 c->dcache.waybit= __ffs(dcache_size/2); 815 816 c->options |= MIPS_CPU_CACHE_CDEX_P; 817 break; 818 819 case CPU_R5432: 820 case CPU_R5500: 821 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 822 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 823 c->icache.ways = 2; 824 c->icache.waybit= 0; 825 826 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 827 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 828 c->dcache.ways = 2; 829 c->dcache.waybit = 0; 830 831 c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH; 832 break; 833 834 case CPU_TX49XX: 835 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 836 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 837 c->icache.ways = 4; 838 c->icache.waybit= 0; 839 840 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 841 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 842 c->dcache.ways = 4; 843 c->dcache.waybit = 0; 844 845 c->options |= MIPS_CPU_CACHE_CDEX_P; 846 c->options |= MIPS_CPU_PREFETCH; 847 break; 848 849 case CPU_R4000PC: 850 case CPU_R4000SC: 851 case CPU_R4000MC: 852 case CPU_R4400PC: 853 case CPU_R4400SC: 854 case CPU_R4400MC: 855 case CPU_R4300: 856 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 857 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 858 c->icache.ways = 1; 859 c->icache.waybit = 0; /* doesn't matter */ 860 861 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 862 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 863 c->dcache.ways = 1; 864 c->dcache.waybit = 0; /* does not matter */ 865 866 c->options |= MIPS_CPU_CACHE_CDEX_P; 867 break; 868 869 case CPU_R10000: 870 case CPU_R12000: 871 case CPU_R14000: 872 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29)); 873 c->icache.linesz = 64; 874 c->icache.ways = 2; 875 c->icache.waybit = 0; 876 877 dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26)); 878 c->dcache.linesz = 32; 879 c->dcache.ways = 2; 880 c->dcache.waybit = 0; 881 882 c->options |= MIPS_CPU_PREFETCH; 883 break; 884 885 case CPU_VR4133: 886 write_c0_config(config & ~VR41_CONF_P4K); 887 case CPU_VR4131: 888 /* Workaround for cache instruction bug of VR4131 */ 889 if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || 890 c->processor_id == 0x0c82U) { 891 config |= 0x00400000U; 892 if (c->processor_id == 0x0c80U) 893 config |= VR41_CONF_BP; 894 write_c0_config(config); 895 } else 896 c->options |= MIPS_CPU_CACHE_CDEX_P; 897 898 icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); 899 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 900 c->icache.ways = 2; 901 c->icache.waybit = __ffs(icache_size/2); 902 903 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); 904 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 905 c->dcache.ways = 2; 906 c->dcache.waybit = __ffs(dcache_size/2); 907 break; 908 909 case CPU_VR41XX: 910 case CPU_VR4111: 911 case CPU_VR4121: 912 case CPU_VR4122: 913 case CPU_VR4181: 914 case CPU_VR4181A: 915 icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); 916 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 917 c->icache.ways = 1; 918 c->icache.waybit = 0; /* doesn't matter */ 919 920 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); 921 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 922 c->dcache.ways = 1; 923 c->dcache.waybit = 0; /* does not matter */ 924 925 c->options |= MIPS_CPU_CACHE_CDEX_P; 926 break; 927 928 case CPU_RM7000: 929 rm7k_erratum31(); 930 931 case CPU_RM9000: 932 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 933 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 934 c->icache.ways = 4; 935 c->icache.waybit = __ffs(icache_size / c->icache.ways); 936 937 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 938 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 939 c->dcache.ways = 4; 940 c->dcache.waybit = __ffs(dcache_size / c->dcache.ways); 941 942 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR) 943 c->options |= MIPS_CPU_CACHE_CDEX_P; 944 #endif 945 c->options |= MIPS_CPU_PREFETCH; 946 break; 947 948 case CPU_LOONGSON2: 949 icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); 950 c->icache.linesz = 16 << ((config & CONF_IB) >> 5); 951 if (prid & 0x3) 952 c->icache.ways = 4; 953 else 954 c->icache.ways = 2; 955 c->icache.waybit = 0; 956 957 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); 958 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); 959 if (prid & 0x3) 960 c->dcache.ways = 4; 961 else 962 c->dcache.ways = 2; 963 c->dcache.waybit = 0; 964 break; 965 966 default: 967 if (!(config & MIPS_CONF_M)) 968 panic("Don't know how to probe P-caches on this cpu."); 969 970 /* 971 * So we seem to be a MIPS32 or MIPS64 CPU 972 * So let's probe the I-cache ... 973 */ 974 config1 = read_c0_config1(); 975 976 if ((lsize = ((config1 >> 19) & 7))) 977 c->icache.linesz = 2 << lsize; 978 else 979 c->icache.linesz = lsize; 980 c->icache.sets = 32 << (((config1 >> 22) + 1) & 7); 981 c->icache.ways = 1 + ((config1 >> 16) & 7); 982 983 icache_size = c->icache.sets * 984 c->icache.ways * 985 c->icache.linesz; 986 c->icache.waybit = __ffs(icache_size/c->icache.ways); 987 988 if (config & 0x8) /* VI bit */ 989 c->icache.flags |= MIPS_CACHE_VTAG; 990 991 /* 992 * Now probe the MIPS32 / MIPS64 data cache. 993 */ 994 c->dcache.flags = 0; 995 996 if ((lsize = ((config1 >> 10) & 7))) 997 c->dcache.linesz = 2 << lsize; 998 else 999 c->dcache.linesz= lsize; 1000 c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7); 1001 c->dcache.ways = 1 + ((config1 >> 7) & 7); 1002 1003 dcache_size = c->dcache.sets * 1004 c->dcache.ways * 1005 c->dcache.linesz; 1006 c->dcache.waybit = __ffs(dcache_size/c->dcache.ways); 1007 1008 c->options |= MIPS_CPU_PREFETCH; 1009 break; 1010 } 1011 1012 /* 1013 * Processor configuration sanity check for the R4000SC erratum 1014 * #5. With page sizes larger than 32kB there is no possibility 1015 * to get a VCE exception anymore so we don't care about this 1016 * misconfiguration. The case is rather theoretical anyway; 1017 * presumably no vendor is shipping his hardware in the "bad" 1018 * configuration. 1019 */ 1020 if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 && 1021 !(config & CONF_SC) && c->icache.linesz != 16 && 1022 PAGE_SIZE <= 0x8000) 1023 panic("Improper R4000SC processor configuration detected"); 1024 1025 /* compute a couple of other cache variables */ 1026 c->icache.waysize = icache_size / c->icache.ways; 1027 c->dcache.waysize = dcache_size / c->dcache.ways; 1028 1029 c->icache.sets = c->icache.linesz ? 1030 icache_size / (c->icache.linesz * c->icache.ways) : 0; 1031 c->dcache.sets = c->dcache.linesz ? 1032 dcache_size / (c->dcache.linesz * c->dcache.ways) : 0; 1033 1034 /* 1035 * R10000 and R12000 P-caches are odd in a positive way. They're 32kB 1036 * 2-way virtually indexed so normally would suffer from aliases. So 1037 * normally they'd suffer from aliases but magic in the hardware deals 1038 * with that for us so we don't need to take care ourselves. 1039 */ 1040 switch (c->cputype) { 1041 case CPU_20KC: 1042 case CPU_25KF: 1043 case CPU_SB1: 1044 case CPU_SB1A: 1045 case CPU_XLR: 1046 c->dcache.flags |= MIPS_CACHE_PINDEX; 1047 break; 1048 1049 case CPU_R10000: 1050 case CPU_R12000: 1051 case CPU_R14000: 1052 break; 1053 1054 case CPU_M14KC: 1055 case CPU_24K: 1056 case CPU_34K: 1057 case CPU_74K: 1058 case CPU_1004K: 1059 if ((read_c0_config7() & (1 << 16))) { 1060 /* effectively physically indexed dcache, 1061 thus no virtual aliases. */ 1062 c->dcache.flags |= MIPS_CACHE_PINDEX; 1063 break; 1064 } 1065 default: 1066 if (c->dcache.waysize > PAGE_SIZE) 1067 c->dcache.flags |= MIPS_CACHE_ALIASES; 1068 } 1069 1070 switch (c->cputype) { 1071 case CPU_20KC: 1072 /* 1073 * Some older 20Kc chips doesn't have the 'VI' bit in 1074 * the config register. 1075 */ 1076 c->icache.flags |= MIPS_CACHE_VTAG; 1077 break; 1078 1079 case CPU_ALCHEMY: 1080 c->icache.flags |= MIPS_CACHE_IC_F_DC; 1081 break; 1082 } 1083 1084 #ifdef CONFIG_CPU_LOONGSON2 1085 /* 1086 * LOONGSON2 has 4 way icache, but when using indexed cache op, 1087 * one op will act on all 4 ways 1088 */ 1089 c->icache.ways = 1; 1090 #endif 1091 1092 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", 1093 icache_size >> 10, 1094 c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT", 1095 way_string[c->icache.ways], c->icache.linesz); 1096 1097 printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", 1098 dcache_size >> 10, way_string[c->dcache.ways], 1099 (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT", 1100 (c->dcache.flags & MIPS_CACHE_ALIASES) ? 1101 "cache aliases" : "no aliases", 1102 c->dcache.linesz); 1103 } 1104 1105 /* 1106 * If you even _breathe_ on this function, look at the gcc output and make sure 1107 * it does not pop things on and off the stack for the cache sizing loop that 1108 * executes in KSEG1 space or else you will crash and burn badly. You have 1109 * been warned. 1110 */ 1111 static int __cpuinit probe_scache(void) 1112 { 1113 unsigned long flags, addr, begin, end, pow2; 1114 unsigned int config = read_c0_config(); 1115 struct cpuinfo_mips *c = ¤t_cpu_data; 1116 1117 if (config & CONF_SC) 1118 return 0; 1119 1120 begin = (unsigned long) &_stext; 1121 begin &= ~((4 * 1024 * 1024) - 1); 1122 end = begin + (4 * 1024 * 1024); 1123 1124 /* 1125 * This is such a bitch, you'd think they would make it easy to do 1126 * this. Away you daemons of stupidity! 1127 */ 1128 local_irq_save(flags); 1129 1130 /* Fill each size-multiple cache line with a valid tag. */ 1131 pow2 = (64 * 1024); 1132 for (addr = begin; addr < end; addr = (begin + pow2)) { 1133 unsigned long *p = (unsigned long *) addr; 1134 __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */ 1135 pow2 <<= 1; 1136 } 1137 1138 /* Load first line with zero (therefore invalid) tag. */ 1139 write_c0_taglo(0); 1140 write_c0_taghi(0); 1141 __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */ 1142 cache_op(Index_Store_Tag_I, begin); 1143 cache_op(Index_Store_Tag_D, begin); 1144 cache_op(Index_Store_Tag_SD, begin); 1145 1146 /* Now search for the wrap around point. */ 1147 pow2 = (128 * 1024); 1148 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { 1149 cache_op(Index_Load_Tag_SD, addr); 1150 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ 1151 if (!read_c0_taglo()) 1152 break; 1153 pow2 <<= 1; 1154 } 1155 local_irq_restore(flags); 1156 addr -= begin; 1157 1158 scache_size = addr; 1159 c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); 1160 c->scache.ways = 1; 1161 c->dcache.waybit = 0; /* does not matter */ 1162 1163 return 1; 1164 } 1165 1166 #if defined(CONFIG_CPU_LOONGSON2) 1167 static void __init loongson2_sc_init(void) 1168 { 1169 struct cpuinfo_mips *c = ¤t_cpu_data; 1170 1171 scache_size = 512*1024; 1172 c->scache.linesz = 32; 1173 c->scache.ways = 4; 1174 c->scache.waybit = 0; 1175 c->scache.waysize = scache_size / (c->scache.ways); 1176 c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); 1177 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1178 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1179 1180 c->options |= MIPS_CPU_INCLUSIVE_CACHES; 1181 } 1182 #endif 1183 1184 extern int r5k_sc_init(void); 1185 extern int rm7k_sc_init(void); 1186 extern int mips_sc_init(void); 1187 1188 static void __cpuinit setup_scache(void) 1189 { 1190 struct cpuinfo_mips *c = ¤t_cpu_data; 1191 unsigned int config = read_c0_config(); 1192 int sc_present = 0; 1193 1194 /* 1195 * Do the probing thing on R4000SC and R4400SC processors. Other 1196 * processors don't have a S-cache that would be relevant to the 1197 * Linux memory management. 1198 */ 1199 switch (c->cputype) { 1200 case CPU_R4000SC: 1201 case CPU_R4000MC: 1202 case CPU_R4400SC: 1203 case CPU_R4400MC: 1204 sc_present = run_uncached(probe_scache); 1205 if (sc_present) 1206 c->options |= MIPS_CPU_CACHE_CDEX_S; 1207 break; 1208 1209 case CPU_R10000: 1210 case CPU_R12000: 1211 case CPU_R14000: 1212 scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16); 1213 c->scache.linesz = 64 << ((config >> 13) & 1); 1214 c->scache.ways = 2; 1215 c->scache.waybit= 0; 1216 sc_present = 1; 1217 break; 1218 1219 case CPU_R5000: 1220 case CPU_NEVADA: 1221 #ifdef CONFIG_R5000_CPU_SCACHE 1222 r5k_sc_init(); 1223 #endif 1224 return; 1225 1226 case CPU_RM7000: 1227 case CPU_RM9000: 1228 #ifdef CONFIG_RM7000_CPU_SCACHE 1229 rm7k_sc_init(); 1230 #endif 1231 return; 1232 1233 #if defined(CONFIG_CPU_LOONGSON2) 1234 case CPU_LOONGSON2: 1235 loongson2_sc_init(); 1236 return; 1237 #endif 1238 case CPU_XLP: 1239 /* don't need to worry about L2, fully coherent */ 1240 return; 1241 1242 default: 1243 if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1244 c->isa_level == MIPS_CPU_ISA_M32R2 || 1245 c->isa_level == MIPS_CPU_ISA_M64R1 || 1246 c->isa_level == MIPS_CPU_ISA_M64R2) { 1247 #ifdef CONFIG_MIPS_CPU_SCACHE 1248 if (mips_sc_init ()) { 1249 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; 1250 printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", 1251 scache_size >> 10, 1252 way_string[c->scache.ways], c->scache.linesz); 1253 } 1254 #else 1255 if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) 1256 panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); 1257 #endif 1258 return; 1259 } 1260 sc_present = 0; 1261 } 1262 1263 if (!sc_present) 1264 return; 1265 1266 /* compute a couple of other cache variables */ 1267 c->scache.waysize = scache_size / c->scache.ways; 1268 1269 c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); 1270 1271 printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1272 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1273 1274 c->options |= MIPS_CPU_INCLUSIVE_CACHES; 1275 } 1276 1277 void au1x00_fixup_config_od(void) 1278 { 1279 /* 1280 * c0_config.od (bit 19) was write only (and read as 0) 1281 * on the early revisions of Alchemy SOCs. It disables the bus 1282 * transaction overlapping and needs to be set to fix various errata. 1283 */ 1284 switch (read_c0_prid()) { 1285 case 0x00030100: /* Au1000 DA */ 1286 case 0x00030201: /* Au1000 HA */ 1287 case 0x00030202: /* Au1000 HB */ 1288 case 0x01030200: /* Au1500 AB */ 1289 /* 1290 * Au1100 errata actually keeps silence about this bit, so we set it 1291 * just in case for those revisions that require it to be set according 1292 * to the (now gone) cpu table. 1293 */ 1294 case 0x02030200: /* Au1100 AB */ 1295 case 0x02030201: /* Au1100 BA */ 1296 case 0x02030202: /* Au1100 BC */ 1297 set_c0_config(1 << 19); 1298 break; 1299 } 1300 } 1301 1302 /* CP0 hazard avoidance. */ 1303 #define NXP_BARRIER() \ 1304 __asm__ __volatile__( \ 1305 ".set noreorder\n\t" \ 1306 "nop; nop; nop; nop; nop; nop;\n\t" \ 1307 ".set reorder\n\t") 1308 1309 static void nxp_pr4450_fixup_config(void) 1310 { 1311 unsigned long config0; 1312 1313 config0 = read_c0_config(); 1314 1315 /* clear all three cache coherency fields */ 1316 config0 &= ~(0x7 | (7 << 25) | (7 << 28)); 1317 config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) | 1318 ((_page_cachable_default >> _CACHE_SHIFT) << 25) | 1319 ((_page_cachable_default >> _CACHE_SHIFT) << 28)); 1320 write_c0_config(config0); 1321 NXP_BARRIER(); 1322 } 1323 1324 static int __cpuinitdata cca = -1; 1325 1326 static int __init cca_setup(char *str) 1327 { 1328 get_option(&str, &cca); 1329 1330 return 1; 1331 } 1332 1333 __setup("cca=", cca_setup); 1334 1335 static void __cpuinit coherency_setup(void) 1336 { 1337 if (cca < 0 || cca > 7) 1338 cca = read_c0_config() & CONF_CM_CMASK; 1339 _page_cachable_default = cca << _CACHE_SHIFT; 1340 1341 pr_debug("Using cache attribute %d\n", cca); 1342 change_c0_config(CONF_CM_CMASK, cca); 1343 1344 /* 1345 * c0_status.cu=0 specifies that updates by the sc instruction use 1346 * the coherency mode specified by the TLB; 1 means cachable 1347 * coherent update on write will be used. Not all processors have 1348 * this bit and; some wire it to zero, others like Toshiba had the 1349 * silly idea of putting something else there ... 1350 */ 1351 switch (current_cpu_type()) { 1352 case CPU_R4000PC: 1353 case CPU_R4000SC: 1354 case CPU_R4000MC: 1355 case CPU_R4400PC: 1356 case CPU_R4400SC: 1357 case CPU_R4400MC: 1358 clear_c0_config(CONF_CU); 1359 break; 1360 /* 1361 * We need to catch the early Alchemy SOCs with 1362 * the write-only co_config.od bit and set it back to one on: 1363 * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB 1364 */ 1365 case CPU_ALCHEMY: 1366 au1x00_fixup_config_od(); 1367 break; 1368 1369 case PRID_IMP_PR4450: 1370 nxp_pr4450_fixup_config(); 1371 break; 1372 } 1373 } 1374 1375 #if defined(CONFIG_DMA_NONCOHERENT) 1376 1377 static int __cpuinitdata coherentio; 1378 1379 static int __init setcoherentio(char *str) 1380 { 1381 coherentio = 1; 1382 1383 return 1; 1384 } 1385 1386 __setup("coherentio", setcoherentio); 1387 #endif 1388 1389 static void __cpuinit r4k_cache_error_setup(void) 1390 { 1391 extern char __weak except_vec2_generic; 1392 extern char __weak except_vec2_sb1; 1393 struct cpuinfo_mips *c = ¤t_cpu_data; 1394 1395 switch (c->cputype) { 1396 case CPU_SB1: 1397 case CPU_SB1A: 1398 set_uncached_handler(0x100, &except_vec2_sb1, 0x80); 1399 break; 1400 1401 default: 1402 set_uncached_handler(0x100, &except_vec2_generic, 0x80); 1403 break; 1404 } 1405 } 1406 1407 void __cpuinit r4k_cache_init(void) 1408 { 1409 extern void build_clear_page(void); 1410 extern void build_copy_page(void); 1411 struct cpuinfo_mips *c = ¤t_cpu_data; 1412 1413 probe_pcache(); 1414 setup_scache(); 1415 1416 r4k_blast_dcache_page_setup(); 1417 r4k_blast_dcache_page_indexed_setup(); 1418 r4k_blast_dcache_setup(); 1419 r4k_blast_icache_page_setup(); 1420 r4k_blast_icache_page_indexed_setup(); 1421 r4k_blast_icache_setup(); 1422 r4k_blast_scache_page_setup(); 1423 r4k_blast_scache_page_indexed_setup(); 1424 r4k_blast_scache_setup(); 1425 1426 /* 1427 * Some MIPS32 and MIPS64 processors have physically indexed caches. 1428 * This code supports virtually indexed processors and will be 1429 * unnecessarily inefficient on physically indexed processors. 1430 */ 1431 if (c->dcache.linesz) 1432 shm_align_mask = max_t( unsigned long, 1433 c->dcache.sets * c->dcache.linesz - 1, 1434 PAGE_SIZE - 1); 1435 else 1436 shm_align_mask = PAGE_SIZE-1; 1437 1438 __flush_cache_vmap = r4k__flush_cache_vmap; 1439 __flush_cache_vunmap = r4k__flush_cache_vunmap; 1440 1441 flush_cache_all = cache_noop; 1442 __flush_cache_all = r4k___flush_cache_all; 1443 flush_cache_mm = r4k_flush_cache_mm; 1444 flush_cache_page = r4k_flush_cache_page; 1445 flush_cache_range = r4k_flush_cache_range; 1446 1447 __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; 1448 1449 flush_cache_sigtramp = r4k_flush_cache_sigtramp; 1450 flush_icache_all = r4k_flush_icache_all; 1451 local_flush_data_cache_page = local_r4k_flush_data_cache_page; 1452 flush_data_cache_page = r4k_flush_data_cache_page; 1453 flush_icache_range = r4k_flush_icache_range; 1454 local_flush_icache_range = local_r4k_flush_icache_range; 1455 1456 #if defined(CONFIG_DMA_NONCOHERENT) 1457 if (coherentio) { 1458 _dma_cache_wback_inv = (void *)cache_noop; 1459 _dma_cache_wback = (void *)cache_noop; 1460 _dma_cache_inv = (void *)cache_noop; 1461 } else { 1462 _dma_cache_wback_inv = r4k_dma_cache_wback_inv; 1463 _dma_cache_wback = r4k_dma_cache_wback_inv; 1464 _dma_cache_inv = r4k_dma_cache_inv; 1465 } 1466 #endif 1467 1468 build_clear_page(); 1469 build_copy_page(); 1470 #if !defined(CONFIG_MIPS_CMP) 1471 local_r4k___flush_cache_all(NULL); 1472 #endif 1473 coherency_setup(); 1474 board_cache_error_setup = r4k_cache_error_setup; 1475 } 1476