/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#ifndef CONFIG_MIPS_MT_SMP
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}

#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
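
/*
 * Index-type cache ops work on whatever line happens to sit at a given
 * index of the local cache.  On CMP/CPS systems a line may live in
 * another core's cache or be subject to coherence-manager intervention,
 * so index ops cannot be used safely there and address-type ranges are
 * preferred instead.
 */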

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	switch (dc_lsize) {
	case 0:
		r4k_blast_dcache_page = (void *)cache_noop;
		break;
	case 16:
		r4k_blast_dcache_page = blast_dcache16_page;
		break;
	case 32:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
		break;
	case 64:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
		break;
	case 128:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
		break;
	default:
		break;
	}
}

#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}
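
/*
 * The TX49 variants below invalidate a 32-byte-line I-cache in two
 * passes.  JUMP_TO_ALIGN places the loop code at a known 1kB-chunk
 * parity, so each pass only blasts the chunks of the opposite parity
 * and never invalidates the code it is currently executing.  The R4600
 * V1 variants instead run the whole blast with interrupts disabled,
 * which is the workaround this file applies for that core's index-op
 * erratum.
 */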
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
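
/*
 * As with the D-cache variants above, each setup routine below latches
 * a function pointer keyed on the probed line size (and any applicable
 * workaround) so the hot flush paths never re-examine the cache
 * geometry.
 */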
static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}
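
/*
 * The S-cache variants test scache_size rather than the line size:
 * setup_scache() leaves scache_size at zero when no usable S-cache is
 * present, making it the authoritative "do we have an S-cache" flag
 * here.
 */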
static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#ifdef CONFIG_MIPS_MT_SMP
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
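
/*
 * vmap/vunmap flushes blast the whole D-cache: the kernel mapping
 * being created or torn down may alias other mappings of the same
 * pages, and there is no cheap way to pick out just the affected
 * lines.
 */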
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely ...  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
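
/*
 * flush_data_cache_page can be called from atomic context, where
 * smp_call_function() (and hence r4k_on_each_cpu()) is not allowed;
 * in that case only the local CPU's cache gets flushed.
 */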
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			protected_blast_icache_range(start, end);
			break;
		}
	}
#ifdef CONFIG_EVA
	/*
	 * Due to all possible segment mappings, there might be cache aliases
	 * caused by the bootloader being in non-EVA mode and the CPU switching
	 * to EVA during early kernel init.  It's best to flush the S-cache
	 * to avoid secondary cores fetching stale data, which would lead to
	 * kernel crashes.
	 */
	bc_wback_inv(start, (end - start));
	__sync();
#endif
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
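
/*
 * Cache maintenance hooks for non-coherent DMA.  A typical driver
 * sequence (with hypothetical buffer/length names) is to write back
 * and invalidate before a device reads a buffer,
 *
 *	dma_cache_wback_inv((unsigned long)buf, len);
 *
 * and to invalidate before the CPU reads data a device has written.
 * These are wired up as _dma_cache_wback_inv/_dma_cache_inv in
 * r4k_cache_init() below.
 */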
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  Solved
			 * by aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
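
/*
 * The signal trampoline is written to the user stack by the kernel,
 * so the new instructions must be pushed out of the D-cache (and any
 * non-snooping S-cache) and the stale I-cache line invalidated before
 * they can safely be executed.
 */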
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long vaddr;
	int size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			c->dcache.flags |= MIPS_CACHE_VTAG;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			c->dcache.flags |= MIPS_CACHE_VTAG;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}
}
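
/*
 * On BMIPS5000 the I-cache fills from the D-cache once the write
 * buffers have drained (see the per-CPU overrides in r4k_cache_init()),
 * so two SYNCs followed by a run of NOPs serve as the flush for most
 * cache operations.
 */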
static void b5k_instruction_hazard(void)
{
	__sync();
	__sync();
	__asm__ __volatile__(
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	: : : "memory");
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
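
/*
 * Probe the primary caches: decode the R4000-style c0_config fields
 * (or c0_config1 on MIPS32/MIPS64 and Loongson parts) into sizes,
 * line sizes, ways and way bits for the I- and D-caches.
 */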
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = 0;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping their hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so normally they'd suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_74K:
	case CPU_1074K:
		alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (read_c0_config7() & MIPS_CONF7_AR) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
		 * one op will act on all 4 ways
		 */
		c->icache.ways = 1;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
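
/*
 * Roughly, the sizing loop below first fills a valid tag at each
 * power-of-two offset, then stores a zero (invalid) tag at the base
 * address and index-loads tags at increasing power-of-two offsets:
 * the first offset whose index wraps around to the base line reads
 * back the zero tag, and that offset is the S-cache size.
 */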
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;	/* does not matter */

	return 1;
}

static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
		      c->scache.ways *
		      c->scache.linesz;
	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata say nothing about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	__asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
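
/*
 * The cache coherency algorithm (CCA) used for kernel mappings can be
 * overridden with the "cca=" kernel parameter; otherwise
 * coherency_setup() keeps whatever c0_config currently holds.
 */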
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others, like Toshiba, had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
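
/*
 * Top-level initialization: probe the cache geometry, latch the blast
 * handlers, and point the generic MIPS cache-flush hooks at the R4k
 * implementations, with per-CPU overrides applied at the end.
 */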
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;

	/*
	 * Per-CPU overrides
	 */
	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Cache aliases are handled in hardware; allow HIGHMEM */
		current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	}
}

static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
				 void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);