/*
 * arch/xtensa/mm/mmu.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/cacheflush.h>


static inline void __flush_itlb_all (void)
{
        int way, index;

        for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
                for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
                        int entry = way + (index << PAGE_SHIFT);
                        invalidate_itlb_entry_no_isync (entry);
                }
        }
        asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
        int way, index;

        for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
                for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
                        int entry = way + (index << PAGE_SHIFT);
                        invalidate_dtlb_entry_no_isync (entry);
                }
        }
        asm volatile ("isync\n");
}


void flush_tlb_all (void)
{
        __flush_itlb_all();
        __flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries.  If mm is someone else's user
 * mapping, we invalidate its context; when that mapping is next swapped
 * in, a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
#if 0
        printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
#endif

        if (mm == current->active_mm) {
                unsigned long flags;
                /* local_irq_save() (not just local_save_flags()) so that
                 * the ASID update and the RASID write cannot be torn by
                 * an interrupt. */
                local_irq_save(flags);
                get_new_mmu_context(mm, asid_cache);
                set_rasid_register(ASID_INSERT(mm->context));
                local_irq_restore(flags);
        }
        else
                mm->context = 0;
}

void flush_tlb_range (struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;

        if (mm->context == NO_CONTEXT)
                return;

#if 0
        printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
               (unsigned long)mm->context, start, end);
#endif
        local_irq_save(flags);

        if (end - start + (PAGE_SIZE - 1) <= (SMALLEST_NTLB_ENTRIES << PAGE_SHIFT)) {
                int oldpid = get_rasid_register();
                set_rasid_register (ASID_INSERT(mm->context));
                start &= PAGE_MASK;
                if (vma->vm_flags & VM_EXEC)
                        while (start < end) {
                                invalidate_itlb_mapping(start);
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }
                else
                        while (start < end) {
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }

                set_rasid_register(oldpid);
        } else {
                /* The range is too large to invalidate page by page;
                 * assign the mm a new ASID instead. */
                get_new_mmu_context(mm, asid_cache);
                if (mm == current->active_mm)
                        set_rasid_register(ASID_INSERT(mm->context));
        }
        local_irq_restore(flags);
}

void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct* mm = vma->vm_mm;
        unsigned long flags;
        int oldpid;
#if 0
        printk("[tlbpage<%02lx,%08lx>]\n",
               (unsigned long)mm->context, page);
#endif

        if (mm->context == NO_CONTEXT)
                return;

        local_irq_save(flags);

        oldpid = get_rasid_register();

        if (vma->vm_flags & VM_EXEC)
                invalidate_itlb_mapping(page);
        invalidate_dtlb_mapping(page);

        set_rasid_register(oldpid);

        local_irq_restore(flags);

#if 0
        flush_tlb_all();
        return;
#endif
}
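
/* Illustrative only (not part of the original port): a minimal sketch of
 * how the generic mm layer uses the flush hooks above.  Kept under #if 0,
 * matching the debug-block convention used elsewhere in this file. */
#if 0
static void example_tlb_flush_usage(struct vm_area_struct *vma,
                                    unsigned long addr)
{
        /* After a single PTE is modified or cleared, drop the one
         * stale TLB entry for that page. */
        flush_tlb_page(vma, addr & PAGE_MASK);

        /* Tearing down an entire address space is cheaper through an
         * ASID bump (flush_tlb_mm) than a page-by-page walk. */
        flush_tlb_mm(vma->vm_mm);
}
#endif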


#ifdef DEBUG_TLB

#define USE_ITLB  0
#define USE_DTLB  1

struct way_config_t {
        int indicies;
        int indicies_log2;
        int pgsz_log2;
        int arf;
};

static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
        },
        { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
          XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
        }
};

static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
        },
        { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
          XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
        }
};
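
/* Illustrative only: the "entry address" consumed by the read_*tlb_* and
 * invalidate_*tlb_* helpers encodes the way number in the low bits plus the
 * index shifted by the way's minimum page size, as used throughout the dump
 * code below.  A sketch, not a new interface; #if 0 keeps it out of the
 * build. */
#if 0
static inline unsigned tlb_entry_addr(const struct way_config_t *cfg,
                                      int way, int index)
{
        return way + (index << cfg->pgsz_log2);
}
#endif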

/* Total number of entries (parenthesized so the sums expand safely): */
#define ITLB_TOTAL_ENTRIES \
        (XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
         XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))
#define DTLB_TOTAL_ENTRIES \
        (XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
         XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))


typedef struct {
        unsigned va;
        unsigned pa;
        unsigned char asid;
        unsigned char ca;
        unsigned char way;
        unsigned char index;
        unsigned char pgsz_log2;        /* 0 .. 32 */
        unsigned char type;             /* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;

/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
        if (a->asid < b->asid) return -1;
        if (a->asid > b->asid) return 1;
        if (a->va < b->va) return -1;
        if (a->va > b->va) return 1;
        if (a->pa < b->pa) return -1;
        if (a->pa > b->pa) return 1;
        if (a->ca < b->ca) return -1;
        if (a->ca > b->ca) return 1;
        if (a->way < b->way) return -1;
        if (a->way > b->way) return 1;
        if (a->index < b->index) return -1;
        if (a->index > b->index) return 1;
        return 0;
}

void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
        int i, j;
        /* Simple O(n^2) exchange sort; n is at most the total number
         * of TLB entries, so this is cheap enough for debug output: */
        for (i = 0; i < n-1; i++)
                for (j = i+1; j < n; j++)
                        if (cmp_tlb_dump_info(t+i, t+j) > 0) {
                                tlb_dump_entry_t tmp = t[i];
                                t[i] = t[j];
                                t[j] = tmp;
                        }
}


static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];


static inline const char *way_type (int type)
{
        return type ? "autorefill" : "non-autorefill";
}

void print_entry (struct way_config_t *way_info,
                  unsigned int way,
                  unsigned int index,
                  unsigned int virtual,
                  unsigned int translation)
{
        char valid_chr;
        unsigned int va, pa, asid, ca;

        va = virtual &
                ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
        asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
        pa = translation & ~((1 << way_info->pgsz_log2) - 1);
        ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
        valid_chr = asid ? 'V' : 'I';

        /* Compute and incorporate the effect of the index bits on the
         * va.  It's more useful for kernel debugging, since we always
         * want to know the effective va anyway. */

        va += index << way_info->pgsz_log2;

        printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
                way, index, valid_chr, va, pa, asid, ca);
}
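
/* Worked example of the effective-VA math above (illustrative numbers):
 * with pgsz_log2 = 12 (4 KiB minimum page size) and indicies_log2 = 2
 * (4 indices per way), bits [13:12] of the VPN field are index bits, so
 * entry index 3 contributes 3 << 12 = 0x3000 to the effective VA. */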

void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
        print_entry (way_info, way, index,
                     read_itlb_virtual (way + (index << way_info->pgsz_log2)),
                     read_itlb_translation (way + (index << way_info->pgsz_log2)));
}

void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
        print_entry (way_info, way, index,
                     read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
                     read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}

void dump_itlb (void)
{
        int way, index;

        printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);

        for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
                /* Print the minimum page size in bytes, not its log2: */
                printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
                        way, itlb[way].indicies,
                        1 << itlb[way].pgsz_log2, way_type(itlb[way].arf));
                for (index = 0; index < itlb[way].indicies; index++) {
                        print_itlb_entry(&itlb[way], way, index);
                }
        }
}

void dump_dtlb (void)
{
        int way, index;

        printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);

        for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
                printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
                        way, dtlb[way].indicies,
                        1 << dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
                for (index = 0; index < dtlb[way].indicies; index++) {
                        print_dtlb_entry(&dtlb[way], way, index);
                }
        }
}
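
/* dump_tlb() below gathers every way/index entry into 'tinfo', sorts the
 * array by (ASID, VA, PA, ...) via sort_tlb_dump_info(), then prints it;
 * sorting groups entries per ASID, which is what one usually wants when
 * hunting stale mappings. */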

void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
               int entries, int ways, int type, int show_invalid)
{
        tlb_dump_entry_t *e = tinfo;
        int way, i;

        /* Gather all info: */
        for (way = 0; way < ways; way++) {
                struct way_config_t *cfg = config + way;
                for (i = 0; i < cfg->indicies; i++) {
                        unsigned wayindex = way + (i << cfg->pgsz_log2);
                        unsigned vv = (type ? read_dtlb_virtual (wayindex)
                                            : read_itlb_virtual (wayindex));
                        unsigned pp = (type ? read_dtlb_translation (wayindex)
                                            : read_itlb_translation (wayindex));

                        /* Compute and incorporate the effect of the index
                         * bits on the va.  It's more useful for kernel
                         * debugging, since we always want to know the
                         * effective va anyway. */

                        e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
                        e->va += (i << cfg->pgsz_log2);
                        e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
                        e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
                        e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
                        e->way = way;
                        e->index = i;
                        e->pgsz_log2 = cfg->pgsz_log2;
                        e->type = type;
                        e++;
                }
        }
#if 1
        /* Sort by ASID and VADDR: */
        sort_tlb_dump_info (tinfo, entries);
#endif

        /* Display all sorted info: */
        printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
        for (e = tinfo, i = 0; i < entries; i++, e++) {
#if 0
                if (e->asid == 0 && !show_invalid)
                        continue;
#endif
                printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
                        (e->type ? 'D' : 'I'), e->way, e->index,
                        e->asid, e->va, e->pa, e->ca,
                        (1 << (e->pgsz_log2 % 10)),
                        " kMG"[e->pgsz_log2 / 10]
                        );
        }
}

void dump_tlbs2 (int showinv)
{
        dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
        dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}

void dump_all_tlbs (void)
{
        dump_tlbs2 (1);
}

void dump_valid_tlbs (void)
{
        dump_tlbs2 (0);
}


void dump_tlbs (void)
{
        dump_itlb();
        dump_dtlb();
}

void dump_cache_tag(int dcache, int idx)
{
        int w, i, s, e;
        unsigned long tag, index;
        unsigned long num_lines, num_ways, cache_size, line_size;

        num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
        cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
        line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

        /* Despite the name, this is the size of one way in bytes; the
         * loop below steps through it one line at a time. */
        num_lines = cache_size / num_ways;

        s = 0; e = num_lines;

        if (idx >= 0)
                e = (s = idx * line_size) + 1;

        for (i = s; i < e; i += line_size) {
                printk("\nline %#08x:", i);
                for (w = 0; w < num_ways; w++) {
                        index = w * num_lines + i;
                        if (dcache)
                                __asm__ __volatile__("ldct %0, %1\n\t"
                                                     : "=a"(tag) : "a"(index));
                        else
                                __asm__ __volatile__("lict %0, %1\n\t"
                                                     : "=a"(tag) : "a"(index));

                        printk(" %#010lx", tag);
                }
        }
        printk ("\n");
}

void dump_icache(int index)
{
        unsigned long data, addr;
        int w, i;

        const unsigned long num_ways = XCHAL_ICACHE_WAYS;
        const unsigned long cache_size = XCHAL_ICACHE_SIZE;
        const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
        const unsigned long num_lines = cache_size / num_ways / line_size;

        for (w = 0; w < num_ways; w++) {
                printk ("\nWay %d", w);

                for (i = 0; i < line_size; i += 4) {
                        /* The way stride is the way size in bytes
                         * (num_lines * line_size), matching the
                         * addressing used in dump_cache_tag() above. */
                        addr = w * num_lines * line_size + index * line_size + i;
                        __asm__ __volatile__("licw %0, %1\n\t"
                                             : "=a"(data) : "a"(addr));
                        printk(" %#010lx", data);
                }
        }
        printk ("\n");
}

void dump_cache_tags(void)
{
        printk("Instruction cache\n");
        dump_cache_tag(0, -1);
        printk("Data cache\n");
        dump_cache_tag(1, -1);
}

#endif  /* DEBUG_TLB */