/*
 * Dump R4x00 TLB for debugging purposes.
 *
 * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
 * Copyright (C) 1999 by Silicon Graphics, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>

void dump_tlb_regs(void)
{
	const int field = 2 * sizeof(unsigned long);

	pr_info("Index    : %0x\n", read_c0_index());
	pr_info("PageMask : %0x\n", read_c0_pagemask());
	if (cpu_has_guestid)
		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
	pr_info("Wired    : %0x\n", read_c0_wired());
	switch (current_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		pr_info("FrameMask: %0x\n", read_c0_framemask());
		break;
	}
	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
	if (cpu_has_htw) {
		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
	}
}

static inline const char *msk2str(unsigned int mask)
{
	switch (mask) {
	case PM_4K:	return "4kb";
	case PM_16K:	return "16kb";
	case PM_64K:	return "64kb";
	case PM_256K:	return "256kb";
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case PM_8K:	return "8kb";
	case PM_32K:	return "32kb";
	case PM_128K:	return "128kb";
	case PM_512K:	return "512kb";
	case PM_2M:	return "2Mb";
	case PM_8M:	return "8Mb";
	case PM_32M:	return "32Mb";
#endif
#ifndef CONFIG_CPU_VR41XX
	case PM_1M:	return "1Mb";
	case PM_4M:	return "4Mb";
	case PM_16M:	return "16Mb";
	case PM_64M:	return "64Mb";
	case PM_256M:	return "256Mb";
	case PM_1G:	return "1Gb";
#endif
	}
	return "";
}

static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & asidmask;
	if (cpu_has_guestid)
		s_guestctl1 = read_c0_guestctl1();

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi  = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();
		if (cpu_has_guestid)
			guestctl1 = read_c0_guestctl1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they should
		 * match one another, because some revisions of the SB1 core may
		 * leave only a single G bit set after a machine check exception
		 * due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
		    (entryhi & asidmask) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;

		pr_cont("va=%0*lx asid=%0*lx",
			vwidth, (entryhi & ~0x1fffUL),
			asidwidth, entryhi & asidmask);
		if (cpu_has_guestid)
			pr_cont(" gid=%02lx",
				(guestctl1 & MIPS_GCTL1_RID)
					>> MIPS_GCTL1_RID_SHIFT);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		pr_cont("\n\t[");
		if (cpu_has_rixi)
			pr_cont("ri=%d xi=%d ",
				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
			pwidth, pa, c0,
			(entrylo0 & ENTRYLO_D) ? 1 : 0,
			(entrylo0 & ENTRYLO_V) ? 1 : 0,
			(entrylo0 & ENTRYLO_G) ? 1 : 0);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			pr_cont("ri=%d xi=%d ",
				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
			pwidth, pa, c1,
			(entrylo1 & ENTRYLO_D) ? 1 : 0,
			(entrylo1 & ENTRYLO_V) ? 1 : 0,
			(entrylo1 & ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
	if (cpu_has_guestid)
		write_c0_guestctl1(s_guestctl1);
}

void dump_tlb_all(void)
{
	dump_tlb(0, current_cpu_data.tlbsize - 1);
}
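
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller showing how the two exported helpers above might be invoked
 * together from MIPS debugging code.  The function name below is an
 * assumption for illustration; in practice dump_tlb_regs() and
 * dump_tlb_all() are declared in <asm/tlbdebug.h> and called from the
 * kernel's exception/oops reporting paths.
 */
#if 0	/* example only, never compiled */
static void example_dump_tlb_state(void)
{
	/* Dump the raw TLB-related CP0 registers first ... */
	dump_tlb_regs();
	/* ... then every TLB entry that is currently in use. */
	dump_tlb_all();
}
#endif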