1 /* 2 * Microblaze MMU emulation for qemu. 3 * 4 * Copyright (c) 2009 Edgar E. Iglesias 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd. 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 25 static unsigned int tlb_decode_size(unsigned int f) 26 { 27 static const unsigned int sizes[] = { 28 1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024, 29 1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024 30 }; 31 assert(f < ARRAY_SIZE(sizes)); 32 return sizes[f]; 33 } 34 35 static void mmu_flush_idx(CPUMBState *env, unsigned int idx) 36 { 37 CPUState *cs = CPU(mb_env_get_cpu(env)); 38 struct microblaze_mmu *mmu = &env->mmu; 39 unsigned int tlb_size; 40 uint32_t tlb_tag, end, t; 41 42 t = mmu->rams[RAM_TAG][idx]; 43 if (!(t & TLB_VALID)) 44 return; 45 46 tlb_tag = t & TLB_EPN_MASK; 47 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7); 48 end = tlb_tag + tlb_size; 49 50 while (tlb_tag < end) { 51 tlb_flush_page(cs, tlb_tag); 52 tlb_tag += TARGET_PAGE_SIZE; 53 } 54 } 55 56 static void mmu_change_pid(CPUMBState *env, unsigned int newpid) 57 { 58 struct microblaze_mmu *mmu = &env->mmu; 59 unsigned int i; 60 uint32_t t; 61 62 if (newpid & ~0xff) 63 qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid); 64 65 for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) { 66 /* Lookup and decode. */ 67 t = mmu->rams[RAM_TAG][i]; 68 if (t & TLB_VALID) { 69 if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i])) 70 mmu_flush_idx(env, i); 71 } 72 } 73 } 74 75 /* rw - 0 = read, 1 = write, 2 = fetch. */ 76 unsigned int mmu_translate(struct microblaze_mmu *mmu, 77 struct microblaze_mmu_lookup *lu, 78 target_ulong vaddr, int rw, int mmu_idx) 79 { 80 unsigned int i, hit = 0; 81 unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel; 82 uint64_t tlb_tag, tlb_rpn, mask; 83 uint32_t tlb_size, t0; 84 85 lu->err = ERR_MISS; 86 for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) { 87 uint64_t t, d; 88 89 /* Lookup and decode. */ 90 t = mmu->rams[RAM_TAG][i]; 91 if (t & TLB_VALID) { 92 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7); 93 if (tlb_size < TARGET_PAGE_SIZE) { 94 qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size); 95 abort(); 96 } 97 98 mask = ~((uint64_t)tlb_size - 1); 99 tlb_tag = t & TLB_EPN_MASK; 100 if ((vaddr & mask) != (tlb_tag & mask)) { 101 continue; 102 } 103 if (mmu->tids[i] 104 && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) { 105 continue; 106 } 107 108 /* Bring in the data part. */ 109 d = mmu->rams[RAM_DATA][i]; 110 tlb_ex = d & TLB_EX; 111 tlb_wr = d & TLB_WR; 112 113 /* Now let's see if there is a zone that overrides the protbits. */ 114 tlb_zsel = (d >> 4) & 0xf; 115 t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2)); 116 t0 &= 0x3; 117 118 if (tlb_zsel > mmu->c_mmu_zones) { 119 qemu_log_mask(LOG_GUEST_ERROR, 120 "tlb zone select out of range! %d\n", tlb_zsel); 121 t0 = 1; /* Ignore. */ 122 } 123 124 if (mmu->c_mmu == 1) { 125 t0 = 1; /* Zones are disabled. */ 126 } 127 128 switch (t0) { 129 case 0: 130 if (mmu_idx == MMU_USER_IDX) 131 continue; 132 break; 133 case 2: 134 if (mmu_idx != MMU_USER_IDX) { 135 tlb_ex = 1; 136 tlb_wr = 1; 137 } 138 break; 139 case 3: 140 tlb_ex = 1; 141 tlb_wr = 1; 142 break; 143 default: break; 144 } 145 146 lu->err = ERR_PROT; 147 lu->prot = PAGE_READ; 148 if (tlb_wr) 149 lu->prot |= PAGE_WRITE; 150 else if (rw == 1) 151 goto done; 152 if (tlb_ex) 153 lu->prot |=PAGE_EXEC; 154 else if (rw == 2) { 155 goto done; 156 } 157 158 tlb_rpn = d & TLB_RPN_MASK; 159 160 lu->vaddr = tlb_tag; 161 lu->paddr = tlb_rpn & mmu->c_addr_mask; 162 lu->size = tlb_size; 163 lu->err = ERR_HIT; 164 lu->idx = i; 165 hit = 1; 166 goto done; 167 } 168 } 169 done: 170 qemu_log_mask(CPU_LOG_MMU, 171 "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n", 172 vaddr, rw, tlb_wr, tlb_ex, hit); 173 return hit; 174 } 175 176 /* Writes/reads to the MMU's special regs end up here. */ 177 uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn) 178 { 179 unsigned int i; 180 uint32_t r = 0; 181 182 if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) { 183 qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n"); 184 return 0; 185 } 186 if (ext && rn != MMU_R_TLBLO) { 187 qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n"); 188 return 0; 189 } 190 191 switch (rn) { 192 /* Reads to HI/LO trig reads from the mmu rams. */ 193 case MMU_R_TLBLO: 194 case MMU_R_TLBHI: 195 if (!(env->mmu.c_mmu_tlb_access & 1)) { 196 qemu_log_mask(LOG_GUEST_ERROR, 197 "Invalid access to MMU reg %d\n", rn); 198 return 0; 199 } 200 201 i = env->mmu.regs[MMU_R_TLBX] & 0xff; 202 r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32); 203 if (rn == MMU_R_TLBHI) 204 env->mmu.regs[MMU_R_PID] = env->mmu.tids[i]; 205 break; 206 case MMU_R_PID: 207 case MMU_R_ZPR: 208 if (!(env->mmu.c_mmu_tlb_access & 1)) { 209 qemu_log_mask(LOG_GUEST_ERROR, 210 "Invalid access to MMU reg %d\n", rn); 211 return 0; 212 } 213 r = env->mmu.regs[rn]; 214 break; 215 case MMU_R_TLBX: 216 r = env->mmu.regs[rn]; 217 break; 218 case MMU_R_TLBSX: 219 qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n"); 220 break; 221 default: 222 qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn); 223 break; 224 } 225 qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r); 226 return r; 227 } 228 229 void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v) 230 { 231 MicroBlazeCPU *cpu = mb_env_get_cpu(env); 232 uint64_t tmp64; 233 unsigned int i; 234 qemu_log_mask(CPU_LOG_MMU, 235 "%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]); 236 237 if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) { 238 qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n"); 239 return; 240 } 241 if (ext && rn != MMU_R_TLBLO) { 242 qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n"); 243 return; 244 } 245 246 switch (rn) { 247 /* Writes to HI/LO trig writes to the mmu rams. */ 248 case MMU_R_TLBLO: 249 case MMU_R_TLBHI: 250 i = env->mmu.regs[MMU_R_TLBX] & 0xff; 251 if (rn == MMU_R_TLBHI) { 252 if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0)) 253 qemu_log_mask(LOG_GUEST_ERROR, 254 "invalidating index %x at pc=%" PRIx64 "\n", 255 i, env->sregs[SR_PC]); 256 env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff; 257 mmu_flush_idx(env, i); 258 } 259 tmp64 = env->mmu.rams[rn & 1][i]; 260 env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v); 261 break; 262 case MMU_R_ZPR: 263 if (env->mmu.c_mmu_tlb_access <= 1) { 264 qemu_log_mask(LOG_GUEST_ERROR, 265 "Invalid access to MMU reg %d\n", rn); 266 return; 267 } 268 269 /* Changes to the zone protection reg flush the QEMU TLB. 270 Fortunately, these are very uncommon. */ 271 if (v != env->mmu.regs[rn]) { 272 tlb_flush(CPU(cpu)); 273 } 274 env->mmu.regs[rn] = v; 275 break; 276 case MMU_R_PID: 277 if (env->mmu.c_mmu_tlb_access <= 1) { 278 qemu_log_mask(LOG_GUEST_ERROR, 279 "Invalid access to MMU reg %d\n", rn); 280 return; 281 } 282 283 if (v != env->mmu.regs[rn]) { 284 mmu_change_pid(env, v); 285 env->mmu.regs[rn] = v; 286 } 287 break; 288 case MMU_R_TLBX: 289 /* Bit 31 is read-only. */ 290 env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v); 291 break; 292 case MMU_R_TLBSX: 293 { 294 struct microblaze_mmu_lookup lu; 295 int hit; 296 297 if (env->mmu.c_mmu_tlb_access <= 1) { 298 qemu_log_mask(LOG_GUEST_ERROR, 299 "Invalid access to MMU reg %d\n", rn); 300 return; 301 } 302 303 hit = mmu_translate(&env->mmu, &lu, 304 v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false)); 305 if (hit) { 306 env->mmu.regs[MMU_R_TLBX] = lu.idx; 307 } else { 308 env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK; 309 } 310 break; 311 } 312 default: 313 qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn); 314 break; 315 } 316 } 317 318 void mmu_init(struct microblaze_mmu *mmu) 319 { 320 int i; 321 for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) { 322 mmu->regs[i] = 0; 323 } 324 } 325