/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#if defined(CONFIG_USER_ONLY)

/*
 * User-mode emulation: there is no interrupt controller or privileged
 * state to switch into.  Just clear the pending exception, drop any
 * lwx/swx reservation, and record the return address in r14 (the
 * MicroBlaze interrupt link register).
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;
    env->res_addr = RES_ADDR_NONE;
    env->regs[14] = env->pc;
}

/*
 * User-mode emulation has no MMU: any fault that reaches this handler
 * cannot be repaired, so raise a dummy exception number and unwind to
 * the cpu loop.  Never returns (cpu_loop_exit_restore longjmps out).
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

/*
 * Softmmu TLB fill.
 *
 * Returns true after installing a QEMU TLB entry for @address; returns
 * false only when @probe is set and the translation misses.  On a real
 * (non-probe) miss or protection fault this records the fault address
 * in EAR and an exception code in ESR, then longjmps out with EXCP_MMU.
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    struct microblaze_mmu_lookup lu;
    unsigned int hit;
    int prot;

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available: identity-map with full access. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        /* Keep the page offset of the lookup result within the page. */
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        /* Caller only wanted to know; do not raise an exception. */
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        /*
         * Access violation: ESR exception codes 17 (insn fetch) / 16 (data).
         * NOTE(review): presumably the MicroBlaze storage-exception ECs —
         * confirm against the processor reference guide.
         */
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        /* Bit 10 flags a store access. */
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        /* TLB miss: codes 19 (insn fetch) / 18 (data); bit 10 = store. */
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    /* An MMU fault taken while already delivering one cannot be handled. */
    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

/*
 * Deliver the pending exception/interrupt in cs->exception_index:
 * save a link register (r17 for exceptions, r14 for IRQ, r16 for break),
 * stash current VM/UM into the MSR shadow bits, disable the MMU, and
 * redirect the PC to the corresponding entry in the vector table
 * (base + 0x20 for HW/MMU exceptions, +0x10 for IRQ, +0x18 for break).
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
    /* Any pending lwx/swx reservation is lost on exception entry. */
    env->res_addr = RES_ADDR_NONE;
    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Exception raised on system without exceptions!\n");
            return;
        }

        /* r17 is the exception link register; point past the faulting insn. */
        env->regs[17] = env->pc + 4;
        /* Clear the delay-slot (DS) bit in ESR before possibly re-setting it. */
        env->esr &= ~(1 << 12);

        /* Exception breaks branch + dslot sequence? */
        if (env->iflags & D_FLAG) {
            env->esr |= 1 << 12 ;
            /* BTR holds the branch target so the handler can resume it. */
            env->btr = env->btarget;
        }

        /* Disable the MMU: save VM/UM into the VMS/UMS shadow bits. */
        t = (msr & (MSR_VM | MSR_UM)) << 1;
        msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        msr |= t;
        /* Exception in progress. */
        msr |= MSR_EIP;
        mb_cpu_write_msr(env, msr);

        qemu_log_mask(CPU_LOG_INT,
                      "hw exception at pc=%x ear=%" PRIx64 " "
                      "esr=%x iflags=%x\n",
                      env->pc, env->ear,
                      env->esr, env->iflags);
        log_cpu_state_mask(CPU_LOG_INT, cs, 0);
        env->iflags = 0;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        env->regs[17] = env->pc;

        qemu_log_mask(CPU_LOG_INT,
                      "MMU exception at pc=%x iflags=%x ear=%" PRIx64 "\n",
                      env->pc, env->iflags, env->ear);

        env->esr &= ~(1 << 12);
        /* Exception breaks branch + dslot sequence? */
        if (env->iflags & D_FLAG) {
            env->esr |= 1 << 12 ;
            env->btr = env->btarget;

            /* Reexecute the branch. */
            env->regs[17] -= 4;
            /* was the branch immprefixed?. */
            if (env->iflags & BIMM_FLAG) {
                /* Back up over the imm prefix insn as well. */
                env->regs[17] -= 4;
                log_cpu_state_mask(CPU_LOG_INT, cs, 0);
            }
        } else if (env->iflags & IMM_FLAG) {
            /* Faulting insn was imm-prefixed; re-execute from the prefix. */
            env->regs[17] -= 4;
        }

        /* Disable the MMU: save VM/UM into the VMS/UMS shadow bits. */
        t = (msr & (MSR_VM | MSR_UM)) << 1;
        msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        msr |= t;
        /* Exception in progress. */
        msr |= MSR_EIP;
        mb_cpu_write_msr(env, msr);

        qemu_log_mask(CPU_LOG_INT,
                      "exception at pc=%x ear=%" PRIx64 " iflags=%x\n",
                      env->pc, env->ear, env->iflags);
        log_cpu_state_mask(CPU_LOG_INT, cs, 0);
        env->iflags = 0;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        /* mb_cpu_exec_interrupt only raises EXCP_IRQ under these conditions. */
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        t = (msr & (MSR_VM | MSR_UM)) << 1;

#if 0
#include "disas/disas.h"

        /* Useful instrumentation when debugging interrupt issues in either
           the models or in sw.  */
        {
            const char *sym;

            sym = lookup_symbol(env->pc);
            if (sym
                && (!strcmp("netif_rx", sym)
                    || !strcmp("process_backlog", sym))) {

                qemu_log("interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n",
                         env->pc, msr, t, env->iflags, sym);

                log_cpu_state(cs, 0);
            }
        }
#endif
        qemu_log_mask(CPU_LOG_INT,
                      "interrupt at pc=%x msr=%x %x iflags=%x\n",
                      env->pc, msr, t, env->iflags);

        /* Save VM/UM to the shadow bits and clear interrupt enable. */
        msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM | MSR_IE);
        msr |= t;
        mb_cpu_write_msr(env, msr);

        /* r14 is the interrupt link register. */
        env->regs[14] = env->pc;
        env->iflags = 0;
        env->pc = cpu->cfg.base_vectors + 0x10;
        //log_cpu_state_mask(CPU_LOG_INT, cs, 0);
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        t = (msr & (MSR_VM | MSR_UM)) << 1;
        qemu_log_mask(CPU_LOG_INT,
                      "break at pc=%x msr=%x %x iflags=%x\n",
                      env->pc, msr, t, env->iflags);
        log_cpu_state_mask(CPU_LOG_INT, cs, 0);
        msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        msr |= t;
        /* Break in progress. */
        msr |= MSR_BIP;
        /* r16 is the break link register. */
        env->regs[16] = env->pc;
        env->iflags = 0;
        env->pc = cpu->cfg.base_vectors + 0x18;
        mb_cpu_write_msr(env, msr);
        break;
    default:
        cpu_abort(cs, "unhandled exception type=%d\n",
                  cs->exception_index);
        break;
    }
}

/*
 * Debugger (gdbstub) address translation: translate @addr with the
 * current MMU context without touching the QEMU TLB or raising faults.
 * Returns 0 when the MMU is enabled and the lookup misses.
 */
hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    struct microblaze_mmu_lookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    if (mmu_idx != MMU_NOMMU_IDX) {
        /* access_type 0 — presumably a data-load lookup; rs arg unused here. */
        hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else
            paddr = 0; /* ???.  */
    } else
        paddr = addr & TARGET_PAGE_MASK;

    return paddr;
}
#endif

/*
 * Poll for a pending hard interrupt.  Accept it only when interrupts
 * are enabled (MSR_IE), no exception or break is in progress
 * (MSR_EIP/MSR_BIP), and we are not mid branch-delay-slot or imm
 * prefix sequence — matching the asserts in mb_cpu_do_interrupt.
 * Returns true if an interrupt was delivered.
 */
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

/*
 * Raise an unaligned-data-access hardware exception for @addr.
 * Builds ESR from the ESS bits that translate recorded in iflags,
 * records the faulting address in EAR, and longjmps out with
 * EXCP_HW_EXCP.  Never returns.
 */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start.  */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        /* Copy the exception-specific-status bits recorded at translate. */
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}