/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#ifndef CONFIG_USER_ONLY
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_RWX;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
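    /*
     * Added note (summarising the MicroBlaze exception-cause encoding, not
     * taken from this file): the raw ESR values below are the architected
     * cause codes -- 16 = data storage, 17 = instruction storage,
     * 18 = data TLB miss, 19 = instruction TLB miss -- and the extra bit
     * OR'd in for MMU_DATA_STORE marks the faulting access as a store
     * (the ESR "S" flag).
     */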
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
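    /*
     * Added note (an architectural summary, not part of the original
     * comment): UM and VM sit one bit below their UMS/VMS "save" copies,
     * so shifting the masked bits left by one preserves the pre-exception
     * mode; the return instructions (rtid/rtbd/rted) later restore UM and
     * VM from those saved bits.
     */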
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(cs, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* ???. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}

bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUMBState *env = cpu_env(cs);

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}