/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#ifndef CONFIG_USER_ONLY

/*
 * Return the AXI "secure" memory attribute for an access.
 * Instruction fetches are governed by the ns_axi_ip (non-secure
 * instruction port) wire, all other accesses by ns_axi_dp (non-secure
 * data port); the secure attribute is the inversion of the
 * corresponding non-secure signal.
 */
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

/*
 * TCG TLB-fill hook: translate @address for @access_type and install a
 * QEMU TLB entry for it.
 *
 * Returns true if a translation was installed.  On a miss: if @probe,
 * returns false without raising an exception; otherwise records the
 * fault (EAR/ESR), sets EXCP_MMU and longjmps back to the main loop
 * via cpu_loop_exit_restore() (does not return in that case).
 */
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available: identity-map, full access. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_RWX;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        /* Apply the page offset of the hit to the looked-up frame. */
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
    /*
     * ESR exception-cause encoding: 16/17 = data/insn storage
     * (protection) fault, 18/19 = data/insn TLB miss; bit 10 flags a
     * store access.  (Cause values per the MicroBlaze ESR layout --
     * confirm against the MicroBlaze Processor Reference Guide.)
     */
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    /* A fault while already delivering an MMU fault cannot be handled. */
    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

/*
 * Deliver the pending exception/interrupt in cs->exception_index:
 * save the return PC in the architecturally defined link register
 * (r17 for exceptions, r14 for interrupts, r16 for breaks), update
 * MSR (EIP/BIP/IE), and jump to the corresponding vector at
 * base_vectors + 0x10/0x18/0x20.
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            /* Record the interrupted branch target in BTR. */
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /*
             * Reexecute the branch: back up over the branch insn, or
             * over branch + imm prefix when BIMM is set.
             */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        /* Interrupts are only taken when enabled and not masked by
           an in-progress exception/break or a branch/imm sequence;
           mb_cpu_exec_interrupt() guarantees these preconditions. */
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    /* Entering the handler invalidates any reservation and clears
       pending branch/imm state. */
    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

/*
 * Debugger physical-address lookup (e.g. for the gdbstub): translate
 * @addr without raising faults.  Returns 0 when the MMU is enabled and
 * the lookup misses.
 */
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(cs, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        /*
         * NOTE(review): the lookup passes literal 0 for both the access
         * type and the mmu index rather than @mmu_idx -- presumably a
         * permissive data-load probe for debug purposes; confirm
         * against mmu_translate()'s contract.
         */
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else
            paddr = 0; /* No mapping found; report physical address 0. */
    } else
        paddr = addr & TARGET_PAGE_MASK;

    return paddr;
}

/*
 * Poll for a pending hard interrupt and deliver it if the CPU can take
 * one now: interrupts enabled (MSR_IE), no exception/break in progress
 * (MSR_EIP/MSR_BIP), and not inside a branch-delay/imm sequence.
 * Returns true if an interrupt was delivered.
 */
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUMBState *env = cpu_env(cs);

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

/*
 * Unaligned-access hook: record the faulting address and an
 * ESR_EC_UNALIGNED_DATA cause (plus the size/store/register bits
 * captured in iflags at translation time), then raise EXCP_HW_EXCP.
 * Does not return.
 */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        /* Fold the translation-time ESS bits into the cause word. */
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(ccs_unused_placeholder_never);
}