xref: /openbmc/qemu/target/microblaze/helper.c (revision b0476d66)
1 /*
2  *  MicroBlaze helper routines.
3  *
4  *  Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "qemu/host-utils.h"
25 #include "exec/log.h"
26 
27 #if defined(CONFIG_USER_ONLY)
28 
29 void mb_cpu_do_interrupt(CPUState *cs)
30 {
31     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
32     CPUMBState *env = &cpu->env;
33 
34     cs->exception_index = -1;
35     env->res_addr = RES_ADDR_NONE;
36     env->regs[14] = env->pc;
37 }
38 
39 bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
40                      MMUAccessType access_type, int mmu_idx,
41                      bool probe, uintptr_t retaddr)
42 {
43     cs->exception_index = 0xaa;
44     cpu_loop_exit_restore(cs, retaddr);
45 }
46 
47 #else /* !CONFIG_USER_ONLY */
48 
49 bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
50                      MMUAccessType access_type, int mmu_idx,
51                      bool probe, uintptr_t retaddr)
52 {
53     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
54     CPUMBState *env = &cpu->env;
55     struct microblaze_mmu_lookup lu;
56     unsigned int hit;
57     int prot;
58 
59     if (mmu_idx == MMU_NOMMU_IDX) {
60         /* MMU disabled or not available.  */
61         address &= TARGET_PAGE_MASK;
62         prot = PAGE_BITS;
63         tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
64         return true;
65     }
66 
67     hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
68     if (likely(hit)) {
69         uint32_t vaddr = address & TARGET_PAGE_MASK;
70         uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
71 
72         qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
73                       mmu_idx, vaddr, paddr, lu.prot);
74         tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
75         return true;
76     }
77 
78     /* TLB miss.  */
79     if (probe) {
80         return false;
81     }
82 
83     qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
84                   mmu_idx, address);
85 
86     env->ear = address;
87     switch (lu.err) {
88     case ERR_PROT:
89         env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
90         env->esr |= (access_type == MMU_DATA_STORE) << 10;
91         break;
92     case ERR_MISS:
93         env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
94         env->esr |= (access_type == MMU_DATA_STORE) << 10;
95         break;
96     default:
97         abort();
98     }
99 
100     if (cs->exception_index == EXCP_MMU) {
101         cpu_abort(cs, "recursive faults\n");
102     }
103 
104     /* TLB miss.  */
105     cs->exception_index = EXCP_MMU;
106     cpu_loop_exit_restore(cs, retaddr);
107 }
108 
/*
 * Deliver the exception or interrupt selected by cs->exception_index.
 *
 * Each case saves the return pc in the exception-class-specific link
 * register (r17 for HW/MMU exceptions, r14 for interrupts, r16 for
 * breaks), rotates MSR's VM/UM bits into VMS/UMS (disabling the MMU
 * and dropping to privileged mode for the handler), and vectors the pc
 * to the corresponding offset from cfg.base_vectors.
 *
 * NOTE(review): the exact order of MSR read-modify-write and register
 * updates within each case is architectural; do not reorder.
 */
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);

    /* IMM flag cannot propagate across a branch and into the dslot.  */
    assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG)));
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
    /* Any pending lwx/swx reservation is lost on exception entry.  */
    env->res_addr = RES_ADDR_NONE;
    switch (cs->exception_index) {
        case EXCP_HW_EXCP:
            if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) {
                qemu_log_mask(LOG_GUEST_ERROR, "Exception raised on system without exceptions!\n");
                return;
            }

            /* r17 <- next insn; handler resumes after the faulting one.  */
            env->regs[17] = env->pc + 4;
            env->esr &= ~(1 << 12);

            /* Exception breaks branch + dslot sequence?  */
            if (env->iflags & D_FLAG) {
                /* ESR bit 12 = exception in delay slot; BTR holds the
                   branch target so the handler can resume the branch.  */
                env->esr |= 1 << 12 ;
                env->btr = env->btarget;
            }

            /* Disable the MMU.  */
            t = (msr & (MSR_VM | MSR_UM)) << 1;
            msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
            msr |= t;
            /* Exception in progress.  */
            msr |= MSR_EIP;
            mb_cpu_write_msr(env, msr);

            qemu_log_mask(CPU_LOG_INT,
                          "hw exception at pc=%x ear=%" PRIx64 " "
                          "esr=%x iflags=%x\n",
                          env->pc, env->ear,
                          env->esr, env->iflags);
            log_cpu_state_mask(CPU_LOG_INT, cs, 0);
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            /* Vector to the hardware exception handler.  */
            env->pc = cpu->cfg.base_vectors + 0x20;
            break;

        case EXCP_MMU:
            env->regs[17] = env->pc;

            qemu_log_mask(CPU_LOG_INT,
                          "MMU exception at pc=%x iflags=%x ear=%" PRIx64 "\n",
                          env->pc, env->iflags, env->ear);

            env->esr &= ~(1 << 12);
            /* Exception breaks branch + dslot sequence?  */
            if (env->iflags & D_FLAG) {
                env->esr |= 1 << 12 ;
                env->btr = env->btarget;

                /* Reexecute the branch.  */
                env->regs[17] -= 4;
                /* was the branch immprefixed?.  */
                if (env->iflags & BIMM_FLAG) {
                    /* Step back over the imm prefix insn as well.  */
                    env->regs[17] -= 4;
                    log_cpu_state_mask(CPU_LOG_INT, cs, 0);
                }
            } else if (env->iflags & IMM_FLAG) {
                /* Reexecute the imm prefix together with its insn.  */
                env->regs[17] -= 4;
            }

            /* Disable the MMU.  */
            t = (msr & (MSR_VM | MSR_UM)) << 1;
            msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
            msr |= t;
            /* Exception in progress.  */
            msr |= MSR_EIP;
            mb_cpu_write_msr(env, msr);

            qemu_log_mask(CPU_LOG_INT,
                          "exception at pc=%x ear=%" PRIx64 " iflags=%x\n",
                          env->pc, env->ear, env->iflags);
            log_cpu_state_mask(CPU_LOG_INT, cs, 0);
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            /* MMU exceptions share the HW exception vector.  */
            env->pc = cpu->cfg.base_vectors + 0x20;
            break;

        case EXCP_IRQ:
            /* IRQs are only taken with IE set and no exception/break
               in progress; mb_cpu_exec_interrupt enforces this.  */
            assert(!(msr & (MSR_EIP | MSR_BIP)));
            assert(msr & MSR_IE);
            assert(!(env->iflags & D_FLAG));

            t = (msr & (MSR_VM | MSR_UM)) << 1;

#if 0
#include "disas/disas.h"

/* Useful instrumentation when debugging interrupt issues in either
   the models or in sw.  */
            {
                const char *sym;

                sym = lookup_symbol(env->pc);
                if (sym
                    && (!strcmp("netif_rx", sym)
                        || !strcmp("process_backlog", sym))) {

                    qemu_log("interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n",
                             env->pc, msr, t, env->iflags, sym);

                    log_cpu_state(cs, 0);
                }
            }
#endif
            qemu_log_mask(CPU_LOG_INT,
                          "interrupt at pc=%x msr=%x %x iflags=%x\n",
                          env->pc, msr, t, env->iflags);

            /* IRQs also disable further interrupts (clear MSR_IE).  */
            msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM | MSR_IE);
            msr |= t;
            mb_cpu_write_msr(env, msr);

            env->regs[14] = env->pc;
            /* Vector to the interrupt handler.  */
            env->pc = cpu->cfg.base_vectors + 0x10;
            //log_cpu_state_mask(CPU_LOG_INT, cs, 0);
            break;

        case EXCP_HW_BREAK:
            assert(!(env->iflags & IMM_FLAG));
            assert(!(env->iflags & D_FLAG));
            t = (msr & (MSR_VM | MSR_UM)) << 1;
            qemu_log_mask(CPU_LOG_INT,
                          "break at pc=%x msr=%x %x iflags=%x\n",
                          env->pc, msr, t, env->iflags);
            log_cpu_state_mask(CPU_LOG_INT, cs, 0);
            msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
            msr |= t;
            /* Break in progress.  */
            msr |= MSR_BIP;
            env->regs[16] = env->pc;
            /* Vector to the break handler.  */
            env->pc = cpu->cfg.base_vectors + 0x18;
            mb_cpu_write_msr(env, msr);
            break;
        default:
            cpu_abort(cs, "unhandled exception type=%d\n",
                      cs->exception_index);
            break;
    }
}
254 
255 hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
256 {
257     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
258     CPUMBState *env = &cpu->env;
259     target_ulong vaddr, paddr = 0;
260     struct microblaze_mmu_lookup lu;
261     int mmu_idx = cpu_mmu_index(env, false);
262     unsigned int hit;
263 
264     if (mmu_idx != MMU_NOMMU_IDX) {
265         hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
266         if (hit) {
267             vaddr = addr & TARGET_PAGE_MASK;
268             paddr = lu.paddr + vaddr - lu.vaddr;
269         } else
270             paddr = 0; /* ???.  */
271     } else
272         paddr = addr & TARGET_PAGE_MASK;
273 
274     return paddr;
275 }
276 #endif
277 
278 bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
279 {
280     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
281     CPUMBState *env = &cpu->env;
282 
283     if ((interrupt_request & CPU_INTERRUPT_HARD)
284         && (env->msr & MSR_IE)
285         && !(env->msr & (MSR_EIP | MSR_BIP))
286         && !(env->iflags & (D_FLAG | IMM_FLAG))) {
287         cs->exception_index = EXCP_IRQ;
288         mb_cpu_do_interrupt(cs);
289         return true;
290     }
291     return false;
292 }
293 
294 void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
295                                 MMUAccessType access_type,
296                                 int mmu_idx, uintptr_t retaddr)
297 {
298     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
299     uint32_t esr, iflags;
300 
301     /* Recover the pc and iflags from the corresponding insn_start.  */
302     cpu_restore_state(cs, retaddr, true);
303     iflags = cpu->env.iflags;
304 
305     qemu_log_mask(CPU_LOG_INT,
306                   "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
307                   (target_ulong)addr, cpu->env.pc, iflags);
308 
309     esr = ESR_EC_UNALIGNED_DATA;
310     if (likely(iflags & ESR_ESS_FLAG)) {
311         esr |= iflags & ESR_ESS_MASK;
312     } else {
313         qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
314     }
315 
316     cpu->env.ear = addr;
317     cpu->env.esr = esr;
318     cs->exception_index = EXCP_HW_EXCP;
319     cpu_loop_exit(cs);
320 }
321