xref: /openbmc/qemu/target/xtensa/helper.c (revision 406d2aa2)
1 /*
2  * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *     * Redistributions of source code must retain the above copyright
8  *       notice, this list of conditions and the following disclaimer.
9  *     * Redistributions in binary form must reproduce the above copyright
10  *       notice, this list of conditions and the following disclaimer in the
11  *       documentation and/or other materials provided with the distribution.
12  *     * Neither the name of the Open Source and Linux Lab nor the
13  *       names of its contributors may be used to endorse or promote products
14  *       derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "qemu/osdep.h"
29 #include "cpu.h"
30 #include "exec/exec-all.h"
31 #include "exec/gdbstub.h"
32 #include "qemu/host-utils.h"
33 #if !defined(CONFIG_USER_ONLY)
34 #include "hw/loader.h"
35 #endif
36 
37 static struct XtensaConfigList *xtensa_cores;
38 
39 static void xtensa_core_class_init(ObjectClass *oc, void *data)
40 {
41     CPUClass *cc = CPU_CLASS(oc);
42     XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
43     const XtensaConfig *config = data;
44 
45     xcc->config = config;
46 
47     /* Use num_core_regs to see only non-privileged registers in an unmodified
48      * gdb. Use num_regs to see all registers. gdb modification is required
49      * for that: reset bit 0 in the 'flags' field of the registers definitions
50      * in the gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
51      */
52     cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
53 }
54 
55 static void init_libisa(XtensaConfig *config)
56 {
57     unsigned i, j;
58     unsigned opcodes;
59 
60     config->isa = xtensa_isa_init(config->isa_internal, NULL, NULL);
61     assert(xtensa_isa_maxlength(config->isa) <= MAX_INSN_LENGTH);
62     opcodes = xtensa_isa_num_opcodes(config->isa);
63     config->opcode_ops = g_new(XtensaOpcodeOps *, opcodes);
64 
65     for (i = 0; i < opcodes; ++i) {
66         const char *opc_name = xtensa_opcode_name(config->isa, i);
67         XtensaOpcodeOps *ops = NULL;
68 
69         assert(xtensa_opcode_num_operands(config->isa, i) <= MAX_OPCODE_ARGS);
70         if (!config->opcode_translators) {
71             ops = xtensa_find_opcode_ops(&xtensa_core_opcodes, opc_name);
72         } else {
73             for (j = 0; !ops && config->opcode_translators[j]; ++j) {
74                 ops = xtensa_find_opcode_ops(config->opcode_translators[j],
75                                              opc_name);
76             }
77         }
78 #ifdef DEBUG
79         if (ops == NULL) {
80             fprintf(stderr,
81                     "opcode translator not found for %s's opcode '%s'\n",
82                     config->name, opc_name);
83         }
84 #endif
85         config->opcode_ops[i] = ops;
86     }
87 }
88 
89 void xtensa_finalize_config(XtensaConfig *config)
90 {
91     unsigned i, n = 0;
92 
93     if (config->isa_internal) {
94         init_libisa(config);
95     }
96     if (config->gdb_regmap.num_regs) {
97         return;
98     }
99 
100     for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
101         n += (config->gdb_regmap.reg[i].type != 6);
102     }
103     config->gdb_regmap.num_regs = n;
104 }
105 
106 void xtensa_register_core(XtensaConfigList *node)
107 {
108     TypeInfo type = {
109         .parent = TYPE_XTENSA_CPU,
110         .class_init = xtensa_core_class_init,
111         .class_data = (void *)node->config,
112     };
113 
114     node->next = xtensa_cores;
115     xtensa_cores = node;
116     type.name = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node->config->name);
117     type_register(&type);
118     g_free((gpointer)type.name);
119 }
120 
121 static uint32_t check_hw_breakpoints(CPUXtensaState *env)
122 {
123     unsigned i;
124 
125     for (i = 0; i < env->config->ndbreak; ++i) {
126         if (env->cpu_watchpoint[i] &&
127                 env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
128             return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
129         }
130     }
131     return 0;
132 }
133 
134 void xtensa_breakpoint_handler(CPUState *cs)
135 {
136     XtensaCPU *cpu = XTENSA_CPU(cs);
137     CPUXtensaState *env = &cpu->env;
138 
139     if (cs->watchpoint_hit) {
140         if (cs->watchpoint_hit->flags & BP_CPU) {
141             uint32_t cause;
142 
143             cs->watchpoint_hit = NULL;
144             cause = check_hw_breakpoints(env);
145             if (cause) {
146                 debug_exception_env(env, cause);
147             }
148             cpu_loop_exit_noexc(cs);
149         }
150     }
151 }
152 
153 void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
154 {
155     XtensaConfigList *core = xtensa_cores;
156     cpu_fprintf(f, "Available CPUs:\n");
157     for (; core; core = core->next) {
158         cpu_fprintf(f, "  %s\n", core->config->name);
159     }
160 }
161 
162 hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
163 {
164     XtensaCPU *cpu = XTENSA_CPU(cs);
165     uint32_t paddr;
166     uint32_t page_size;
167     unsigned access;
168 
169     if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
170                 &paddr, &page_size, &access) == 0) {
171         return paddr;
172     }
173     if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
174                 &paddr, &page_size, &access) == 0) {
175         return paddr;
176     }
177     return ~0;
178 }
179 
180 static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
181 {
182     if (xtensa_option_enabled(env->config,
183                 XTENSA_OPTION_RELOCATABLE_VECTOR)) {
184         return vector - env->config->vecbase + env->sregs[VECBASE];
185     } else {
186         return vector;
187     }
188 }
189 
190 /*!
191  * Handle pending IRQ.
192  * For the high priority interrupt jump to the corresponding interrupt vector.
193  * For the level-1 interrupt convert it to either user, kernel or double
194  * exception with the 'level-1 interrupt' exception cause.
195  */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    /* Deliver only when the pending level is higher than the current
     * interrupt level, within the configured number of levels, and at
     * least one interrupt at that level is both pending (INTSET) and
     * enabled (INTENABLE).
     */
    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            /* High-priority interrupt: save PC/PS into the per-level
             * EPCx/EPSx registers, raise PS.INTLEVEL to the new level,
             * set PS.EXCM, and jump to the level's interrupt vector.
             */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            /* Level-1 interrupt: present it as a regular exception. */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                /* Already in exception mode: this becomes a double
                 * exception; save PC in DEPC when the core has one,
                 * otherwise in EPC1.
                 */
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                /* User vs kernel exception chosen by PS.UM. */
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
234 
/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    /* EXC_IRQ is first converted by handle_interrupt() into either a
     * direct jump to a high-priority interrupt vector or one of
     * EXC_USER/EXC_KERNEL/EXC_DOUBLE handled by the switch below.
     */
    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, cs->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        /* Jump to the configured vector for this exception; a core that
         * does not configure the vector logs the bad index instead.
         */
        if (env->config->exception_vector[cs->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[cs->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
                          __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        /* Nothing left to do: handle_interrupt() above either took the
         * interrupt or left the state unchanged.
         */
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}
289 
290 bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
291 {
292     if (interrupt_request & CPU_INTERRUPT_HARD) {
293         cs->exception_index = EXC_IRQ;
294         xtensa_cpu_do_interrupt(cs);
295         return true;
296     }
297     return false;
298 }
299 
300 static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
301         const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
302 {
303     unsigned wi, ei;
304 
305     for (wi = 0; wi < tlb->nways; ++wi) {
306         for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
307             entry[wi][ei].asid = 0;
308             entry[wi][ei].variable = true;
309         }
310     }
311 }
312 
/*
 * Initialize MMU TLB ways 5 and 6.  When those ways are fixed (varway56
 * unset) install their hardwired mappings; when way 6 is variable,
 * identity-map the eight 512MB regions there instead.
 */
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Way 5: 0xd0000000 and 0xd8000000 both map to paddr 0, ASID 1;
         * attr 7 decodes as write-back and attr 3 as cache-bypass
         * (see mmu_attr_to_access).
         */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        /* Way 6: 0xe0000000 and 0xf0000000 both map to paddr 0xf0000000. */
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        /* Variable way 6: identity-map the eight 512MB regions with
         * ASID 1 and attr 3 (cache-bypass).
         */
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
359 
360 static void reset_tlb_region_way0(CPUXtensaState *env,
361         xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
362 {
363     unsigned ei;
364 
365     for (ei = 0; ei < 8; ++ei) {
366         entry[0][ei].vaddr = ei << 29;
367         entry[0][ei].paddr = ei << 29;
368         entry[0][ei].asid = 1;
369         entry[0][ei].attr = 2;
370         entry[0][ei].variable = true;
371     }
372 }
373 
374 void reset_mmu(CPUXtensaState *env)
375 {
376     if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
377         env->sregs[RASID] = 0x04030201;
378         env->sregs[ITLBCFG] = 0;
379         env->sregs[DTLBCFG] = 0;
380         env->autorefill_idx = 0;
381         reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
382         reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
383         reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
384         reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
385     } else {
386         reset_tlb_region_way0(env, env->itlb);
387         reset_tlb_region_way0(env, env->dtlb);
388     }
389 }
390 
391 static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
392 {
393     unsigned i;
394     for (i = 0; i < 4; ++i) {
395         if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
396             return i;
397         }
398     }
399     return 0xff;
400 }
401 
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * All ways are probed; a hit in more than one way yields a multi-hit
 * exception cause, no hit at all yields a TLB-miss cause.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        /* Within a way the entry index is fully determined by the
         * address; only the VPN then needs to be compared.
         */
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        /* ASID 0 marks an invalid entry (see reset_tlb_mmu_all_ways). */
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
443 
444 /*!
445  * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
446  * See ISA, 4.6.5.10
447  */
448 static unsigned mmu_attr_to_access(uint32_t attr)
449 {
450     unsigned access = 0;
451 
452     if (attr < 12) {
453         access |= PAGE_READ;
454         if (attr & 0x1) {
455             access |= PAGE_EXEC;
456         }
457         if (attr & 0x2) {
458             access |= PAGE_WRITE;
459         }
460 
461         switch (attr & 0xc) {
462         case 0:
463             access |= PAGE_CACHE_BYPASS;
464             break;
465 
466         case 4:
467             access |= PAGE_CACHE_WB;
468             break;
469 
470         case 8:
471             access |= PAGE_CACHE_WT;
472             break;
473         }
474     } else if (attr == 13) {
475         access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
476     }
477     return access;
478 }
479 
480 /*!
481  * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
482  * See ISA, 4.6.3.3
483  */
484 static unsigned region_attr_to_access(uint32_t attr)
485 {
486     static const unsigned access[16] = {
487          [0] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_WT,
488          [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
489          [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
490          [3] =                          PAGE_EXEC | PAGE_CACHE_WB,
491          [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
492          [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
493         [14] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_ISOLATE,
494     };
495 
496     return access[attr & 0xf];
497 }
498 
499 /*!
500  * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
501  * See ISA, A.2.14 The Cache Attribute Register
502  */
503 static unsigned cacheattr_attr_to_access(uint32_t attr)
504 {
505     static const unsigned access[16] = {
506          [0] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_WT,
507          [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
508          [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
509          [3] =                          PAGE_EXEC | PAGE_CACHE_WB,
510          [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
511         [14] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_ISOLATE,
512     };
513 
514     return access[attr & 0xf];
515 }
516 
517 static bool is_access_granted(unsigned access, int is_write)
518 {
519     switch (is_write) {
520     case 0:
521         return access & PAGE_READ;
522 
523     case 1:
524         return access & PAGE_WRITE;
525 
526     case 2:
527         return access & PAGE_EXEC;
528 
529     default:
530         return 0;
531     }
532 }
533 
534 static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
535 
/*
 * MMU virtual-to-physical translation.
 * Look the address up in the TLB; on a miss optionally walk the page
 * table (may_lookup_pt) and, when update_tlb is set, install the fetched
 * PTE into one of the autorefill ways 0..3.  On success fill in *paddr,
 * *page_size and *access and return 0; otherwise return the exception
 * cause code.
 */
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    /* is_write == 2 means instruction fetch: use the ITLB. */
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    /* TLB miss: emulate hardware autorefill from the page table. */
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        /* PTE bits 5:4 hold the access ring. */
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            /* Install the new entry in ways 0..3, chosen round-robin. */
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            /* Translation only: build a temporary entry without
             * modifying TLB state.
             */
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    /* Ring protection: reject entries less privileged than mmu_idx. */
    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    /* DTLB entries never grant execute; ITLB entries never grant
     * read/write.
     */
    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    /* Combine the entry's page frame with the in-page offset bits. */
    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
598 
599 static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
600 {
601     CPUState *cs = CPU(xtensa_env_get_cpu(env));
602     uint32_t paddr;
603     uint32_t page_size;
604     unsigned access;
605     uint32_t pt_vaddr =
606         (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
607     int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
608             &paddr, &page_size, &access, false);
609 
610     qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
611                   __func__, vaddr, ret ? ~0 : paddr);
612 
613     if (ret == 0) {
614         *pte = ldl_phys(cs->as, paddr);
615     }
616     return ret;
617 }
618 
619 static int get_physical_addr_region(CPUXtensaState *env,
620         uint32_t vaddr, int is_write, int mmu_idx,
621         uint32_t *paddr, uint32_t *page_size, unsigned *access)
622 {
623     bool dtlb = is_write != 2;
624     uint32_t wi = 0;
625     uint32_t ei = (vaddr >> 29) & 0x7;
626     const xtensa_tlb_entry *entry =
627         xtensa_tlb_get_entry(env, dtlb, wi, ei);
628 
629     *access = region_attr_to_access(entry->attr);
630     if (!is_access_granted(*access, is_write)) {
631         return dtlb ?
632             (is_write ?
633              STORE_PROHIBITED_CAUSE :
634              LOAD_PROHIBITED_CAUSE) :
635             INST_FETCH_PROHIBITED_CAUSE;
636     }
637 
638     *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
639     *page_size = ~REGION_PAGE_MASK + 1;
640 
641     return 0;
642 }
643 
644 /*!
645  * Convert virtual address to physical addr.
646  * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
647  *
648  * \return 0 if ok, exception cause code otherwise
649  */
650 int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
651         uint32_t vaddr, int is_write, int mmu_idx,
652         uint32_t *paddr, uint32_t *page_size, unsigned *access)
653 {
654     if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
655         return get_physical_addr_mmu(env, update_tlb,
656                 vaddr, is_write, mmu_idx, paddr, page_size, access, true);
657     } else if (xtensa_option_bits_enabled(env->config,
658                 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
659                 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
660         return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
661                 paddr, page_size, access);
662     } else {
663         *paddr = vaddr;
664         *page_size = TARGET_PAGE_SIZE;
665         *access = cacheattr_attr_to_access(
666                 env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
667         return 0;
668     }
669 }
670 
/*
 * Print the contents of one TLB (ITLB or DTLB) in human-readable form.
 * Only valid entries (non-zero ASID) are shown; the per-way header is
 * printed lazily so ways with no valid entries produce no output.
 */
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    /* Attribute decoding depends on whether this core has a full MMU or
     * region protection/translation.
     */
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        /* Page size covered by entries of this way, scaled to MB/KB. */
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                            "\t----------  ----------  ----  ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ? cache_text[cache_idx] :
                            "Invalid");
            }
        }
    }
}
731 
732 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
733 {
734     if (xtensa_option_bits_enabled(env->config,
735                 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
736                 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
737                 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
738 
739         cpu_fprintf(f, "ITLB:\n");
740         dump_tlb(f, cpu_fprintf, env, false);
741         cpu_fprintf(f, "\nDTLB:\n");
742         dump_tlb(f, cpu_fprintf, env, true);
743     } else {
744         cpu_fprintf(f, "No TLB for this CPU core\n");
745     }
746 }
747 
748 void xtensa_runstall(CPUXtensaState *env, bool runstall)
749 {
750     CPUState *cpu = CPU(xtensa_env_get_cpu(env));
751 
752     env->runstall = runstall;
753     cpu->halted = runstall;
754     if (runstall) {
755         cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
756     } else {
757         cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
758     }
759 }
760