/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static struct XtensaConfigList *xtensa_cores;

static void xtensa_core_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);
    XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
    const XtensaConfig *config = data;

    xcc->config = config;

    /* Use num_core_regs to see only non-privileged registers in an unmodified
     * gdb. Use num_regs to see all registers. A gdb modification is required
     * for the latter: reset bit 0 in the 'flags' field of the register
     * definitions in gdb/xtensa-config.c in the gdb source tree or in the
     * gdb overlay.
     */
    cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
}
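
/*
 * Finalize a core configuration: if gdb_regmap.num_regs has not been set
 * explicitly, derive it by counting the register map entries (the list is
 * terminated by a negative targno; type 6 entries are not counted).
 */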
void xtensa_finalize_config(XtensaConfig *config)
{
    unsigned i, n = 0;

    if (config->gdb_regmap.num_regs) {
        return;
    }

    for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
        n += (config->gdb_regmap.reg[i].type != 6);
    }
    config->gdb_regmap.num_regs = n;
}
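
/*
 * Register a dynamically created core: link the node into the global
 * xtensa_cores list and register a QOM CPU type named
 * "<core name>-" TYPE_XTENSA_CPU for it.
 */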
void xtensa_register_core(XtensaConfigList *node)
{
    TypeInfo type = {
        .parent = TYPE_XTENSA_CPU,
        .class_init = xtensa_core_class_init,
        .class_data = (void *)node->config,
    };

    node->next = xtensa_cores;
    xtensa_cores = node;
    type.name = g_strdup_printf("%s-" TYPE_XTENSA_CPU, node->config->name);
    type_register(&type);
    g_free((gpointer)type.name);
}
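
/*
 * Scan the DBREAK watchpoints for one that has been hit and return the
 * corresponding DEBUGCAUSE value (DEBUGCAUSE_DB with the breakpoint number),
 * or 0 if no hardware watchpoint fired.
 */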
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}
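
/*
 * Called when execution stops on a breakpoint or watchpoint: convert a hit
 * CPU-internal (BP_CPU) watchpoint into a guest debug exception and restart
 * execution.
 */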
void xtensa_breakpoint_handler(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            cs->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_loop_exit_noexc(cs);
        }
    }
}

void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}
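
/*
 * Translate a virtual address to a physical address for the debugger.
 * Try a ring-0 data access first and fall back to an instruction fetch;
 * return ~0 if neither mapping exists.
 */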
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}
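
/*
 * Apply VECBASE relocation to an exception/interrupt vector address when the
 * relocatable vector option is configured; otherwise return the vector as is.
 */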
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle a pending IRQ.
 * For a high-priority interrupt, jump to the corresponding interrupt vector.
 * For a level-1 interrupt, convert it to either a user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, cs->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[cs->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[cs->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
                          __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}
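
/*
 * Hard interrupt request hook: raise EXC_IRQ when CPU_INTERRUPT_HARD is
 * pending; return true if the request was handled.
 */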
bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = EXC_IRQ;
        xtensa_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
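
/* Invalidate all TLB entries: clear the ASID and mark every entry variable. */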
static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}
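
/*
 * Initialize TLB ways 5 and 6 to their reset values: with fixed ways,
 * install the static mappings; with variable ways 5/6 (varway56),
 * identity-map the eight 512 MB regions in way 6.
 */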
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
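
/*
 * Region protection reset state: way 0 identity-maps the eight 512 MB
 * regions of the address space.
 */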
static void reset_tlb_region_way0(CPUXtensaState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}
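
/* Bring the MMU (or region protection unit) to its reset state. */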
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
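
/*
 * Map an ASID to its ring number by searching the four byte-wide fields of
 * the RASID special register; return 0xff if the ASID is not present.
 */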
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] =                          PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] =                          PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE             | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
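
/*
 * Check an access mask against the requested access type:
 * is_write == 0 means read, 1 means write, 2 means instruction fetch.
 */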
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return false;
    }
}

static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
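
/*
 * MMU translation: look the address up in the TLB; on a TLB miss optionally
 * walk the page table (may_lookup_pt) and, if update_tlb is set, autorefill
 * one of TLB ways 0..3 (selected round-robin via autorefill_idx). Then check
 * ring privilege and access rights against the page attributes.
 */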
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
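
/*
 * Hardware page table walk: build the PTE address from PTEVADDR and the
 * faulting virtual address, translate it through the TLB (without refilling
 * it), and load the PTE from guest physical memory.
 */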
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
                  __func__, vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(cs->as, paddr);
    }
    return ret;
}
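
/*
 * Region protection/translation: the way-0 entry index is the top three bits
 * of the virtual address (one entry per 512 MB region); access rights come
 * from the entry attributes and the physical address from the entry's paddr.
 */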
static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a pagewalk and change the xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(
                env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}
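
/*
 * Print the valid entries of one TLB (ITLB or DTLB), one table per way,
 * decoding the attributes into access rights and cache mode.
 */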
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                            "\t----------  ----------  ----  ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ? cache_text[cache_idx] :
                            "Invalid");
            }
        }
    }
}
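
/*
 * Dump both TLBs, e.g. from the 'info tlb' monitor command; cores without
 * an MMU or region protection have no TLB to show.
 */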
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}
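
/*
 * Drive the RunStall input signal: when asserted the CPU is halted via
 * CPU_INTERRUPT_HALT, when deasserted the halt interrupt is cleared.
 */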
void xtensa_runstall(CPUXtensaState *env, bool runstall)
{
    CPUState *cpu = CPU(xtensa_env_get_cpu(env));

    env->runstall = runstall;
    cpu->halted = runstall;
    if (runstall) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
    } else {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
    }
}