xref: /openbmc/qemu/target/sparc/mmu_helper.c (revision 9c2037d0)
1 /*
2  *  Sparc MMU helpers
3  *
4  *  Copyright (c) 2003-2005 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "trace.h"
24 #include "exec/address-spaces.h"
25 
26 /* Sparc MMU emulation */
27 
28 #if defined(CONFIG_USER_ONLY)
29 
30 int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
31                                int mmu_idx)
32 {
33     if (rw & 2) {
34         cs->exception_index = TT_TFAULT;
35     } else {
36         cs->exception_index = TT_DFAULT;
37     }
38     return 1;
39 }
40 
41 #else
42 
43 #ifndef TARGET_SPARC64
44 /*
45  * Sparc V8 Reference MMU (SRMMU)
46  */
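/*
 * A rough summary of the SRMMU translation scheme implemented below (the
 * SPARC V8 manual, Appendix H, is the authoritative reference):
 *
 *   context table entry -> L1 table -> L2 table -> L3 table -> page
 *
 * - The context table base is mmuregs[1] << 4; the entry for the current
 *   context is found at offset mmuregs[2] * 4.
 * - The L1 table is indexed by VA[31:24] (256 entries), L2 by VA[23:18]
 *   (64 entries), L3 by VA[17:12] (64 entries); VA[11:0] is the page offset.
 * - The low two bits of each entry (PTE_ENTRYTYPE_MASK) select: 0 invalid,
 *   1 page table descriptor (PTP << 4 gives the next table's base),
 *   2 page table entry, 3 reserved.
 * - A PTE may terminate the walk early, yielding 16 MB (L1), 256 KB (L2)
 *   or 4 KB (L3) pages.
 */
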
47 static const int access_table[8][8] = {
48     { 0, 0, 0, 0, 8, 0, 12, 12 },
49     { 0, 0, 0, 0, 8, 0, 0, 0 },
50     { 8, 8, 0, 0, 0, 8, 12, 12 },
51     { 8, 8, 0, 0, 0, 8, 0, 0 },
52     { 8, 0, 8, 0, 8, 8, 12, 12 },
53     { 8, 0, 8, 0, 8, 0, 8, 0 },
54     { 8, 8, 8, 0, 8, 8, 12, 12 },
55     { 8, 8, 8, 0, 8, 8, 8, 0 }
56 };
57 
58 static const int perm_table[2][8] = {
59     {
60         PAGE_READ,
61         PAGE_READ | PAGE_WRITE,
62         PAGE_READ | PAGE_EXEC,
63         PAGE_READ | PAGE_WRITE | PAGE_EXEC,
64         PAGE_EXEC,
65         PAGE_READ | PAGE_WRITE,
66         PAGE_READ | PAGE_EXEC,
67         PAGE_READ | PAGE_WRITE | PAGE_EXEC
68     },
69     {
70         PAGE_READ,
71         PAGE_READ | PAGE_WRITE,
72         PAGE_READ | PAGE_EXEC,
73         PAGE_READ | PAGE_WRITE | PAGE_EXEC,
74         PAGE_EXEC,
75         PAGE_READ,
76         0,
77         0,
78     }
79 };
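
/*
 * access_table is indexed by [access_index][ACC], where access_index is
 * computed in get_physical_address() as (write << 2) | (code << 1) |
 * supervisor and ACC is the PTE access-permission field (bits 4:2).  A zero
 * entry means the access is allowed; 8 and 12 are fault-type codes already
 * shifted into fault status register position (2 = protection error,
 * 3 = privilege violation, following the SPARC V8 fault-type encoding).
 * perm_table[is_user][ACC] gives the QEMU page protection bits for an
 * allowed mapping.
 *
 * Illustrative lookups: a user store to an ACC=0 (read-only) page uses
 * access_index 4 and hits access_table[4][0] == 8 (protection error); a
 * supervisor instruction fetch from an ACC=6 page uses access_index 3,
 * access_table[3][6] == 0, and is granted perm_table[0][6] ==
 * PAGE_READ | PAGE_EXEC.
 */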
80 
81 static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
82                                 int *prot, int *access_index,
83                                 target_ulong address, int rw, int mmu_idx,
84                                 target_ulong *page_size)
85 {
86     int access_perms = 0;
87     hwaddr pde_ptr;
88     uint32_t pde;
89     int error_code = 0, is_dirty, is_user;
90     unsigned long page_offset;
91     CPUState *cs = CPU(sparc_env_get_cpu(env));
92 
93     is_user = mmu_idx == MMU_USER_IDX;
94 
95     if (mmu_idx == MMU_PHYS_IDX) {
96         *page_size = TARGET_PAGE_SIZE;
97         /* Boot mode: instruction fetches are taken from PROM */
98         if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
99             *physical = env->prom_addr | (address & 0x7ffffULL);
100             *prot = PAGE_READ | PAGE_EXEC;
101             return 0;
102         }
103         *physical = address;
104         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
105         return 0;
106     }
107 
108     *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
109     *physical = 0xffffffffffff0000ULL;
110 
111     /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
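    /*
     * Illustrative example (hypothetical values): for address 0x12345678 the
     * walk below uses L1 index VA[31:24] = 0x12, L2 index VA[23:18] = 0x0d
     * and L3 index VA[17:12] = 0x05, i.e. the byte offsets into the tables
     * are ((address >> 22) & ~3) = 0x48, ((address & 0xfc0000) >> 16) = 0x34
     * and ((address & 0x3f000) >> 10) = 0x14; VA[11:0] = 0x678 is the page
     * offset.
     */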
112     /* Context base + context number */
113     pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
114     pde = ldl_phys(cs->as, pde_ptr);
115 
116     /* Ctx pde */
117     switch (pde & PTE_ENTRYTYPE_MASK) {
118     default:
119     case 0: /* Invalid */
120         return 1 << 2;
121     case 2: /* L0 PTE, maybe should not happen? */
122     case 3: /* Reserved */
123         return 4 << 2;
124     case 1: /* L0 PDE */
125         pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
126         pde = ldl_phys(cs->as, pde_ptr);
127 
128         switch (pde & PTE_ENTRYTYPE_MASK) {
129         default:
130         case 0: /* Invalid */
131             return (1 << 8) | (1 << 2);
132         case 3: /* Reserved */
133             return (1 << 8) | (4 << 2);
134         case 1: /* L1 PDE */
135             pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
136             pde = ldl_phys(cs->as, pde_ptr);
137 
138             switch (pde & PTE_ENTRYTYPE_MASK) {
139             default:
140             case 0: /* Invalid */
141                 return (2 << 8) | (1 << 2);
142             case 3: /* Reserved */
143                 return (2 << 8) | (4 << 2);
144             case 1: /* L2 PDE */
145                 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
146                 pde = ldl_phys(cs->as, pde_ptr);
147 
148                 switch (pde & PTE_ENTRYTYPE_MASK) {
149                 default:
150                 case 0: /* Invalid */
151                     return (3 << 8) | (1 << 2);
152                 case 1: /* PDE, should not happen */
153                 case 3: /* Reserved */
154                     return (3 << 8) | (4 << 2);
155                 case 2: /* L3 PTE */
156                     page_offset = 0;
157                 }
158                 *page_size = TARGET_PAGE_SIZE;
159                 break;
160             case 2: /* L2 PTE */
161                 page_offset = address & 0x3f000;
162                 *page_size = 0x40000;
163             }
164             break;
165         case 2: /* L1 PTE */
166             page_offset = address & 0xfff000;
167             *page_size = 0x1000000;
168         }
169     }
170 
171     /* check access */
172     access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
173     error_code = access_table[*access_index][access_perms];
174     if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
175         return error_code;
176     }
177 
178     /* update page modified and dirty bits */
179     is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
180     if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
181         pde |= PG_ACCESSED_MASK;
182         if (is_dirty) {
183             pde |= PG_MODIFIED_MASK;
184         }
185         stl_phys_notdirty(cs->as, pde_ptr, pde);
186     }
187 
188     /* the page can be put in the TLB */
189     *prot = perm_table[is_user][access_perms];
190     if (!(pde & PG_MODIFIED_MASK)) {
191         /* only set write access if already dirty... otherwise wait
192            for dirty access */
193         *prot &= ~PAGE_WRITE;
194     }
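    /*
     * Note: withholding PAGE_WRITE here means the first store to a clean
     * page takes the slow path again with rw == 1; that pass sets
     * PG_MODIFIED_MASK above and only then maps the page writable.
     */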
195 
196     /* Even with large PTEs, we map only one 4KB page in the cache to
197        avoid filling it too fast */
198     *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
199     return error_code;
200 }
201 
202 /* Perform address translation */
203 int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
204                                int mmu_idx)
205 {
206     SPARCCPU *cpu = SPARC_CPU(cs);
207     CPUSPARCState *env = &cpu->env;
208     hwaddr paddr;
209     target_ulong vaddr;
210     target_ulong page_size;
211     int error_code = 0, prot, access_index;
212 
213     address &= TARGET_PAGE_MASK;
214     error_code = get_physical_address(env, &paddr, &prot, &access_index,
215                                       address, rw, mmu_idx, &page_size);
216     vaddr = address;
217     if (error_code == 0) {
218         qemu_log_mask(CPU_LOG_MMU,
219                 "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
220                 TARGET_FMT_lx "\n", address, paddr, vaddr);
221         tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
222         return 0;
223     }
224 
225     if (env->mmuregs[3]) { /* Fault status register */
226         env->mmuregs[3] = 1; /* overflow (not read before another fault) */
227     }
228     env->mmuregs[3] |= (access_index << 5) | error_code | 2;
229     env->mmuregs[4] = address; /* Fault address register */
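    /*
     * The word composed above follows the SRMMU fault status layout: the
     * table-walk level is in bits 9:8 of error_code, the fault type
     * (1 invalid, 2 protection, 3 privilege, 4 translation) in bits 4:2,
     * access_index lands in the access-type field (bits 7:5) and bit 1
     * marks the fault address register as valid.  Illustrative example: a
     * user store to an ACC=0 (read-only) page gives access_index = 4 and
     * error_code = 8, so the OR above adds 0x8a.
     */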
230 
231     if ((env->mmuregs[0] & MMU_NF) || env->psret == 0)  {
232         /* No fault mode: if a mapping is available, just override
233            permissions. If no mapping is available, redirect accesses to
234            neverland. Fake/overridden mappings will be flushed when
235            switching to normal mode. */
236         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
237         tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
238         return 0;
239     } else {
240         if (rw & 2) {
241             cs->exception_index = TT_TFAULT;
242         } else {
243             cs->exception_index = TT_DFAULT;
244         }
245         return 1;
246     }
247 }
248 
249 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
250 {
251     CPUState *cs = CPU(sparc_env_get_cpu(env));
252     hwaddr pde_ptr;
253     uint32_t pde;
254 
255     /* Context base + context number */
256     pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
257         (env->mmuregs[2] << 2);
258     pde = ldl_phys(cs->as, pde_ptr);
259 
260     switch (pde & PTE_ENTRYTYPE_MASK) {
261     default:
262     case 0: /* Invalid */
263     case 2: /* PTE, maybe should not happen? */
264     case 3: /* Reserved */
265         return 0;
266     case 1: /* L1 PDE */
267         if (mmulev == 3) {
268             return pde;
269         }
270         pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
271         pde = ldl_phys(cs->as, pde_ptr);
272 
273         switch (pde & PTE_ENTRYTYPE_MASK) {
274         default:
275         case 0: /* Invalid */
276         case 3: /* Reserved */
277             return 0;
278         case 2: /* L1 PTE */
279             return pde;
280         case 1: /* L2 PDE */
281             if (mmulev == 2) {
282                 return pde;
283             }
284             pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
285             pde = ldl_phys(cs->as, pde_ptr);
286 
287             switch (pde & PTE_ENTRYTYPE_MASK) {
288             default:
289             case 0: /* Invalid */
290             case 3: /* Reserved */
291                 return 0;
292             case 2: /* L2 PTE */
293                 return pde;
294             case 1: /* L3 PDE */
295                 if (mmulev == 1) {
296                     return pde;
297                 }
298                 pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
299                 pde = ldl_phys(cs->as, pde_ptr);
300 
301                 switch (pde & PTE_ENTRYTYPE_MASK) {
302                 default:
303                 case 0: /* Invalid */
304                 case 1: /* PDE, should not happen */
305                 case 3: /* Reserved */
306                     return 0;
307                 case 2: /* L3 PTE */
308                     return pde;
309                 }
310             }
311         }
312     }
313     return 0;
314 }
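
/*
 * mmu_probe() returns the table entry found at the requested depth: mmulev 3
 * stops at the context-level entry, 2 at the 16 MB region (L1) level, 1 at
 * the 256 KB segment (L2) level and 0 descends all the way to the 4 KB page
 * (L3) PTE.  dump_mmu() below relies on this to enumerate regions, segments
 * and pages.
 */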
315 
316 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
317 {
318     CPUState *cs = CPU(sparc_env_get_cpu(env));
319     target_ulong va, va1, va2;
320     unsigned int n, m, o;
321     hwaddr pde_ptr, pa;
322     uint32_t pde;
323 
324     pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
325     pde = ldl_phys(cs->as, pde_ptr);
326     (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
327                    (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
328     for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
329         pde = mmu_probe(env, va, 2);
330         if (pde) {
331             pa = cpu_get_phys_page_debug(cs, va);
332             (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
333                            " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
334             for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
335                 pde = mmu_probe(env, va1, 1);
336                 if (pde) {
337                     pa = cpu_get_phys_page_debug(cs, va1);
338                     (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
339                                    TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
340                                    va1, pa, pde);
341                     for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
342                         pde = mmu_probe(env, va2, 0);
343                         if (pde) {
344                             pa = cpu_get_phys_page_debug(cs, va2);
345                             (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
346                                            TARGET_FMT_plx " PTE: "
347                                            TARGET_FMT_lx "\n",
348                                            va2, pa, pde);
349                         }
350                     }
351                 }
352             }
353         }
354     }
355 }
356 
357 /* GDB expects all register windows to be flushed to RAM. This function handles
358  * reads (and only reads) in stack frames as if the windows had been flushed. We
359  * assume that the SPARC ABI is followed.
360  */
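/*
 * Frame layout assumed here (standard SPARC ABI): a frame pointer points at
 * a 64-byte register save area holding %l0-%l7 at fp+0..31 and %i0-%i7 at
 * fp+32..63.  In QEMU the windowed registers of window w live at
 * env->regbase[w * 16 + 8] .. env->regbase[w * 16 + 23] (%l0-%l7 then
 * %i0-%i7), which is what the byte-copy loop below reads instead of guest
 * memory.
 */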
361 int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
362                               uint8_t *buf, int len, bool is_write)
363 {
364     SPARCCPU *cpu = SPARC_CPU(cs);
365     CPUSPARCState *env = &cpu->env;
366     target_ulong addr = address;
367     int i;
368     int len1;
369     int cwp = env->cwp;
370 
371     if (!is_write) {
372         for (i = 0; i < env->nwindows; i++) {
373             int off;
374             target_ulong fp = env->regbase[cwp * 16 + 22];
375 
376             /* Assume fp == 0 means end of frame.  */
377             if (fp == 0) {
378                 break;
379             }
380 
381             cwp = cpu_cwp_inc(env, cwp + 1);
382 
383             /* Invalid window ? */
384             if (env->wim & (1 << cwp)) {
385                 break;
386             }
387 
388             /* According to the ABI, the stack is growing downward.  */
389             if (addr + len < fp) {
390                 break;
391             }
392 
393             /* Not in this frame.  */
394             if (addr > fp + 64) {
395                 continue;
396             }
397 
398             /* Handle access before this window.  */
399             if (addr < fp) {
400                 len1 = fp - addr;
401                 if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
402                     return -1;
403                 }
404                 addr += len1;
405                 len -= len1;
406                 buf += len1;
407             }
408 
409             /* Access the registers byte by byte. Not very efficient, but
410              * speed is not critical here.
411              */
412             off = addr - fp;
413             len1 = 64 - off;
414 
415             if (len1 > len) {
416                 len1 = len;
417             }
418 
419             for (; len1; len1--) {
420                 int reg = cwp * 16 + 8 + (off >> 2);
421                 union {
422                     uint32_t v;
423                     uint8_t c[4];
424                 } u;
425                 u.v = cpu_to_be32(env->regbase[reg]);
426                 *buf++ = u.c[off & 3];
427                 addr++;
428                 len--;
429                 off++;
430             }
431 
432             if (len == 0) {
433                 return 0;
434             }
435         }
436     }
437     return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
438 }
439 
440 #else /* !TARGET_SPARC64 */
441 
442 /* 41 bit physical address space */
443 static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
444 {
445     return x & 0x1ffffffffffULL;
446 }
447 
448 /*
449  * UltraSparc IIi I/DMMUs
450  */
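
/*
 * Each of the 64 I/D TLB entries pairs a tag (VA[63:13] with the 13-bit
 * context number in the low bits, the same format written to the tag access
 * registers below) with a TTE data word.  Roughly, following the usual
 * sun4u TTE layout: bit 63 is the valid bit, bits 62:61 the page size
 * (8 KB/64 KB/512 KB/4 MB), bit 60 NFO, bits 40:13 the physical page number,
 * and the low bits hold the lock, side-effect, privileged, writable and
 * global flags; the TTE_* macros in cpu.h are the authoritative definition.
 */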
451 
452 /* Returns true if the TTE tag is valid and matches the virtual address
453    value in the given context. Requires the virtual address mask value
454    calculated from the TTE entry size. */
455 static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
456                                        uint64_t address, uint64_t context,
457                                        hwaddr *physical)
458 {
459     uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));
460 
461     /* valid, context match, virtual address match? */
462     if (TTE_IS_VALID(tlb->tte) &&
463         (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
464         && compare_masked(address, tlb->tag, mask)) {
465         /* decode physical address */
466         *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
467         return 1;
468     }
469 
470     return 0;
471 }
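
/*
 * Illustrative mask values: TTE_PGSIZE() is 0..3 for 8 KB, 64 KB, 512 KB and
 * 4 MB pages, so -(8192ULL << 3 * size) evaluates to 0xffffffffffffe000,
 * 0xffffffffffff0000, 0xfffffffffff80000 and 0xffffffffffc00000
 * respectively.  The final & 0x1ffffffe000ULL keeps PA bits 40:13, matching
 * the 41-bit physical address space handled above.
 */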
472 
473 static int get_physical_address_data(CPUSPARCState *env,
474                                      hwaddr *physical, int *prot,
475                                      target_ulong address, int rw, int mmu_idx)
476 {
477     CPUState *cs = CPU(sparc_env_get_cpu(env));
478     unsigned int i;
479     uint64_t context;
480     uint64_t sfsr = 0;
481     bool is_user = false;
482 
483     switch (mmu_idx) {
484     case MMU_PHYS_IDX:
485         g_assert_not_reached();
486     case MMU_USER_IDX:
487         is_user = true;
488         /* fallthru */
489     case MMU_KERNEL_IDX:
490         context = env->dmmu.mmu_primary_context & 0x1fff;
491         sfsr |= SFSR_CT_PRIMARY;
492         break;
493     case MMU_USER_SECONDARY_IDX:
494         is_user = true;
495         /* fallthru */
496     case MMU_KERNEL_SECONDARY_IDX:
497         context = env->dmmu.mmu_secondary_context & 0x1fff;
498         sfsr |= SFSR_CT_SECONDARY;
499         break;
500     case MMU_NUCLEUS_IDX:
501         sfsr |= SFSR_CT_NUCLEUS;
502         /* FALLTHRU */
503     default:
504         context = 0;
505         break;
506     }
507 
508     if (rw == 1) {
509         sfsr |= SFSR_WRITE_BIT;
510     } else if (rw == 4) {
511         sfsr |= SFSR_NF_BIT;
512     }
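    /*
     * rw encodes the access type throughout this file: 0 is a data load,
     * 1 a data store, 2 an instruction fetch and 4 a no-fault load (as
     * issued, for instance, by cpu_get_phys_page_nofault() further down).
     */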
513 
514     for (i = 0; i < 64; i++) {
515         /* ctx match, vaddr match, valid? */
516         if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
517             int do_fault = 0;
518 
519             /* access ok? */
520             /* multiple bits in SFSR.FT may be set on TT_DFAULT */
521             if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
522                 do_fault = 1;
523                 sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
524                 trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
525             }
526             if (rw == 4) {
527                 if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
528                     do_fault = 1;
529                     sfsr |= SFSR_FT_NF_E_BIT;
530                 }
531             } else {
532                 if (TTE_IS_NFO(env->dtlb[i].tte)) {
533                     do_fault = 1;
534                     sfsr |= SFSR_FT_NFO_BIT;
535                 }
536             }
537 
538             if (do_fault) {
539                 /* faults above are reported with TT_DFAULT. */
540                 cs->exception_index = TT_DFAULT;
541             } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
542                 do_fault = 1;
543                 cs->exception_index = TT_DPROT;
544 
545                 trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
546             }
547 
548             if (!do_fault) {
549                 *prot = PAGE_READ;
550                 if (TTE_IS_W_OK(env->dtlb[i].tte)) {
551                     *prot |= PAGE_WRITE;
552                 }
553 
554                 TTE_SET_USED(env->dtlb[i].tte);
555 
556                 return 0;
557             }
558 
559             if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
560                 sfsr |= SFSR_OW_BIT; /* overflow (not read before
561                                         another fault) */
562             }
563 
564             if (env->pstate & PS_PRIV) {
565                 sfsr |= SFSR_PR_BIT;
566             }
567 
568             /* FIXME: ASI field in SFSR must be set */
569             env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
570 
571             env->dmmu.sfar = address; /* Fault address register */
572 
573             env->dmmu.tag_access = (address & ~0x1fffULL) | context;
574 
575             return 1;
576         }
577     }
578 
579     trace_mmu_helper_dmiss(address, context);
580 
581     /*
582      * On MMU misses:
583      * - UltraSPARC IIi: SFSR and SFAR unmodified
584      * - JPS1: SFAR updated and some fields of SFSR updated
585      */
586     env->dmmu.tag_access = (address & ~0x1fffULL) | context;
587     cs->exception_index = TT_DMISS;
588     return 1;
589 }
590 
591 static int get_physical_address_code(CPUSPARCState *env,
592                                      hwaddr *physical, int *prot,
593                                      target_ulong address, int mmu_idx)
594 {
595     CPUState *cs = CPU(sparc_env_get_cpu(env));
596     unsigned int i;
597     uint64_t context;
598     bool is_user = false;
599 
600     switch (mmu_idx) {
601     case MMU_PHYS_IDX:
602     case MMU_USER_SECONDARY_IDX:
603     case MMU_KERNEL_SECONDARY_IDX:
604         g_assert_not_reached();
605     case MMU_USER_IDX:
606         is_user = true;
607         /* fallthru */
608     case MMU_KERNEL_IDX:
609         context = env->dmmu.mmu_primary_context & 0x1fff;
610         break;
611     default:
612         context = 0;
613         break;
614     }
615 
616     if (env->tl == 0) {
617         /* PRIMARY context */
618         context = env->dmmu.mmu_primary_context & 0x1fff;
619     } else {
620         /* NUCLEUS context */
621         context = 0;
622     }
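    /*
     * For instruction fetches the context is always the primary context at
     * trap level 0 and the nucleus context (0) otherwise, so the switch
     * above only validates mmu_idx and derives is_user; its context value
     * is overridden here.
     */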
623 
624     for (i = 0; i < 64; i++) {
625         /* ctx match, vaddr match, valid? */
626         if (ultrasparc_tag_match(&env->itlb[i],
627                                  address, context, physical)) {
628             /* access ok? */
629             if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
630                 /* Fault status register */
631                 if (env->immu.sfsr & SFSR_VALID_BIT) {
632                     env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
633                                                      another fault) */
634                 } else {
635                     env->immu.sfsr = 0;
636                 }
637                 if (env->pstate & PS_PRIV) {
638                     env->immu.sfsr |= SFSR_PR_BIT;
639                 }
640                 if (env->tl > 0) {
641                     env->immu.sfsr |= SFSR_CT_NUCLEUS;
642                 }
643 
644                 /* FIXME: ASI field in SFSR must be set */
645                 env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
646                 cs->exception_index = TT_TFAULT;
647 
648                 env->immu.tag_access = (address & ~0x1fffULL) | context;
649 
650                 trace_mmu_helper_tfault(address, context);
651 
652                 return 1;
653             }
654             *prot = PAGE_EXEC;
655             TTE_SET_USED(env->itlb[i].tte);
656             return 0;
657         }
658     }
659 
660     trace_mmu_helper_tmiss(address, context);
661 
662     /* The context comes from the DMMU (dmmuregs[1]); the IMMU shares it */
663     env->immu.tag_access = (address & ~0x1fffULL) | context;
664     cs->exception_index = TT_TMISS;
665     return 1;
666 }
667 
668 static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
669                                 int *prot, int *access_index,
670                                 target_ulong address, int rw, int mmu_idx,
671                                 target_ulong *page_size)
672 {
673     /* ??? We treat everything as a small page, then explicitly flush
674        everything when an entry is evicted.  */
675     *page_size = TARGET_PAGE_SIZE;
676 
677     /* safety net to catch wrong softmmu index use from dynamic code */
678     if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
679         if (rw == 2) {
680             trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
681                                                 env->dmmu.mmu_primary_context,
682                                                 env->dmmu.mmu_secondary_context,
683                                                 address);
684         } else {
685             trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
686                                                 env->dmmu.mmu_primary_context,
687                                                 env->dmmu.mmu_secondary_context,
688                                                 address);
689         }
690     }
691 
692     if (mmu_idx == MMU_PHYS_IDX) {
693         *physical = ultrasparc_truncate_physical(address);
694         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
695         return 0;
696     }
697 
698     if (rw == 2) {
699         return get_physical_address_code(env, physical, prot, address,
700                                          mmu_idx);
701     } else {
702         return get_physical_address_data(env, physical, prot, address, rw,
703                                          mmu_idx);
704     }
705 }
706 
707 /* Perform address translation */
708 int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
709                                int mmu_idx)
710 {
711     SPARCCPU *cpu = SPARC_CPU(cs);
712     CPUSPARCState *env = &cpu->env;
713     target_ulong vaddr;
714     hwaddr paddr;
715     target_ulong page_size;
716     int error_code = 0, prot, access_index;
717 
718     address &= TARGET_PAGE_MASK;
719     error_code = get_physical_address(env, &paddr, &prot, &access_index,
720                                       address, rw, mmu_idx, &page_size);
721     if (error_code == 0) {
722         vaddr = address;
723 
724         trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
725                                    env->dmmu.mmu_primary_context,
726                                    env->dmmu.mmu_secondary_context);
727 
728         tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
729         return 0;
730     }
731     /* XXX */
732     return 1;
733 }
734 
735 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
736 {
737     unsigned int i;
738     const char *mask;
739 
740     (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
741                    PRId64 "\n",
742                    env->dmmu.mmu_primary_context,
743                    env->dmmu.mmu_secondary_context);
744     (*cpu_fprintf)(f, "DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
745                    "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
746     if ((env->lsu & DMMU_E) == 0) {
747         (*cpu_fprintf)(f, "DMMU disabled\n");
748     } else {
749         (*cpu_fprintf)(f, "DMMU dump\n");
750         for (i = 0; i < 64; i++) {
751             switch (TTE_PGSIZE(env->dtlb[i].tte)) {
752             default:
753             case 0x0:
754                 mask = "  8k";
755                 break;
756             case 0x1:
757                 mask = " 64k";
758                 break;
759             case 0x2:
760                 mask = "512k";
761                 break;
762             case 0x3:
763                 mask = "  4M";
764                 break;
765             }
766             if (TTE_IS_VALID(env->dtlb[i].tte)) {
767                 (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
768                                ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
769                                i,
770                                env->dtlb[i].tag & (uint64_t)~0x1fffULL,
771                                TTE_PA(env->dtlb[i].tte),
772                                mask,
773                                TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
774                                TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
775                                TTE_IS_LOCKED(env->dtlb[i].tte) ?
776                                "locked" : "unlocked",
777                                env->dtlb[i].tag & (uint64_t)0x1fffULL,
778                                TTE_IS_GLOBAL(env->dtlb[i].tte) ?
779                                "global" : "local");
780             }
781         }
782     }
783     if ((env->lsu & IMMU_E) == 0) {
784         (*cpu_fprintf)(f, "IMMU disabled\n");
785     } else {
786         (*cpu_fprintf)(f, "IMMU dump\n");
787         for (i = 0; i < 64; i++) {
788             switch (TTE_PGSIZE(env->itlb[i].tte)) {
789             default:
790             case 0x0:
791                 mask = "  8k";
792                 break;
793             case 0x1:
794                 mask = " 64k";
795                 break;
796             case 0x2:
797                 mask = "512k";
798                 break;
799             case 0x3:
800                 mask = "  4M";
801                 break;
802             }
803             if (TTE_IS_VALID(env->itlb[i].tte)) {
804                 (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
805                                ", %s, %s, %s, ctx %" PRId64 " %s\n",
806                                i,
807                                env->itlb[i].tag & (uint64_t)~0x1fffULL,
808                                TTE_PA(env->itlb[i].tte),
809                                mask,
810                                TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
811                                TTE_IS_LOCKED(env->itlb[i].tte) ?
812                                "locked" : "unlocked",
813                                env->itlb[i].tag & (uint64_t)0x1fffULL,
814                                TTE_IS_GLOBAL(env->itlb[i].tte) ?
815                                "global" : "local");
816             }
817         }
818     }
819 }
820 
821 #endif /* TARGET_SPARC64 */
822 
823 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
824                                    target_ulong addr, int rw, int mmu_idx)
825 {
826     target_ulong page_size;
827     int prot, access_index;
828 
829     return get_physical_address(env, phys, &prot, &access_index, addr, rw,
830                                 mmu_idx, &page_size);
831 }
832 
833 #if defined(TARGET_SPARC64)
834 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
835                                            int mmu_idx)
836 {
837     hwaddr phys_addr;
838 
839     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
840         return -1;
841     }
842     return phys_addr;
843 }
844 #endif
845 
846 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
847 {
848     SPARCCPU *cpu = SPARC_CPU(cs);
849     CPUSPARCState *env = &cpu->env;
850     hwaddr phys_addr;
851     int mmu_idx = cpu_mmu_index(env, false);
852     MemoryRegionSection section;
853 
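    /*
     * Try an instruction-fetch translation (rw == 2) first and fall back to
     * a data load (rw == 0): a page may be mapped for only one of the two,
     * e.g. in only one of the sparc64 ITLB or DTLB.
     */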
854     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
855         if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
856             return -1;
857         }
858     }
859     section = memory_region_find(get_system_memory(), phys_addr, 1);
860     memory_region_unref(section.mr);
861     if (!int128_nz(section.size)) {
862         return -1;
863     }
864     return phys_addr;
865 }
866 #endif
867