xref: /openbmc/qemu/target/ppc/mmu-radix64.c (revision 5b73b248)
/*
 *  PowerPC Radix MMU emulation helpers for QEMU.
 *
 *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"
#include "mmu-books.h"

/* Radix Partition Table Entry Fields */
#define PATE1_R_PRTB           0x0FFFFFFFFFFFF000
#define PATE1_R_PRTS           0x000000000000001F

/* Radix Process Table Entry Fields */
#define PRTBE_R_GET_RTS(rts) \
    ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
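/*
 * The 5-bit RTS (radix tree size) field is split across the entry: two
 * high-order bits at 62:61 and three low-order bits at 7:5. The macro above
 * reassembles them and adds 31 to yield the address space size in bits.
 * For example, a 52-bit space encodes RTS = 21 (0b10101): bits 62:61 = 0b10
 * contribute 16, bits 7:5 = 0b101 contribute 5, and 16 + 5 + 31 = 52.
 */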
#define PRTBE_R_RPDB            0x0FFFFFFFFFFFFF00
#define PRTBE_R_RPDS            0x000000000000001F

/* Radix Page Directory/Table Entry Fields */
#define R_PTE_VALID             0x8000000000000000
#define R_PTE_LEAF              0x4000000000000000
#define R_PTE_SW0               0x2000000000000000
#define R_PTE_RPN               0x01FFFFFFFFFFF000
#define R_PTE_SW1               0x0000000000000E00
#define R_GET_SW(sw)            (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
#define R_PTE_R                 0x0000000000000100
#define R_PTE_C                 0x0000000000000080
#define R_PTE_ATT               0x0000000000000030
#define R_PTE_ATT_NORMAL        0x0000000000000000
#define R_PTE_ATT_SAO           0x0000000000000010
#define R_PTE_ATT_NI_IO         0x0000000000000020
#define R_PTE_ATT_TOLERANT_IO   0x0000000000000030
#define R_PTE_EAA_PRIV          0x0000000000000008
#define R_PTE_EAA_R             0x0000000000000004
#define R_PTE_EAA_RW            0x0000000000000002
#define R_PTE_EAA_X             0x0000000000000001
#define R_PDE_NLB               PRTBE_R_RPDB
#define R_PDE_NLS               PRTBE_R_RPDS

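/*
 * The two most-significant EA bits select one of four "quadrants", each of
 * which implies the (LPID, PID) pair used to pick the translation tables;
 * the switch statements below encode that mapping for hypervisor
 * (MSR[HV] = 1) and guest (MSR[HV] = 0) contexts. The quadrant masks
 * themselves are defined in the included MMU headers.
 */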
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "executing");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    env->error_code = 0;
    if (cause & DSISR_PRTABLE_FAULT) {
        /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
        env->error_code = access_type;
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        break;
    default:
        g_assert_not_reached();
    }
}

static int ppc_radix64_get_prot_eaa(uint64_t pte)
{
    return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
           (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
           (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
}

static int ppc_radix64_get_prot_amr(const PowerPCCPU *cpu)
{
    const CPUPPCState *env = &cpu->env;
    int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */
    int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */

    return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
           (amr & 0x1 ? 0 : PAGE_READ) |
           (iamr & 0x1 ? 0 : PAGE_EXEC);
}
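/*
 * Example: with AMR(63:62) = 0b10, key 0 denies stores but permits loads,
 * so the helper above returns PAGE_READ, plus PAGE_EXEC whenever IAMR(62)
 * is clear. Only key 0 is modeled here; the remaining AMR keys are ignored.
 */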

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (!check_prot_access_type(*prot, access_type)) {
        /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}

static int ppc_radix64_check_rc(MMUAccessType access_type, uint64_t pte)
{
    switch (access_type) {
    case MMU_DATA_STORE:
        if (!(pte & R_PTE_C)) {
            break;
        }
        /* fall through */
    case MMU_INST_FETCH:
    case MMU_DATA_LOAD:
        if (!(pte & R_PTE_R)) {
            break;
        }

        /* R/C bits are already set appropriately for this access */
        return 0;
    }

    return 1;
}
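/*
 * Note that QEMU never sets the R/C bits in the PTE on the guest's behalf:
 * when a required bit is clear, the callers below raise an interrupt with
 * DSISR_ATOMIC_RC and leave the update to software.
 */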

static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    bool ret;

    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPU that supports a different Radix MMU configuration will need its
     * own implementation.
     */
    switch (level) {
    case 0:     /* Root Page Dir */
        ret = psize == 52 && nls == 13;
        break;
    case 1:
    case 2:
        ret = nls == 9;
        break;
    case 3:
        ret = nls == 9 || nls == 5;
        break;
    default:
        ret = false;
    }

    if (unlikely(!ret)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
                      "level %d size %d nls %"PRIu64"\n",
                      level, psize, nls);
    }
    return ret;
}
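/*
 * With the configurations accepted above, a walk from a 52-bit root consumes
 * 13 + 9 + 9 bits at levels 0-2, leaving the level 3 leaves to map 4 KiB
 * pages (nls = 9) or 64 KiB pages (nls = 5); a leaf entry at level 2 or
 * level 1 instead yields a 2 MiB or 1 GiB page.
 */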

static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                "%s: misaligned page dir/table base: 0x%" PRIx64
                " page dir size: 0x%" PRIx64 "\n",
                __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}
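/*
 * Worked example for the shift/mask above: once a 52-bit root has consumed
 * 13 bits, *psize is 39, so an nls = 9 directory consumes EA bits 38:30,
 * i.e. index = (eaddr >> 30) & 0x1FF, with the next entry at
 * nlb + index * 8.
 */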

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: misaligned page dir base: 0x%" PRIx64
            " page dir size: 0x%" PRIx64 "\n",
            __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits to ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}
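/*
 * Example of the final combination above: a 64 KiB leaf gives *psize = 16
 * and mask = 0xFFFF, so the real address is the PTE's RPN with its low
 * 16 bits replaced by the low 16 bits of the effective address.
 */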

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}
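/*
 * The PATE1_R_PRTS mask can be reused on dw0 above only because both size
 * fields occupy bits 4:0 of their doubleword; on dw0 those bits hold the
 * root page directory size of the partition-scoped tree, which is
 * presumably required to be at least 5 here (a 256-byte directory).
 */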

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType orig_access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, uint64_t lpid,
                                              bool guest_visible)
{
    MMUAccessType access_type = orig_access_type;
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    if (pde_addr) {
        /*
         * Translation of process-scoped tables/directories is performed as
         * a read access.
         */
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, orig_access_type,
                                  eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        if (ppc_radix64_check_rc(access_type, pte)) {
            /*
             * Per ISA 3.1 Book III, 7.5.3 and 7.5.5, failure to set R/C during
             * partition-scoped translation when effLPID = 0 results in normal
             * (non-Hypervisor) Data and Instruction Storage Interrupts
             * respectively.
             *
             * ISA 3.0 is ambiguous about this, but tests on POWER9 hardware
             * seem to exhibit the same behavior.
             */
            if (lpid > 0) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
                                      DSISR_ATOMIC_RC);
            } else {
                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            }
            return 1;
        }
    }

    return 0;
}

/*
 * When not nested, the spapr vhc provides a flat partition scope backed
 * directly by QEMU memory.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, uint64_t lpid,
                                            bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        /* mmu_idx is 5 because we're translating from hypervisor scope */
        ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                 prtbe_addr, pate, &h_raddr,
                                                 &h_prot, &h_page_size, true,
                                                 5, lpid, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1);                            /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            /* mmu_idx is 5 because we're translating from hypervisor scope */
            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     pte_addr, pate, &h_raddr,
                                                     &h_prot, &h_page_size,
                                                     true, 5, lpid,
                                                     guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        /* R/C bits not appropriately set for access */
        if (ppc_radix64_check_rc(access_type, pte)) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            return 1;
        }
    }

    return 0;
}
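/*
 * In both walks above the effective address is masked with R_EADDR_MASK
 * before indexing, which clears the quadrant bits; a quadrant 3 kernel
 * address therefore walks the same tree layout as the equivalent quadrant 0
 * address, differing only in the (LPID, PID) pair selected earlier.
 */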

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        if (!cpu->vhyp_class->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, lpid,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, lpid,
                                                     guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}
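/*
 * When guest_visible is false (for example, a debug translation that must
 * not perturb guest state), the helpers above perform the same table walks
 * but skip every ppc_radix64_raise_* call, so no interrupt registers are
 * modified on failure.
 */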

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}