xref: /openbmc/qemu/target/ppc/mmu-radix64.c (revision 0c4e9931)
/*
 *  PowerPC Radix MMU emulation helpers for QEMU.
 *
 *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

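/*
 * Derive the (LPID, PID) pair that fully qualifies an effective address,
 * based on MSR[HV] and the quadrant encoded in the top two bits of the EA.
 * Returns false for the quadrants that are illegal in guest mode (1 and 2).
 */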
static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        }
    }

    return true;
}

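/*
 * Raise a segment interrupt: ISEG for instruction fetches (rwx == 2),
 * DSEG for loads and stores (with the faulting EA placed in DAR).
 */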
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

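/*
 * Raise a storage interrupt: ISI for instruction fetches (the cause is kept
 * in the error code), DSI for data accesses (cause in DSISR, EA in DAR, with
 * DSISR_ISSTORE added for stores).
 */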
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
    }
}

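/*
 * Raise a hypervisor storage interrupt (HISI/HDSI) for faults taken during
 * partition-scoped translation. The guest real address being translated is
 * reported in ASDR; data faults additionally set HDSISR and HDAR.
 */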
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                  hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
    } else { /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
    }
}

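/*
 * Check whether an access of type rwx (0 = load, 1 = store, 2 = fetch) is
 * permitted by the PTE: pages with the non-idempotent I/O attribute are
 * never executable, and the Encoded Access Authority is evaluated against
 * the privilege level (with the AMR further restricting privileged
 * process-scoped accesses to non-privileged pages). On denial the reason is
 * accumulated in *fault_cause and true is returned.
 */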
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
                                   int *fault_cause, int *prot,
                                   bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };

    /* Check Page Attributes (pte58:59) */
    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
        *prot = 0;
    } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}

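/*
 * Update the Reference and Change bits in the PTE. R is always set; C is
 * only set on a store, otherwise the page is mapped read-only so that the
 * first store comes back through here to set C. The updated PTE is written
 * back only if it actually changed.
 */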
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
                               hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (rwx == 1) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

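/*
 * Fetch one page-directory entry and either terminate the walk (leaf PTE)
 * or compute the address of the next-level entry from NLB/NLS and the next
 * slice of the effective address. Returns non-zero and sets *fault_cause on
 * an invalid entry or a bad tree configuration.
 */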
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, pde;

    if (*nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
    }
    return 0;
}

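/*
 * Walk a radix tree from its root (base_addr, nls) down to the leaf PTE for
 * eaddr. *psize starts at the RTS-encoded virtual address width and is
 * reduced by each level's NLS, so that on exit it is the log2 page size and
 * the leaf RPN plus the low eaddr bits form the real address. For example
 * (illustrative only), a 52-bit space walked through levels of 13+9+9+9
 * bits leaves *psize == 12, i.e. a 4KB page.
 */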
static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    *pte_addr = base_addr + (index * sizeof(pde));
    do {
        int ret;

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* OR high bits of rpn with low bits of ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

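/*
 * Sanity-check a Partition Table Entry before using it: the partition must
 * be radix (HR bit set), LPID 0 is only legal when the CPU is in hypervisor
 * mode, and the encoded table-size field must be at least 5.
 */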
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

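/*
 * Partition-scoped translation: convert a guest real address into a host
 * real address by walking the partition-scoped tree rooted in the PATE.
 * pde_addr flags that the guest address being translated is itself a page
 * table entry address, which turns faults into process-table faults; when
 * cause_excp is set, failures raise an HISI/HDSI instead of failing
 * silently.
 */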
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              bool cause_excp)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (cause_excp) {
            ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot);

    return 0;
}

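/*
 * Process-scoped translation: look up the process table entry for pid, then
 * walk the process-scoped tree to convert the effective address into a
 * guest real address. Without a virtual hypervisor the process table entry
 * address is itself translated partition-scoped first, and for a guest
 * (MSR[HV] = 0) every level of the walk is translated the same way.
 */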
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            bool cause_excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (cause_excp) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;

    if (cpu->vhyp) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, 1, 1);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (msr_hv || cpu->vhyp) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (cause_excp) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1);                            /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, 1, 1);
            if (ret) {
                return ret;
            }

            ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
                                         &nls, g_page_size, &pte, &fault_cause);
            if (ret) {
                /* No valid pte */
                if (cause_excp) {
                    ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* OR high bits of rpn with low bits of ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) {
        /* Access denied due to protection */
        if (cause_excp) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        }
        return 1;
    }

    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
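/*
 * ppc_radix64_xlate() applies the table above: process-scoped translation
 * when relocation is on, followed by partition-scoped translation whenever
 * the access does not come from the hypervisor itself (LPID != 0 or
 * MSR[HV] = 0) and there is no virtual hypervisor. It returns 0 on success
 * with *raddr, *psizep and *protp filled in; on failure it returns non-zero
 * and, if cause_excp is set, raises the appropriate interrupt.
 */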
static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                             bool relocation,
                             hwaddr *raddr, int *psizep, int *protp,
                             bool cause_excp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid = 0, pid = 0;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (cause_excp) {
            ppc_radix64_raise_segi(cpu, rwx, eaddr);
        }
        return 1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (cause_excp) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
            }
            return 1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (cause_excp) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
            }
            return 1;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation is enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Otherwise, translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, cause_excp);
        if (ret) {
            return ret;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (cpu->vhyp) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !msr_hv) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr,
                                                     pate, raddr, &prot, &psize,
                                                     0, cause_excp);
            if (ret) {
                return ret;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return 0;
}

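/*
 * TLB-fill entry point for radix mode. Real-mode accesses by the hypervisor
 * (or under a virtual hypervisor) bypass translation, apart from HRMOR
 * handling; everything else goes through ppc_radix64_xlate() and, on
 * success, installs the mapping in QEMU's software TLB.
 */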
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int page_size, prot;
    bool relocation;
    hwaddr raddr;

    assert(!(msr_hv && cpu->vhyp));
    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (!ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
    if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
                          &page_size, &prot, true)) {
        return 1;
    }

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}

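/*
 * Debug (gdbstub/monitor) address translation: same walk as above but for a
 * data read, never raising exceptions. Returns -1 if no valid translation
 * exists.
 */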
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    int psize, prot;
    hwaddr raddr;

    /* Handle Real Mode */
    if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
                          &prot, false)) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}