xref: /openbmc/qemu/target/ppc/mmu-hash64.c (revision ac12b601)
1 /*
2  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (c) 2013 David Gibson, IBM Corporation
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/error-report.h"
26 #include "qemu/qemu-print.h"
27 #include "sysemu/hw_accel.h"
28 #include "kvm_ppc.h"
29 #include "mmu-hash64.h"
30 #include "exec/log.h"
31 #include "hw/hw.h"
32 #include "mmu-book3s-v3.h"
33 #include "helper_regs.h"
34 
35 /* #define DEBUG_SLB */
36 
37 #ifdef DEBUG_SLB
38 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
39 #else
40 #  define LOG_SLB(...) do { } while (0)
41 #endif
42 
43 /*
44  * SLB handling
45  */
46 
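/*
 * Look up the SLB entry covering an effective address.  Both the 256M and
 * the 1T form of the ESID are computed up front; each valid entry is then
 * compared against the form matching its B (segment size) field.  Returns
 * NULL if no valid entry covers eaddr.
 */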
47 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
48 {
49     CPUPPCState *env = &cpu->env;
50     uint64_t esid_256M, esid_1T;
51     int n;
52 
53     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
54 
55     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
56     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
57 
58     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
59         ppc_slb_t *slb = &env->slb[n];
60 
61         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
62                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
63         /*
64          * We check for 1T matches on all MMUs here - if the MMU
65          * doesn't have 1T segment support, we will have prevented 1T
66          * entries from being inserted in the slbmte code.
67          */
68         if (((slb->esid == esid_256M) &&
69              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
70             || ((slb->esid == esid_1T) &&
71                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
72             return slb;
73         }
74     }
75 
76     return NULL;
77 }
78 
79 void dump_slb(PowerPCCPU *cpu)
80 {
81     CPUPPCState *env = &cpu->env;
82     int i;
83     uint64_t slbe, slbv;
84 
85     cpu_synchronize_state(CPU(cpu));
86 
87     qemu_printf("SLB\tESID\t\t\tVSID\n");
88     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
89         slbe = env->slb[i].esid;
90         slbv = env->slb[i].vsid;
91         if (slbe == 0 && slbv == 0) {
92             continue;
93         }
94         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
95                     i, slbe, slbv);
96     }
97 }
98 
99 void helper_slbia(CPUPPCState *env, uint32_t ih)
100 {
101     PowerPCCPU *cpu = env_archcpu(env);
102     int starting_entry;
103     int n;
104 
105     /*
106      * slbia must always flush all TLBs (equivalent to the ERAT in the PPC
107      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
108      * can overwrite a valid SLB without flushing its lookaside information.
109      *
110      * It would be possible to keep the TLB in sync with the SLB by flushing
111      * when a valid entry is overwritten by slbmte, and therefore slbia would
112      * not have to flush unless it evicts a valid SLB entry. However it is
113      * expected that slbmte is more common than slbia, and slbia is usually
114      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
115      * good one.
116      *
117      * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
118      * the same SLB entries (everything but entry 0), but differ in what
119      * "lookaside information" is invalidated. TCG can ignore this and flush
120      * everything.
121      *
122      * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
123      * invalidated.
124      */
125 
126     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
127 
128     starting_entry = 1; /* default for IH=0,1,2,6 */
129 
130     if (env->mmu_model == POWERPC_MMU_3_00) {
131         switch (ih) {
132         case 0x7:
133             /* invalidate no SLBs, but all lookaside information */
134             return;
135 
136         case 0x3:
137         case 0x4:
138             /* also considers SLB entry 0 */
139             starting_entry = 0;
140             break;
141 
142         case 0x5:
143             /* treat undefined values as ih==0, and warn */
144             qemu_log_mask(LOG_GUEST_ERROR,
145                           "slbia undefined IH field %u.\n", ih);
146             break;
147 
148         default:
149             /* 0,1,2,6 */
150             break;
151         }
152     }
153 
154     for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
155         ppc_slb_t *slb = &env->slb[n];
156 
157         if (!(slb->esid & SLB_ESID_V)) {
158             continue;
159         }
160         if (env->mmu_model == POWERPC_MMU_3_00) {
161             if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
162                 /* preserves entries with a class value of 0 */
163                 continue;
164             }
165         }
166 
167         slb->esid &= ~SLB_ESID_V;
168     }
169 }
170 
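/*
 * Common body for slbie (local) and slbieg (global): invalidate the SLB
 * entry covering addr, if any, and request a TLB flush.  "global" only
 * selects whether a local or a global flush is requested.
 */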
171 static void __helper_slbie(CPUPPCState *env, target_ulong addr,
172                            target_ulong global)
173 {
174     PowerPCCPU *cpu = env_archcpu(env);
175     ppc_slb_t *slb;
176 
177     slb = slb_lookup(cpu, addr);
178     if (!slb) {
179         return;
180     }
181 
182     if (slb->esid & SLB_ESID_V) {
183         slb->esid &= ~SLB_ESID_V;
184 
185         /*
186          * XXX: since segment sizes are 256 MB or 1 TB and QEMU still
187          *      lacks a tlb_flush_mask(env, n, mask), we just invalidate
188          *      all TLBs
189          */
190         env->tlb_need_flush |=
191             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
192     }
193 }
194 
195 void helper_slbie(CPUPPCState *env, target_ulong addr)
196 {
197     __helper_slbie(env, addr, false);
198 }
199 
200 void helper_slbieg(CPUPPCState *env, target_ulong addr)
201 {
202     __helper_slbie(env, addr, true);
203 }
204 
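/*
 * Validate and install an SLB entry.  Rejects out-of-range slots, reserved
 * ESID bits, unknown B (segment size) values, 1T segments on MMUs without
 * 1T support, and LLP page-size encodings that match none of the supported
 * segment page sizes.  Returns 0 on success, -1 on any validation failure.
 */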
205 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
206                   target_ulong esid, target_ulong vsid)
207 {
208     CPUPPCState *env = &cpu->env;
209     ppc_slb_t *slb = &env->slb[slot];
210     const PPCHash64SegmentPageSizes *sps = NULL;
211     int i;
212 
213     if (slot >= cpu->hash64_opts->slb_size) {
214         return -1; /* Bad slot number */
215     }
216     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
217         return -1; /* Reserved bits set */
218     }
219     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
220         return -1; /* Bad segment size */
221     }
222     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
223         return -1; /* 1T segment on MMU that doesn't support it */
224     }
225 
226     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
227         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
228 
229         if (!sps1->page_shift) {
230             break;
231         }
232 
233         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
234             sps = sps1;
235             break;
236         }
237     }
238 
239     if (!sps) {
240         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
241                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
242                      slot, esid, vsid);
243         return -1;
244     }
245 
246     slb->esid = esid;
247     slb->vsid = vsid;
248     slb->sps = sps;
249 
250     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
251             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
252             slb->esid, slb->vsid);
253 
254     return 0;
255 }
256 
257 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
258                              target_ulong *rt)
259 {
260     CPUPPCState *env = &cpu->env;
261     int slot = rb & 0xfff;
262     ppc_slb_t *slb = &env->slb[slot];
263 
264     if (slot >= cpu->hash64_opts->slb_size) {
265         return -1;
266     }
267 
268     *rt = slb->esid;
269     return 0;
270 }
271 
272 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
273                              target_ulong *rt)
274 {
275     CPUPPCState *env = &cpu->env;
276     int slot = rb & 0xfff;
277     ppc_slb_t *slb = &env->slb[slot];
278 
279     if (slot >= cpu->hash64_opts->slb_size) {
280         return -1;
281     }
282 
283     *rt = slb->vsid;
284     return 0;
285 }
286 
287 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
288                              target_ulong *rt)
289 {
290     CPUPPCState *env = &cpu->env;
291     ppc_slb_t *slb;
292 
293     if (!msr_is_64bit(env, env->msr)) {
294         rb &= 0xffffffff;
295     }
296     slb = slb_lookup(cpu, rb);
297     if (slb == NULL) {
298         *rt = (target_ulong)-1ul;
299     } else {
300         *rt = slb->vsid;
301     }
302     return 0;
303 }
304 
305 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
306 {
307     PowerPCCPU *cpu = env_archcpu(env);
308 
309     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
310         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
311                                POWERPC_EXCP_INVAL, GETPC());
312     }
313 }
314 
315 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
316 {
317     PowerPCCPU *cpu = env_archcpu(env);
318     target_ulong rt = 0;
319 
320     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
321         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
322                                POWERPC_EXCP_INVAL, GETPC());
323     }
324     return rt;
325 }
326 
327 target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
328 {
329     PowerPCCPU *cpu = env_archcpu(env);
330     target_ulong rt = 0;
331 
332     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
333         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
334                                POWERPC_EXCP_INVAL, GETPC());
335     }
336     return rt;
337 }
338 
339 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
340 {
341     PowerPCCPU *cpu = env_archcpu(env);
342     target_ulong rt = 0;
343 
344     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
345         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
346                                POWERPC_EXCP_INVAL, GETPC());
347     }
348     return rt;
349 }
350 
351 /* Check No-Execute or Guarded Storage */
352 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
353                                               ppc_hash_pte64_t pte)
354 {
355     /* N/G (no-execute/guarded) can only remove exec permission, never read/write */
356     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
357             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
358 }
359 
360 /* Check Basic Storage Protection */
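/*
 * Protection derived from the storage key (KS/KP from the SLB, selected by
 * MSR[PR]) and the PP bits of the PTE, as implemented by the switch below:
 *   key=0: pp 0,1,2 -> read/write/execute;  pp 3,6 -> read/execute
 *   key=1: pp 0,6 -> no access;  pp 1,3 -> read/execute;  pp 2 -> read/write/execute
 * Any other combination is treated as no access.
 */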
361 static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
362                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
363 {
364     CPUPPCState *env = &cpu->env;
365     unsigned pp, key;
366     /*
367      * Some pp bit combinations have undefined behaviour, so default
368      * to no access in those cases
369      */
370     int prot = 0;
371 
372     key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
373              : (slb->vsid & SLB_VSID_KS));
374     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
375 
376     if (key == 0) {
377         switch (pp) {
378         case 0x0:
379         case 0x1:
380         case 0x2:
381             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
382             break;
383 
384         case 0x3:
385         case 0x6:
386             prot = PAGE_READ | PAGE_EXEC;
387             break;
388         }
389     } else {
390         switch (pp) {
391         case 0x0:
392         case 0x6:
393             break;
394 
395         case 0x1:
396         case 0x3:
397             prot = PAGE_READ | PAGE_EXEC;
398             break;
399 
400         case 0x2:
401             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
402             break;
403         }
404     }
405 
406     return prot;
407 }
408 
409 /* Check the instruction access permissions specified in the IAMR */
410 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
411 {
412     CPUPPCState *env = &cpu->env;
413     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
414 
415     /*
416      * An instruction fetch is permitted if the IAMR bit is 0.
417      * If the bit is set, return PAGE_READ | PAGE_WRITE only, because the
418      * IAMR can only take away EXEC permission, not READ or WRITE.
419      * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC,
420      * since EXEC is allowed.
421      */
422     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
423                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
424 }
425 
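/*
 * Virtual Page Class Key Protection: the key in the PTE selects a 2-bit
 * field in the AMR (key 0 uses the two most significant bits).  Within that
 * field, the 0x2 bit forbids stores and the 0x1 bit forbids loads; execute
 * permission is handled separately via the IAMR (ppc_hash64_iamr_prot()).
 */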
426 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
427 {
428     CPUPPCState *env = &cpu->env;
429     int key, amrbits;
430     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
431 
432     /* Only recent MMUs implement Virtual Page Class Key Protection */
433     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
434         return prot;
435     }
436 
437     key = HPTE64_R_KEY(pte.pte1);
438     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
439 
440     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
441     /*         env->spr[SPR_AMR]); */
442 
443     /*
444      * A store is permitted if the AMR bit is 0. Remove write
445      * protection if it is set.
446      */
447     if (amrbits & 0x2) {
448         prot &= ~PAGE_WRITE;
449     }
450     /*
451      * A load is permitted if the AMR bit is 0. Remove read
452      * protection if it is set.
453      */
454     if (amrbits & 0x1) {
455         prot &= ~PAGE_READ;
456     }
457 
458     switch (env->mmu_model) {
459     /*
460      * MMU version 2.07 and later support the IAMR.
461      * ppc_hash64_iamr_prot() returns a mask without PAGE_EXEC when the
462      * IAMR forbids instruction access, so ANDing it here clears PAGE_EXEC;
463      * otherwise the mask includes PAGE_EXEC and prot is left unchanged.
464      */
465     case POWERPC_MMU_2_07:
466     case POWERPC_MMU_3_00:
467         prot &= ppc_hash64_iamr_prot(cpu, key);
468         break;
469     default:
470         break;
471     }
472 
473     return prot;
474 }
475 
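/*
 * Map n consecutive HPTEs starting at index ptex so they can be read
 * directly.  With a virtual hypervisor the vhyp callback provides the
 * mapping; otherwise the hashed page table in guest memory is mapped
 * through the CPU's address space.  Every successful call must be
 * balanced by ppc_hash64_unmap_hptes().
 */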
476 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
477                                              hwaddr ptex, int n)
478 {
479     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
480     hwaddr base;
481     hwaddr plen = n * HASH_PTE_SIZE_64;
482     const ppc_hash_pte64_t *hptes;
483 
484     if (cpu->vhyp) {
485         PPCVirtualHypervisorClass *vhc =
486             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
487         return vhc->map_hptes(cpu->vhyp, ptex, n);
488     }
489     base = ppc_hash64_hpt_base(cpu);
490 
491     if (!base) {
492         return NULL;
493     }
494 
495     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
496                               MEMTXATTRS_UNSPECIFIED);
497     if (plen < (n * HASH_PTE_SIZE_64)) {
498         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
499     }
500     return hptes;
501 }
502 
503 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
504                             hwaddr ptex, int n)
505 {
506     if (cpu->vhyp) {
507         PPCVirtualHypervisorClass *vhc =
508             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
509         vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
510         return;
511     }
512 
513     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
514                         false, n * HASH_PTE_SIZE_64);
515 }
516 
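/*
 * Work out the actual page size encoded in an HPTE, given the segment's
 * supported page sizes.  Small pages (L bit clear) are only valid in 4k
 * segments.  For large pages, the pte_enc bits embedded in the low part of
 * the RPN field must match one of the segment's encodings.  Returns the
 * page shift, or 0 for an invalid encoding.
 */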
517 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
518                                 uint64_t pte0, uint64_t pte1)
519 {
520     int i;
521 
522     if (!(pte0 & HPTE64_V_LARGE)) {
523         if (sps->page_shift != 12) {
524             /* 4kiB page in a non-4kiB segment */
525             return 0;
526         }
527         /* Normal 4kiB page */
528         return 12;
529     }
530 
531     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
532         const PPCHash64PageSize *ps = &sps->enc[i];
533         uint64_t mask;
534 
535         if (!ps->page_shift) {
536             break;
537         }
538 
539         if (ps->page_shift == 12) {
540             /* L bit is set so this can't be a 4kiB page */
541             continue;
542         }
543 
544         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
545 
546         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
547             return ps->page_shift;
548         }
549     }
550 
551     return 0; /* Bad page size encoding */
552 }
553 
554 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
555 {
556     /* Insert B into pte0 */
557     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
558             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
559              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
560 
561     /* Remove B from pte1 */
562     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
563 }
564 
565 
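/*
 * Scan one PTE group (HPTES_PER_GROUP entries) for a PTE whose V, B, H and
 * AVPN fields match ptem.  On a POWER9 MMU without a virtual hypervisor the
 * HPTE is first converted from the ISA v3.0 layout back to the older layout
 * so the same comparison can be used.  Entries whose page-size encoding does
 * not fit the segment are skipped.  Returns the matching PTE index, or -1.
 */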
566 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
567                                      const PPCHash64SegmentPageSizes *sps,
568                                      target_ulong ptem,
569                                      ppc_hash_pte64_t *pte, unsigned *pshift)
570 {
571     int i;
572     const ppc_hash_pte64_t *pteg;
573     target_ulong pte0, pte1;
574     target_ulong ptex;
575 
576     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
577     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
578     if (!pteg) {
579         return -1;
580     }
581     for (i = 0; i < HPTES_PER_GROUP; i++) {
582         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
583         /*
584          * pte0 contains the valid bit and must be read before pte1,
585          * otherwise we might see an old pte1 with a new valid bit and
586          * thus an inconsistent hpte value
587          */
588         smp_rmb();
589         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
590 
591         /* Convert format if necessary */
592         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
593             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
594         }
595 
596         /* This compares V, B, H (secondary) and the AVPN */
597         if (HPTE64_V_COMPARE(pte0, ptem)) {
598             *pshift = hpte_page_shift(sps, pte0, pte1);
599             /*
600              * If there is no match, ignore the PTE: it could simply
601              * be for a different segment size encoding and the
602              * architecture specifies we should not match. Linux will
603              * potentially leave behind PTEs for the wrong base page
604              * size when demoting segments.
605              */
606             if (*pshift == 0) {
607                 continue;
608             }
609             /*
610              * We don't do anything with pshift yet, as the QEMU TLB
611              * only deals with 4K pages anyway
612              */
613             pte->pte0 = pte0;
614             pte->pte1 = pte1;
615             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
616             return ptex + i;
617         }
618     }
619     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
620     /*
621      * We didn't find a valid entry.
622      */
623     return -1;
624 }
625 
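/*
 * Hashed page table lookup for one effective address.  The hash is computed
 * from the VSID and the page index within the segment:
 *   256M segments:  hash = vsid ^ (epn >> page_shift)
 *   1T segments:    hash = vsid ^ (vsid << 25) ^ (epn >> page_shift)
 * The primary PTE group lives at (hash & hpt_mask) * HPTES_PER_GROUP and the
 * secondary group at (~hash & hpt_mask) * HPTES_PER_GROUP; both are searched
 * before giving up.
 */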
626 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
627                                      ppc_slb_t *slb, target_ulong eaddr,
628                                      ppc_hash_pte64_t *pte, unsigned *pshift)
629 {
630     CPUPPCState *env = &cpu->env;
631     hwaddr hash, ptex;
632     uint64_t vsid, epnmask, epn, ptem;
633     const PPCHash64SegmentPageSizes *sps = slb->sps;
634 
635     /*
636      * The SLB store path should prevent any bad page size encodings
637      * from getting in there, so:
638      */
639     assert(sps);
640 
641     /* If ISL is set in LPCR we need to clamp the page size to 4K */
642     if (env->spr[SPR_LPCR] & LPCR_ISL) {
643         /* We assume that when using TCG, 4k is first entry of SPS */
644         sps = &cpu->hash64_opts->sps[0];
645         assert(sps->page_shift == 12);
646     }
647 
648     epnmask = ~((1ULL << sps->page_shift) - 1);
649 
650     if (slb->vsid & SLB_VSID_B) {
651         /* 1TB segment */
652         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
653         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
654         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
655     } else {
656         /* 256M segment */
657         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
658         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
659         hash = vsid ^ (epn >> sps->page_shift);
660     }
661     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
662     ptem |= HPTE64_V_VALID;
663 
664     /* Page address translation */
665     qemu_log_mask(CPU_LOG_MMU,
666             "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
667             " hash " TARGET_FMT_plx "\n",
668             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
669 
670     /* Primary PTEG lookup */
671     qemu_log_mask(CPU_LOG_MMU,
672             "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
673             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
674             " hash=" TARGET_FMT_plx "\n",
675             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
676             vsid, ptem,  hash);
677     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
678 
679     if (ptex == -1) {
680         /* Secondary PTEG lookup */
681         ptem |= HPTE64_V_SECONDARY;
682         qemu_log_mask(CPU_LOG_MMU,
683                 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
684                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
685                 " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
686                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
687 
688         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
689     }
690 
691     return ptex;
692 }
693 
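/*
 * Like hpte_page_shift(), but without a known segment: try every supported
 * segment page size class until one yields a page shift for this HPTE.
 * A result of 0 means the encoding matches none of them.
 */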
694 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
695                                           uint64_t pte0, uint64_t pte1)
696 {
697     int i;
698 
699     if (!(pte0 & HPTE64_V_LARGE)) {
700         return 12;
701     }
702 
703     /*
704      * The encodings in env->sps need to be carefully chosen so that
705      * this gives an unambiguous result.
706      */
707     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
708         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
709         unsigned shift;
710 
711         if (!sps->page_shift) {
712             break;
713         }
714 
715         shift = hpte_page_shift(sps, pte0, pte1);
716         if (shift) {
717             return shift;
718         }
719     }
720 
721     return 0;
722 }
723 
724 static bool ppc_hash64_use_vrma(CPUPPCState *env)
725 {
726     switch (env->mmu_model) {
727     case POWERPC_MMU_3_00:
728         /*
729          * ISA v3.0 (POWER9) always uses VRMA; the VPM0 field and the
730          * RMOR register no longer exist
731          */
732         return true;
733 
734     default:
735         return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
736     }
737 }
738 
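/*
 * Raise an instruction storage interrupt.  If the access was made in a
 * "virtualized partition memory" context (LPCR[VPM1] with translation on,
 * or VRMA use with translation off) and the CPU is not in hypervisor state,
 * the interrupt is routed to the hypervisor as an HISI instead.
 * ppc_hash64_set_dsi() below does the same for data accesses.
 */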
739 static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
740 {
741     CPUPPCState *env = &POWERPC_CPU(cs)->env;
742     bool vpm;
743 
744     if (msr_ir) {
745         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
746     } else {
747         vpm = ppc_hash64_use_vrma(env);
748     }
749     if (vpm && !msr_hv) {
750         cs->exception_index = POWERPC_EXCP_HISI;
751     } else {
752         cs->exception_index = POWERPC_EXCP_ISI;
753     }
754     env->error_code = error_code;
755 }
756 
757 static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
758 {
759     CPUPPCState *env = &POWERPC_CPU(cs)->env;
760     bool vpm;
761 
762     if (msr_dr) {
763         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
764     } else {
765         vpm = ppc_hash64_use_vrma(env);
766     }
767     if (vpm && !msr_hv) {
768         cs->exception_index = POWERPC_EXCP_HDSI;
769         env->spr[SPR_HDAR] = dar;
770         env->spr[SPR_HDSISR] = dsisr;
771     } else {
772         cs->exception_index = POWERPC_EXCP_DSI;
773         env->spr[SPR_DAR] = dar;
774         env->spr[SPR_DSISR] = dsisr;
775     }
776     env->error_code = 0;
777 }
778 
779 
780 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
781 {
782     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
783 
784     if (cpu->vhyp) {
785         PPCVirtualHypervisorClass *vhc =
786             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
787         vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
788         return;
789     }
790     base = ppc_hash64_hpt_base(cpu);
791 
792 
793     /* The HW performs a non-atomic byte update */
794     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
795 }
796 
797 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
798 {
799     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
800 
801     if (cpu->vhyp) {
802         PPCVirtualHypervisorClass *vhc =
803             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
804         vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
805         return;
806     }
807     base = ppc_hash64_hpt_base(cpu);
808 
809     /* The HW performs a non-atomic byte update */
810     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
811 }
812 
813 static target_ulong rmls_limit(PowerPCCPU *cpu)
814 {
815     CPUPPCState *env = &cpu->env;
816     /*
817      * In theory the meanings of RMLS values are implementation
818      * dependent.  In practice, this seems to have been the set from
819      * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
820      *
821      * Unsupported values mean the OS has shot itself in the
822      * foot. Return a 0-sized RMA in this case, which we expect
823      * to trigger an immediate DSI or ISI
824      */
825     static const target_ulong rma_sizes[16] = {
826         [0] = 256 * GiB,
827         [1] = 16 * GiB,
828         [2] = 1 * GiB,
829         [3] = 64 * MiB,
830         [4] = 256 * MiB,
831         [7] = 128 * MiB,
832         [8] = 32 * MiB,
833     };
834     target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
835 
836     return rma_sizes[rmls];
837 }
838 
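/*
 * Build a fake SLB entry describing the VRMA (virtual real mode area) from
 * LPCR[VRMASD].  The VRMASD field supplies the page-size (L/LP) bits of the
 * VRMA VSID; if the resulting encoding matches none of the supported segment
 * page sizes the setup is invalid and -1 is returned.
 */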
839 static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
840 {
841     CPUPPCState *env = &cpu->env;
842     target_ulong lpcr = env->spr[SPR_LPCR];
843     uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
844     target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
845     int i;
846 
847     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
848         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
849 
850         if (!sps->page_shift) {
851             break;
852         }
853 
854         if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
855             slb->esid = SLB_ESID_V;
856             slb->vsid = vsid;
857             slb->sps = sps;
858             return 0;
859         }
860     }
861 
862     error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
863                  TARGET_FMT_lx, lpcr);
864 
865     return -1;
866 }
867 
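/*
 * Top-level translation for the 64-bit hash MMU, called on a softmmu TLB
 * miss.  The steps are numbered in the body: real-mode handling
 * (VRMA/RMLS/HRMOR), SLB lookup, segment-level no-execute check, hashed
 * page table lookup, protection checks (PP bits, AMR/IAMR, no-execute and
 * guard), reference/change bit updates, and finally insertion of the
 * translation into the QEMU TLB.  Returns 0 on success, or 1 with the
 * appropriate exception state set up.
 */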
868 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
869                                 int rwx, int mmu_idx)
870 {
871     CPUState *cs = CPU(cpu);
872     CPUPPCState *env = &cpu->env;
873     ppc_slb_t vrma_slbe;
874     ppc_slb_t *slb;
875     unsigned apshift;
876     hwaddr ptex;
877     ppc_hash_pte64_t pte;
878     int exec_prot, pp_prot, amr_prot, prot;
879     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
880     hwaddr raddr;
881 
882     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
883 
884     /*
885      * Note on LPCR usage: 970 uses HID4, but our special variant of
886      * store_spr copies relevant fields into env->spr[SPR_LPCR].
887      * Similarly we filter unimplemented bits when storing into LPCR
888      * depending on the MMU version. This code can thus just use the
889      * LPCR "as-is".
890      */
891 
892     /* 1. Handle real mode accesses */
893     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
894         /*
895          * Translation is supposedly "off", but in real mode the top 4
896          * effective address bits are (mostly) ignored
897          */
898         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
899 
900         if (cpu->vhyp) {
901             /*
902              * In virtual hypervisor mode, there's nothing to do:
903              *   EA == GPA == qemu guest address
904              */
905         } else if (msr_hv || !env->has_hv_mode) {
906             /* In HV mode, add HRMOR if top EA bit is clear */
907             if (!(eaddr >> 63)) {
908                 raddr |= env->spr[SPR_HRMOR];
909             }
910         } else if (ppc_hash64_use_vrma(env)) {
911             /* Emulated VRMA mode */
912             slb = &vrma_slbe;
913             if (build_vrma_slbe(cpu, slb) != 0) {
914                 /* Invalid VRMA setup, machine check */
915                 cs->exception_index = POWERPC_EXCP_MCHECK;
916                 env->error_code = 0;
917                 return 1;
918             }
919 
920             goto skip_slb_search;
921         } else {
922             target_ulong limit = rmls_limit(cpu);
923 
924             /* Emulated old-style RMO mode, bounds check against RMLS */
925             if (raddr >= limit) {
926                 if (rwx == 2) {
927                     ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
928                 } else {
929                     int dsisr = DSISR_PROTFAULT;
930                     if (rwx == 1) {
931                         dsisr |= DSISR_ISSTORE;
932                     }
933                     ppc_hash64_set_dsi(cs, eaddr, dsisr);
934                 }
935                 return 1;
936             }
937 
938             raddr |= env->spr[SPR_RMOR];
939         }
940         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
941                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
942                      TARGET_PAGE_SIZE);
943         return 0;
944     }
945 
946     /* 2. Translation is on, so look up the SLB */
947     slb = slb_lookup(cpu, eaddr);
948     if (!slb) {
949         /* No entry found; check whether in-memory segment tables are in use */
950         if (ppc64_use_proc_tbl(cpu)) {
951             /* TODO - Unsupported */
952             error_report("Segment Table Support Unimplemented");
953             exit(1);
954         }
955         /* Segment still not found, generate the appropriate interrupt */
956         if (rwx == 2) {
957             cs->exception_index = POWERPC_EXCP_ISEG;
958             env->error_code = 0;
959         } else {
960             cs->exception_index = POWERPC_EXCP_DSEG;
961             env->error_code = 0;
962             env->spr[SPR_DAR] = eaddr;
963         }
964         return 1;
965     }
966 
967 skip_slb_search:
968 
969     /* 3. Check for segment level no-execute violation */
970     if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
971         ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
972         return 1;
973     }
974 
975     /* 4. Locate the PTE in the hash table */
976     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
977     if (ptex == -1) {
978         if (rwx == 2) {
979             ppc_hash64_set_isi(cs, SRR1_NOPTE);
980         } else {
981             int dsisr = DSISR_NOPTE;
982             if (rwx == 1) {
983                 dsisr |= DSISR_ISSTORE;
984             }
985             ppc_hash64_set_dsi(cs, eaddr, dsisr);
986         }
987         return 1;
988     }
989     qemu_log_mask(CPU_LOG_MMU,
990                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
991 
992     /* 5. Check access permissions */
993 
994     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
995     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
996     amr_prot = ppc_hash64_amr_prot(cpu, pte);
997     prot = exec_prot & pp_prot & amr_prot;
998 
999     if ((need_prot[rwx] & ~prot) != 0) {
1000         /* Access right violation */
1001         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
1002         if (rwx == 2) {
1003             int srr1 = 0;
1004             if (PAGE_EXEC & ~exec_prot) {
1005                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
1006             } else if (PAGE_EXEC & ~pp_prot) {
1007                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
1008             }
1009             if (PAGE_EXEC & ~amr_prot) {
1010                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
1011             }
1012             ppc_hash64_set_isi(cs, srr1);
1013         } else {
1014             int dsisr = 0;
1015             if (need_prot[rwx] & ~pp_prot) {
1016                 dsisr |= DSISR_PROTFAULT;
1017             }
1018             if (rwx == 1) {
1019                 dsisr |= DSISR_ISSTORE;
1020             }
1021             if (need_prot[rwx] & ~amr_prot) {
1022                 dsisr |= DSISR_AMR;
1023             }
1024             ppc_hash64_set_dsi(cs, eaddr, dsisr);
1025         }
1026         return 1;
1027     }
1028 
1029     qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
1030 
1031     /* 6. Update PTE referenced and changed bits if necessary */
1032 
1033     if (!(pte.pte1 & HPTE64_R_R)) {
1034         ppc_hash64_set_r(cpu, ptex, pte.pte1);
1035     }
1036     if (!(pte.pte1 & HPTE64_R_C)) {
1037         if (rwx == 1) {
1038             ppc_hash64_set_c(cpu, ptex, pte.pte1);
1039         } else {
1040             /*
1041              * Treat the page as read-only for now, so that a later write
1042              * will pass through this function again to set the C bit
1043              */
1044             prot &= ~PAGE_WRITE;
1045         }
1046     }
1047 
1048     /* 7. Determine the real address from the PTE */
1049 
1050     raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
1051 
1052     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
1053                  prot, mmu_idx, 1ULL << apshift);
1054 
1055     return 0;
1056 }
1057 
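/*
 * Debug-only address translation (used e.g. by the gdbstub and monitor):
 * the same walk as ppc_hash64_handle_mmu_fault() but with no side effects
 * and no exceptions; returns -1 when the address cannot be translated.
 */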
1058 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
1059 {
1060     CPUPPCState *env = &cpu->env;
1061     ppc_slb_t vrma_slbe;
1062     ppc_slb_t *slb;
1063     hwaddr ptex, raddr;
1064     ppc_hash_pte64_t pte;
1065     unsigned apshift;
1066 
1067     /* Handle real mode */
1068     if (msr_dr == 0) {
1069         /* In real mode the top 4 effective address bits are ignored */
1070         raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
1071 
1072         if (cpu->vhyp) {
1073             /*
1074              * In virtual hypervisor mode, there's nothing to do:
1075              *   EA == GPA == qemu guest address
1076              */
1077             return raddr;
1078         } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
1079             /* In HV mode, add HRMOR if top EA bit is clear */
1080             return raddr | env->spr[SPR_HRMOR];
1081         } else if (ppc_hash64_use_vrma(env)) {
1082             /* Emulated VRMA mode */
1083             slb = &vrma_slbe;
1084             if (build_vrma_slbe(cpu, slb) != 0) {
1085                 return -1;
1086             }
1087         } else {
1088             target_ulong limit = rmls_limit(cpu);
1089 
1090             /* Emulated old-style RMO mode, bounds check against RMLS */
1091             if (raddr >= limit) {
1092                 return -1;
1093             }
1094             return raddr | env->spr[SPR_RMOR];
1095         }
1096     } else {
1097         slb = slb_lookup(cpu, addr);
1098         if (!slb) {
1099             return -1;
1100         }
1101     }
1102 
1103     ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
1104     if (ptex == -1) {
1105         return -1;
1106     }
1107 
1108     return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
1109         & TARGET_PAGE_MASK;
1110 }
1111 
1112 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
1113                                target_ulong pte0, target_ulong pte1)
1114 {
1115     /*
1116      * XXX: since there are too many segments to invalidate, and QEMU
1117      * still lacks a tlb_flush_mask(env, n, mask), we just invalidate
1118      * all TLBs
1119      */
1120     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
1121 }
1122 
1123 void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
1124 {
1125     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1126     CPUPPCState *env = &cpu->env;
1127 
1128     env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
1129     /* The gtse bit affects hflags */
1130     hreg_compute_hflags(env);
1131 }
1132 
1133 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1134 {
1135     PowerPCCPU *cpu = env_archcpu(env);
1136 
1137     ppc_store_lpcr(cpu, val);
1138 }
1139 
1140 void ppc_hash64_init(PowerPCCPU *cpu)
1141 {
1142     CPUPPCState *env = &cpu->env;
1143     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1144 
1145     if (!pcc->hash64_opts) {
1146         assert(!mmu_is_64bit(env->mmu_model));
1147         return;
1148     }
1149 
1150     cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
1151 }
1152 
1153 void ppc_hash64_finalize(PowerPCCPU *cpu)
1154 {
1155     g_free(cpu->hash64_opts);
1156 }
1157 
1158 const PPCHash64Options ppc_hash64_opts_basic = {
1159     .flags = 0,
1160     .slb_size = 64,
1161     .sps = {
1162         { .page_shift = 12, /* 4K */
1163           .slb_enc = 0,
1164           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1165         },
1166         { .page_shift = 24, /* 16M */
1167           .slb_enc = 0x100,
1168           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1169         },
1170     },
1171 };
1172 
1173 const PPCHash64Options ppc_hash64_opts_POWER7 = {
1174     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
1175     .slb_size = 32,
1176     .sps = {
1177         {
1178             .page_shift = 12, /* 4K */
1179             .slb_enc = 0,
1180             .enc = { { .page_shift = 12, .pte_enc = 0 },
1181                      { .page_shift = 16, .pte_enc = 0x7 },
1182                      { .page_shift = 24, .pte_enc = 0x38 }, },
1183         },
1184         {
1185             .page_shift = 16, /* 64K */
1186             .slb_enc = SLB_VSID_64K,
1187             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1188                      { .page_shift = 24, .pte_enc = 0x8 }, },
1189         },
1190         {
1191             .page_shift = 24, /* 16M */
1192             .slb_enc = SLB_VSID_16M,
1193             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1194         },
1195         {
1196             .page_shift = 34, /* 16G */
1197             .slb_enc = SLB_VSID_16G,
1198             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1199         },
1200     }
1201 };
1202 
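/*
 * Shrink cpu->hash64_opts->sps in place, keeping only the (segment page
 * shift, actual page shift) pairs for which the callback returns true; the
 * remaining rows and entries are compacted and the rest of the table is
 * zeroed.  If no page of 64k or larger survives, PPC_HASH64_CI_LARGEPAGE is
 * cleared as well.  Typically used to restrict the advertised page sizes to
 * what the host supports (an assumption about the caller, e.g. KVM setup).
 */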
1203 void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
1204                                  bool (*cb)(void *, uint32_t, uint32_t),
1205                                  void *opaque)
1206 {
1207     PPCHash64Options *opts = cpu->hash64_opts;
1208     int i;
1209     int n = 0;
1210     bool ci_largepage = false;
1211 
1212     assert(opts);
1213 
1214     n = 0;
1215     for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
1216         PPCHash64SegmentPageSizes *sps = &opts->sps[i];
1217         int j;
1218         int m = 0;
1219 
1220         assert(n <= i);
1221 
1222         if (!sps->page_shift) {
1223             break;
1224         }
1225 
1226         for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
1227             PPCHash64PageSize *ps = &sps->enc[j];
1228 
1229             assert(m <= j);
1230             if (!ps->page_shift) {
1231                 break;
1232             }
1233 
1234             if (cb(opaque, sps->page_shift, ps->page_shift)) {
1235                 if (ps->page_shift >= 16) {
1236                     ci_largepage = true;
1237                 }
1238                 sps->enc[m++] = *ps;
1239             }
1240         }
1241 
1242         /* Clear rest of the row */
1243         for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
1244             memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
1245         }
1246 
1247         if (m) {
1248             n++;
1249         }
1250     }
1251 
1252     /* Clear the rest of the table */
1253     for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
1254         memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
1255     }
1256 
1257     if (!ci_largepage) {
1258         opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
1259     }
1260 }
1261