xref: /openbmc/qemu/target/ppc/mmu-hash64.c (revision 37677d7d)
1 /*
2  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (c) 2013 David Gibson, IBM Corporation
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "exec/helper-proto.h"
24 #include "qemu/error-report.h"
25 #include "qemu/qemu-print.h"
26 #include "sysemu/hw_accel.h"
27 #include "kvm_ppc.h"
28 #include "mmu-hash64.h"
29 #include "exec/log.h"
30 #include "hw/hw.h"
31 #include "mmu-book3s-v3.h"
32 
33 /* #define DEBUG_SLB */
34 
35 #ifdef DEBUG_SLB
36 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
37 #else
38 #  define LOG_SLB(...) do { } while (0)
39 #endif
40 
41 /*
42  * SLB handling
43  */
44 
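/*
 * Look up the SLB entry (if any) covering effective address @eaddr.
 * Both the 256MB and 1TB ESID forms are checked against every valid
 * slot; returns NULL if no entry matches.
 */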
45 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
46 {
47     CPUPPCState *env = &cpu->env;
48     uint64_t esid_256M, esid_1T;
49     int n;
50 
51     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
52 
53     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
54     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
55 
56     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
57         ppc_slb_t *slb = &env->slb[n];
58 
59         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
60                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
61         /*
62          * We check for 1T matches on all MMUs here - if the MMU
63          * doesn't have 1T segment support, we will have prevented 1T
64          * entries from being inserted in the slbmte code.
65          */
66         if (((slb->esid == esid_256M) &&
67              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
68             || ((slb->esid == esid_1T) &&
69                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
70             return slb;
71         }
72     }
73 
74     return NULL;
75 }
76 
77 void dump_slb(PowerPCCPU *cpu)
78 {
79     CPUPPCState *env = &cpu->env;
80     int i;
81     uint64_t slbe, slbv;
82 
83     cpu_synchronize_state(CPU(cpu));
84 
85     qemu_printf("SLB\tESID\t\t\tVSID\n");
86     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
87         slbe = env->slb[i].esid;
88         slbv = env->slb[i].vsid;
89         if (slbe == 0 && slbv == 0) {
90             continue;
91         }
92         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
93                     i, slbe, slbv);
94     }
95 }
96 
97 void helper_slbia(CPUPPCState *env)
98 {
99     PowerPCCPU *cpu = env_archcpu(env);
100     int n;
101 
102     /* XXX: Warning: slbia never invalidates the first segment */
103     for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
104         ppc_slb_t *slb = &env->slb[n];
105 
106         if (slb->esid & SLB_ESID_V) {
107             slb->esid &= ~SLB_ESID_V;
108             /*
109              * XXX: given that the segment size is 256 MB or 1 TB, and we
110              *      still don't have a tlb_flush_mask(env, n, mask) in
111              *      QEMU, we just invalidate all TLBs
112              */
113             env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
114         }
115     }
116 }
117 
118 static void __helper_slbie(CPUPPCState *env, target_ulong addr,
119                            target_ulong global)
120 {
121     PowerPCCPU *cpu = env_archcpu(env);
122     ppc_slb_t *slb;
123 
124     slb = slb_lookup(cpu, addr);
125     if (!slb) {
126         return;
127     }
128 
129     if (slb->esid & SLB_ESID_V) {
130         slb->esid &= ~SLB_ESID_V;
131 
132         /*
133          * XXX: given that the segment size is 256 MB or 1 TB, and we still
134          *      don't have a tlb_flush_mask(env, n, mask) in QEMU, we just
135          *      invalidate all TLBs
136          */
137         env->tlb_need_flush |=
138             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
139     }
140 }
141 
142 void helper_slbie(CPUPPCState *env, target_ulong addr)
143 {
144     __helper_slbie(env, addr, false);
145 }
146 
147 void helper_slbieg(CPUPPCState *env, target_ulong addr)
148 {
149     __helper_slbie(env, addr, true);
150 }
151 
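/*
 * Validate and store an SLB entry.  Returns 0 on success, or -1 if the
 * slot number, the reserved bits, the segment size or the page size
 * encoding in @vsid is invalid.
 */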
152 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
153                   target_ulong esid, target_ulong vsid)
154 {
155     CPUPPCState *env = &cpu->env;
156     ppc_slb_t *slb = &env->slb[slot];
157     const PPCHash64SegmentPageSizes *sps = NULL;
158     int i;
159 
160     if (slot >= cpu->hash64_opts->slb_size) {
161         return -1; /* Bad slot number */
162     }
163     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
164         return -1; /* Reserved bits set */
165     }
166     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
167         return -1; /* Bad segment size */
168     }
169     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
170         return -1; /* 1T segment on MMU that doesn't support it */
171     }
172 
173     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
174         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
175 
176         if (!sps1->page_shift) {
177             break;
178         }
179 
180         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
181             sps = sps1;
182             break;
183         }
184     }
185 
186     if (!sps) {
187         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
188                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
189                      slot, esid, vsid);
190         return -1;
191     }
192 
193     slb->esid = esid;
194     slb->vsid = vsid;
195     slb->sps = sps;
196 
197     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
198             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
199             slb->esid, slb->vsid);
200 
201     return 0;
202 }
203 
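/*
 * Back ends for the SLB read helpers below: fetch the ESID or VSID of
 * the slot selected by the low bits of RB, or look up the VSID for an
 * effective address.
 */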
204 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
205                              target_ulong *rt)
206 {
207     CPUPPCState *env = &cpu->env;
208     int slot = rb & 0xfff;
209     ppc_slb_t *slb = &env->slb[slot];
210 
211     if (slot >= cpu->hash64_opts->slb_size) {
212         return -1;
213     }
214 
215     *rt = slb->esid;
216     return 0;
217 }
218 
219 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
220                              target_ulong *rt)
221 {
222     CPUPPCState *env = &cpu->env;
223     int slot = rb & 0xfff;
224     ppc_slb_t *slb = &env->slb[slot];
225 
226     if (slot >= cpu->hash64_opts->slb_size) {
227         return -1;
228     }
229 
230     *rt = slb->vsid;
231     return 0;
232 }
233 
234 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
235                              target_ulong *rt)
236 {
237     CPUPPCState *env = &cpu->env;
238     ppc_slb_t *slb;
239 
240     if (!msr_is_64bit(env, env->msr)) {
241         rb &= 0xffffffff;
242     }
243     slb = slb_lookup(cpu, rb);
244     if (slb == NULL) {
245         *rt = (target_ulong)-1ul;
246     } else {
247         *rt = slb->vsid;
248     }
249     return 0;
250 }
251 
252 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
253 {
254     PowerPCCPU *cpu = env_archcpu(env);
255 
256     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
257         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
258                                POWERPC_EXCP_INVAL, GETPC());
259     }
260 }
261 
262 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
263 {
264     PowerPCCPU *cpu = env_archcpu(env);
265     target_ulong rt = 0;
266 
267     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
268         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
269                                POWERPC_EXCP_INVAL, GETPC());
270     }
271     return rt;
272 }
273 
274 target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
275 {
276     PowerPCCPU *cpu = env_archcpu(env);
277     target_ulong rt = 0;
278 
279     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
280         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
281                                POWERPC_EXCP_INVAL, GETPC());
282     }
283     return rt;
284 }
285 
286 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
287 {
288     PowerPCCPU *cpu = env_archcpu(env);
289     target_ulong rt = 0;
290 
291     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
292         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
293                                POWERPC_EXCP_INVAL, GETPC());
294     }
295     return rt;
296 }
297 
298 /* Check No-Execute or Guarded Storage */
299 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
300                                               ppc_hash_pte64_t pte)
301 {
302     /* Exec permissions CANNOT take away read or write permissions */
303     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
304             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
305 }
306 
307 /* Check Basic Storage Protection */
308 static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
309                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
310 {
311     CPUPPCState *env = &cpu->env;
312     unsigned pp, key;
313     /*
314      * Some pp bit combinations have undefined behaviour, so default
315      * to no access in those cases
316      */
317     int prot = 0;
318 
319     key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
320              : (slb->vsid & SLB_VSID_KS));
321     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
322 
323     if (key == 0) {
324         switch (pp) {
325         case 0x0:
326         case 0x1:
327         case 0x2:
328             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
329             break;
330 
331         case 0x3:
332         case 0x6:
333             prot = PAGE_READ | PAGE_EXEC;
334             break;
335         }
336     } else {
337         switch (pp) {
338         case 0x0:
339         case 0x6:
340             break;
341 
342         case 0x1:
343         case 0x3:
344             prot = PAGE_READ | PAGE_EXEC;
345             break;
346 
347         case 0x2:
348             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
349             break;
350         }
351     }
352 
353     return prot;
354 }
355 
356 /* Check the instruction access permissions specified in the IAMR */
357 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
358 {
359     CPUPPCState *env = &cpu->env;
360     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
361 
362     /*
363      * An instruction fetch is permitted if the IAMR bit is 0.
364      * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
365      * can only take away EXEC permissions, not READ or WRITE permissions.
366      * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
367      * EXEC permissions are allowed.
368      */
369     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
370                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
371 }
372 
373 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
374 {
375     CPUPPCState *env = &cpu->env;
376     int key, amrbits;
377     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
378 
379     /* Only recent MMUs implement Virtual Page Class Key Protection */
380     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
381         return prot;
382     }
383 
384     key = HPTE64_R_KEY(pte.pte1);
385     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
386 
387     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
388     /*         env->spr[SPR_AMR]); */
389 
390     /*
391      * A store is permitted if the AMR bit is 0. Remove write
392      * protection if it is set.
393      */
394     if (amrbits & 0x2) {
395         prot &= ~PAGE_WRITE;
396     }
397     /*
398      * A load is permitted if the AMR bit is 0. Remove read
399      * protection if it is set.
400      */
401     if (amrbits & 0x1) {
402         prot &= ~PAGE_READ;
403     }
404 
405     switch (env->mmu_model) {
406     /*
407      * MMU versions 2.07 and later support the IAMR. Check whether the
408      * IAMR allows the instruction access: ppc_hash64_iamr_prot() returns
409      * a mask without PAGE_EXEC if it doesn't (so that bit is cleared from
410      * prot below) and with PAGE_EXEC if it does (leaving prot unchanged).
411      */
412     case POWERPC_MMU_2_07:
413     case POWERPC_MMU_3_00:
414         prot &= ppc_hash64_iamr_prot(cpu, key);
415         break;
416     default:
417         break;
418     }
419 
420     return prot;
421 }
422 
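/*
 * Map @n HPTEs starting at index @ptex for reading, either through the
 * virtual hypervisor or directly from the guest hash table in memory.
 * The mapping must be released with ppc_hash64_unmap_hptes().
 */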
423 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
424                                              hwaddr ptex, int n)
425 {
426     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
427     hwaddr base;
428     hwaddr plen = n * HASH_PTE_SIZE_64;
429     const ppc_hash_pte64_t *hptes;
430 
431     if (cpu->vhyp) {
432         PPCVirtualHypervisorClass *vhc =
433             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
434         return vhc->map_hptes(cpu->vhyp, ptex, n);
435     }
436     base = ppc_hash64_hpt_base(cpu);
437 
438     if (!base) {
439         return NULL;
440     }
441 
442     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
443                               MEMTXATTRS_UNSPECIFIED);
444     if (plen < (n * HASH_PTE_SIZE_64)) {
445         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
446     }
447     return hptes;
448 }
449 
450 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
451                             hwaddr ptex, int n)
452 {
453     if (cpu->vhyp) {
454         PPCVirtualHypervisorClass *vhc =
455             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
456         vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
457         return;
458     }
459 
460     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
461                         false, n * HASH_PTE_SIZE_64);
462 }
463 
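/*
 * Work out the actual page size encoded in a HPTE, given the page
 * sizes supported by the segment.  Returns the page shift, or 0 if the
 * encoding does not match any of the segment's page sizes.
 */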
464 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
465                                 uint64_t pte0, uint64_t pte1)
466 {
467     int i;
468 
469     if (!(pte0 & HPTE64_V_LARGE)) {
470         if (sps->page_shift != 12) {
471             /* 4kiB page in a non 4kiB segment */
472             return 0;
473         }
474         /* Normal 4kiB page */
475         return 12;
476     }
477 
478     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
479         const PPCHash64PageSize *ps = &sps->enc[i];
480         uint64_t mask;
481 
482         if (!ps->page_shift) {
483             break;
484         }
485 
486         if (ps->page_shift == 12) {
487             /* L bit is set so this can't be a 4kiB page */
488             continue;
489         }
490 
491         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
492 
493         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
494             return ps->page_shift;
495         }
496     }
497 
498     return 0; /* Bad page size encoding */
499 }
500 
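/*
 * ISA v3.00 moved the segment size (B) field from the first to the
 * second doubleword of the HPTE.  Convert a new-format HPTE to the
 * pre-v3.00 layout that the rest of this code works with.
 */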
501 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
502 {
503     /* Insert B into pte0 */
504     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
505             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
506              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
507 
508     /* Remove B from pte1 */
509     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
510 }
511 
512 
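/*
 * Search one PTE group (8 HPTEs) for an entry matching @ptem.  On a
 * match, the PTE is copied to @pte, its page shift to @pshift, and its
 * index in the hash table is returned; -1 if nothing matches.
 */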
513 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
514                                      const PPCHash64SegmentPageSizes *sps,
515                                      target_ulong ptem,
516                                      ppc_hash_pte64_t *pte, unsigned *pshift)
517 {
518     int i;
519     const ppc_hash_pte64_t *pteg;
520     target_ulong pte0, pte1;
521     target_ulong ptex;
522 
523     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
524     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
525     if (!pteg) {
526         return -1;
527     }
528     for (i = 0; i < HPTES_PER_GROUP; i++) {
529         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
530         /*
531          * pte0 contains the valid bit and must be read before pte1,
532          * otherwise we might see an old pte1 with a new valid bit and
533          * thus an inconsistent hpte value
534          */
535         smp_rmb();
536         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
537 
538         /* Convert format if necessary */
539         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
540             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
541         }
542 
543         /* This compares V, B, H (secondary) and the AVPN */
544         if (HPTE64_V_COMPARE(pte0, ptem)) {
545             *pshift = hpte_page_shift(sps, pte0, pte1);
546             /*
547              * If there is no match, ignore the PTE, it could simply
548              * be for a different segment size encoding and the
549              * architecture specifies we should not match. Linux will
550              * potentially leave behind PTEs for the wrong base page
551              * size when demoting segments.
552              */
553             if (*pshift == 0) {
554                 continue;
555             }
556             /*
557              * We don't do anything with pshift yet as qemu TLB only
558              * deals with 4K pages anyway
559              */
560             pte->pte0 = pte0;
561             pte->pte1 = pte1;
562             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
563             return ptex + i;
564         }
565     }
566     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
567     /*
568      * We didn't find a valid entry.
569      */
570     return -1;
571 }
572 
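/*
 * Full hash table lookup for @eaddr: compute the hash from the SLB
 * entry's VSID and the page offset, then search the primary PTEG and,
 * if that fails, the secondary PTEG.  Returns the PTE index or -1.
 */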
573 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
574                                      ppc_slb_t *slb, target_ulong eaddr,
575                                      ppc_hash_pte64_t *pte, unsigned *pshift)
576 {
577     CPUPPCState *env = &cpu->env;
578     hwaddr hash, ptex;
579     uint64_t vsid, epnmask, epn, ptem;
580     const PPCHash64SegmentPageSizes *sps = slb->sps;
581 
582     /*
583      * The SLB store path should prevent any bad page size encodings
584      * getting in there, so:
585      */
586     assert(sps);
587 
588     /* If ISL is set in LPCR we need to clamp the page size to 4K */
589     if (env->spr[SPR_LPCR] & LPCR_ISL) {
590         /* We assume that when using TCG, 4k is the first entry of SPS */
591         sps = &cpu->hash64_opts->sps[0];
592         assert(sps->page_shift == 12);
593     }
594 
595     epnmask = ~((1ULL << sps->page_shift) - 1);
596 
597     if (slb->vsid & SLB_VSID_B) {
598         /* 1TB segment */
599         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
600         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
601         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
602     } else {
603         /* 256M segment */
604         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
605         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
606         hash = vsid ^ (epn >> sps->page_shift);
607     }
608     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
609     ptem |= HPTE64_V_VALID;
610 
611     /* Page address translation */
612     qemu_log_mask(CPU_LOG_MMU,
613             "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
614             " hash " TARGET_FMT_plx "\n",
615             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
616 
617     /* Primary PTEG lookup */
618     qemu_log_mask(CPU_LOG_MMU,
619             "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
620             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
621             " hash=" TARGET_FMT_plx "\n",
622             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
623             vsid, ptem,  hash);
624     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
625 
626     if (ptex == -1) {
627         /* Secondary PTEG lookup */
628         ptem |= HPTE64_V_SECONDARY;
629         qemu_log_mask(CPU_LOG_MMU,
630                 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
631                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
632                 " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
633                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
634 
635         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
636     }
637 
638     return ptex;
639 }
640 
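/*
 * Work out the page size of a HPTE when no SLB entry is available:
 * a PTE without HPTE64_V_LARGE is 4kiB, otherwise each supported
 * segment page size class is tried in turn.  Returns 0 if the
 * encoding is not recognised.
 */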
641 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
642                                           uint64_t pte0, uint64_t pte1)
643 {
644     int i;
645 
646     if (!(pte0 & HPTE64_V_LARGE)) {
647         return 12;
648     }
649 
650     /*
651      * The encodings in env->sps need to be carefully chosen so that
652      * this gives an unambiguous result.
653      */
654     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
655         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
656         unsigned shift;
657 
658         if (!sps->page_shift) {
659             break;
660         }
661 
662         shift = hpte_page_shift(sps, pte0, pte1);
663         if (shift) {
664             return shift;
665         }
666     }
667 
668     return 0;
669 }
670 
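/*
 * Raise an instruction storage interrupt.  When the relevant LPCR VPM
 * bit is set and we are not already in hypervisor mode, the fault is
 * delivered as an HISI instead of an ISI.
 */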
671 static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
672 {
673     CPUPPCState *env = &POWERPC_CPU(cs)->env;
674     bool vpm;
675 
676     if (msr_ir) {
677         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
678     } else {
679         switch (env->mmu_model) {
680         case POWERPC_MMU_3_00:
681             /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
682             vpm = true;
683             break;
684         default:
685             vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
686             break;
687         }
688     }
689     if (vpm && !msr_hv) {
690         cs->exception_index = POWERPC_EXCP_HISI;
691     } else {
692         cs->exception_index = POWERPC_EXCP_ISI;
693     }
694     env->error_code = error_code;
695 }
696 
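/* Data storage equivalent of ppc_hash64_set_isi(): raise a DSI or an HDSI. */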
697 static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
698 {
699     CPUPPCState *env = &POWERPC_CPU(cs)->env;
700     bool vpm;
701 
702     if (msr_dr) {
703         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
704     } else {
705         switch (env->mmu_model) {
706         case POWERPC_MMU_3_00:
707             /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
708             vpm = true;
709             break;
710         default:
711             vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
712             break;
713         }
714     }
715     if (vpm && !msr_hv) {
716         cs->exception_index = POWERPC_EXCP_HDSI;
717         env->spr[SPR_HDAR] = dar;
718         env->spr[SPR_HDSISR] = dsisr;
719     } else {
720         cs->exception_index = POWERPC_EXCP_DSI;
721         env->spr[SPR_DAR] = dar;
722         env->spr[SPR_DSISR] = dsisr;
723     }
724     env->error_code = 0;
725 }
726 
727 
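/*
 * Set the Referenced (R) bit in the HPTE at @ptex; ppc_hash64_set_c()
 * below does the same for the Changed (C) bit.  With a virtual
 * hypervisor the update is delegated to it, otherwise the byte holding
 * the bit is written directly into the hash table in guest memory.
 */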
728 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
729 {
730     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14; /* byte with the R bit */
731 
732     if (cpu->vhyp) {
733         PPCVirtualHypervisorClass *vhc =
734             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
735         vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
736         return;
737     }
738     base = ppc_hash64_hpt_base(cpu);
739 
740 
741     /* The HW performs a non-atomic byte update */
742     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
743 }
744 
745 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
746 {
747     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
748 
749     if (cpu->vhyp) {
750         PPCVirtualHypervisorClass *vhc =
751             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
752         vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
753         return;
754     }
755     base = ppc_hash64_hpt_base(cpu);
756 
757     /* The HW performs a non-atomic byte update */
758     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
759 }
760 
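/*
 * Main TCG fault handler for the 64-bit hash MMU: translate @eaddr for
 * access type @rwx (0 = load, 1 = store, 2 = instruction fetch).  On
 * success a TLB entry is installed and 0 is returned; otherwise the
 * appropriate exception state is set up and 1 is returned.
 */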
761 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
762                                 int rwx, int mmu_idx)
763 {
764     CPUState *cs = CPU(cpu);
765     CPUPPCState *env = &cpu->env;
766     ppc_slb_t *slb;
767     unsigned apshift;
768     hwaddr ptex;
769     ppc_hash_pte64_t pte;
770     int exec_prot, pp_prot, amr_prot, prot;
771     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
772     hwaddr raddr;
773 
774     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
775 
776     /*
777      * Note on LPCR usage: 970 uses HID4, but our special variant of
778      * store_spr copies relevant fields into env->spr[SPR_LPCR].
779      * Similarly we filter unimplemented bits when storing into LPCR
780      * depending on the MMU version. This code can thus just use the
781      * LPCR "as-is".
782      */
783 
784     /* 1. Handle real mode accesses */
785     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
786         /*
787          * Translation is supposedly "off", but in real mode the top 4
788          * effective address bits are (mostly) ignored
789          */
790         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
791 
792         /* In HV mode, add HRMOR if top EA bit is clear */
793         if (msr_hv || !env->has_hv_mode) {
794             if (!(eaddr >> 63)) {
795                 raddr |= env->spr[SPR_HRMOR];
796             }
797         } else {
798             /* Otherwise, check VPM for RMA vs VRMA */
799             if (env->spr[SPR_LPCR] & LPCR_VPM0) {
800                 slb = &env->vrma_slb;
801                 if (slb->sps) {
802                     goto skip_slb_search;
803                 }
804                 /* Not much else to do here */
805                 cs->exception_index = POWERPC_EXCP_MCHECK;
806                 env->error_code = 0;
807                 return 1;
808             } else if (raddr < env->rmls) {
809                 /* RMA. Check bounds in RMLS */
810                 raddr |= env->spr[SPR_RMOR];
811             } else {
812                 /* The access failed, generate the appropriate interrupt */
813                 if (rwx == 2) {
814                     ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
815                 } else {
816                     int dsisr = DSISR_PROTFAULT;
817                     if (rwx == 1) {
818                         dsisr |= DSISR_ISSTORE;
819                     }
820                     ppc_hash64_set_dsi(cs, eaddr, dsisr);
821                 }
822                 return 1;
823             }
824         }
825         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
826                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
827                      TARGET_PAGE_SIZE);
828         return 0;
829     }
830 
831     /* 2. Translation is on, so look up the SLB */
832     slb = slb_lookup(cpu, eaddr);
833     if (!slb) {
834         /* No entry found, check if in-memory segment tables are in use */
835         if (ppc64_use_proc_tbl(cpu)) {
836             /* TODO - Unsupported */
837             error_report("Segment Table Support Unimplemented");
838             exit(1);
839         }
840         /* Segment still not found, generate the appropriate interrupt */
841         if (rwx == 2) {
842             cs->exception_index = POWERPC_EXCP_ISEG;
843             env->error_code = 0;
844         } else {
845             cs->exception_index = POWERPC_EXCP_DSEG;
846             env->error_code = 0;
847             env->spr[SPR_DAR] = eaddr;
848         }
849         return 1;
850     }
851 
852 skip_slb_search:
853 
854     /* 3. Check for segment level no-execute violation */
855     if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
856         ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
857         return 1;
858     }
859 
860     /* 4. Locate the PTE in the hash table */
861     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
862     if (ptex == -1) {
863         if (rwx == 2) {
864             ppc_hash64_set_isi(cs, SRR1_NOPTE);
865         } else {
866             int dsisr = DSISR_NOPTE;
867             if (rwx == 1) {
868                 dsisr |= DSISR_ISSTORE;
869             }
870             ppc_hash64_set_dsi(cs, eaddr, dsisr);
871         }
872         return 1;
873     }
874     qemu_log_mask(CPU_LOG_MMU,
875                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
876 
877     /* 5. Check access permissions */
878 
879     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
880     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
881     amr_prot = ppc_hash64_amr_prot(cpu, pte);
882     prot = exec_prot & pp_prot & amr_prot;
883 
884     if ((need_prot[rwx] & ~prot) != 0) {
885         /* Access right violation */
886         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
887         if (rwx == 2) {
888             int srr1 = 0;
889             if (PAGE_EXEC & ~exec_prot) {
890                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
891             } else if (PAGE_EXEC & ~pp_prot) {
892                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
893             }
894             if (PAGE_EXEC & ~amr_prot) {
895                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
896             }
897             ppc_hash64_set_isi(cs, srr1);
898         } else {
899             int dsisr = 0;
900             if (need_prot[rwx] & ~pp_prot) {
901                 dsisr |= DSISR_PROTFAULT;
902             }
903             if (rwx == 1) {
904                 dsisr |= DSISR_ISSTORE;
905             }
906             if (need_prot[rwx] & ~amr_prot) {
907                 dsisr |= DSISR_AMR;
908             }
909             ppc_hash64_set_dsi(cs, eaddr, dsisr);
910         }
911         return 1;
912     }
913 
914     qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
915 
916     /* 6. Update PTE referenced and changed bits if necessary */
917 
918     if (!(pte.pte1 & HPTE64_R_R)) {
919         ppc_hash64_set_r(cpu, ptex, pte.pte1);
920     }
921     if (!(pte.pte1 & HPTE64_R_C)) {
922         if (rwx == 1) {
923             ppc_hash64_set_c(cpu, ptex, pte.pte1);
924         } else {
925             /*
926              * Treat the page as read-only for now, so that a later write
927              * will pass through this function again to set the C bit
928              */
929             prot &= ~PAGE_WRITE;
930         }
931     }
932 
933     /* 7. Determine the real address from the PTE */
934 
935     raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
936 
937     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
938                  prot, mmu_idx, 1ULL << apshift);
939 
940     return 0;
941 }
942 
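/*
 * Debug (gdbstub/monitor) translation: the same lookup as the fault
 * handler, but without raising exceptions or updating the R/C bits.
 * Returns -1 if the address does not translate.
 */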
943 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
944 {
945     CPUPPCState *env = &cpu->env;
946     ppc_slb_t *slb;
947     hwaddr ptex, raddr;
948     ppc_hash_pte64_t pte;
949     unsigned apshift;
950 
951     /* Handle real mode */
952     if (msr_dr == 0) {
953         /* In real mode the top 4 effective address bits are ignored */
954         raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
955 
956         /* In HV mode, add HRMOR if top EA bit is clear */
957         if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
958             return raddr | env->spr[SPR_HRMOR];
959         }
960 
961         /* Otherwise, check VPM for RMA vs VRMA */
962         if (env->spr[SPR_LPCR] & LPCR_VPM0) {
963             slb = &env->vrma_slb;
964             if (!slb->sps) {
965                 return -1;
966             }
967         } else if (raddr < env->rmls) {
968             /* RMA. Check bounds in RMLS */
969             return raddr | env->spr[SPR_RMOR];
970         } else {
971             return -1;
972         }
973     } else {
974         slb = slb_lookup(cpu, addr);
975         if (!slb) {
976             return -1;
977         }
978     }
979 
980     ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
981     if (ptex == -1) {
982         return -1;
983     }
984 
985     return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
986         & TARGET_PAGE_MASK;
987 }
988 
989 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
990                                target_ulong pte0, target_ulong pte1)
991 {
992     /*
993      * XXX: given that there are too many segments to invalidate, and we
994      *      still don't have a tlb_flush_mask(env, n, mask) in QEMU, we
995      *      just invalidate all TLBs
996      */
997     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
998 }
999 
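/*
 * Decode LPCR[RMLS] (the real mode limit selector) into env->rmls, the
 * size in bytes of the real mode area; unrecognised encodings leave it
 * at 0.
 */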
1000 static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
1001 {
1002     CPUPPCState *env = &cpu->env;
1003     uint64_t lpcr = env->spr[SPR_LPCR];
1004 
1005     /*
1006      * This is the full 4-bit encoding of POWER8. Earlier CPUs only
1007      * support a subset of these, but the filtering is done when
1008      * writing LPCR
1009      */
1010     switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
1011     case 0x8: /* 32MB */
1012         env->rmls = 0x2000000ull;
1013         break;
1014     case 0x3: /* 64MB */
1015         env->rmls = 0x4000000ull;
1016         break;
1017     case 0x7: /* 128MB */
1018         env->rmls = 0x8000000ull;
1019         break;
1020     case 0x4: /* 256MB */
1021         env->rmls = 0x10000000ull;
1022         break;
1023     case 0x2: /* 1GB */
1024         env->rmls = 0x40000000ull;
1025         break;
1026     case 0x1: /* 16GB */
1027         env->rmls = 0x400000000ull;
1028         break;
1029     default:
1030         /* What to do here ??? */
1031         env->rmls = 0;
1032     }
1033 }
1034 
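/*
 * Rebuild env->vrma_slb, the synthetic SLB entry used to translate
 * real mode accesses when LPCR[VPM0] is set (VRMA).  The entry is left
 * cleared if VRMA is disabled or the VRMASD encoding is invalid.
 */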
1035 static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
1036 {
1037     CPUPPCState *env = &cpu->env;
1038     const PPCHash64SegmentPageSizes *sps = NULL;
1039     target_ulong esid, vsid, lpcr;
1040     ppc_slb_t *slb = &env->vrma_slb;
1041     uint32_t vrmasd;
1042     int i;
1043 
1044     /* First clear it */
1045     slb->esid = slb->vsid = 0;
1046     slb->sps = NULL;
1047 
1048     /* Is VRMA enabled ? */
1049     lpcr = env->spr[SPR_LPCR];
1050     if (!(lpcr & LPCR_VPM0)) {
1051         return;
1052     }
1053 
1054     /*
1055      * Make one up. Mostly ignore the ESID which will not be needed
1056      * for translation
1057      */
1058     vsid = SLB_VSID_VRMA;
1059     vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
1060     vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
1061     esid = SLB_ESID_V;
1062 
1063     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
1064         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
1065 
1066         if (!sps1->page_shift) {
1067             break;
1068         }
1069 
1070         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
1071             sps = sps1;
1072             break;
1073         }
1074     }
1075 
1076     if (!sps) {
1077         error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
1078                      " vsid 0x"TARGET_FMT_lx, esid, vsid);
1079         return;
1080     }
1081 
1082     slb->vsid = vsid;
1083     slb->esid = esid;
1084     slb->sps = sps;
1085 }
1086 
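/*
 * Store to LPCR: keep only the bits implemented by the current MMU
 * model (the 970 case translates the relevant HID4 fields into LPCR
 * format), then refresh the derived RMLS and VRMA state.
 */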
1087 void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
1088 {
1089     CPUPPCState *env = &cpu->env;
1090     uint64_t lpcr = 0;
1091 
1092     /* Filter out bits */
1093     switch (env->mmu_model) {
1094     case POWERPC_MMU_64B: /* 970 */
1095         if (val & 0x40) {
1096             lpcr |= LPCR_LPES0;
1097         }
1098         if (val & 0x8000000000000000ull) {
1099             lpcr |= LPCR_LPES1;
1100         }
1101         if (val & 0x20) {
1102             lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
1103         }
1104         if (val & 0x4000000000000000ull) {
1105             lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
1106         }
1107         if (val & 0x2000000000000000ull) {
1108             lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
1109         }
1110         env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
1111 
1112         /*
1113          * XXX We could also write LPID from HID4 here
1114          * but since we don't tag any translation on it
1115          * but since we don't tag any translation on it,
1116          *
1117          * XXX For proper emulation of 970 we also need
1118          * to dig HRMOR out of HID5
1119          */
1120         break;
1121     case POWERPC_MMU_2_03: /* P5p */
1122         lpcr = val & (LPCR_RMLS | LPCR_ILE |
1123                       LPCR_LPES0 | LPCR_LPES1 |
1124                       LPCR_RMI | LPCR_HDICE);
1125         break;
1126     case POWERPC_MMU_2_06: /* P7 */
1127         lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
1128                       LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
1129                       LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
1130                       LPCR_MER | LPCR_TC |
1131                       LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
1132         break;
1133     case POWERPC_MMU_2_07: /* P8 */
1134         lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
1135                       LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
1136                       LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
1137                       LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
1138                       LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
1139         break;
1140     case POWERPC_MMU_3_00: /* P9 */
1141         lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
1142                       (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
1143                       LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
1144                       (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
1145                       LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
1146                       LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
1147         /*
1148          * If we have a virtual hypervisor, we need to bring back RMLS. It
1149          * doesn't exist on an actual P9 but that's all we know how to
1150          * configure with softmmu at the moment
1151          */
1152         if (cpu->vhyp) {
1153             lpcr |= (val & LPCR_RMLS);
1154         }
1155         break;
1156     default:
1157         ;
1158     }
1159     env->spr[SPR_LPCR] = lpcr;
1160     ppc_hash64_update_rmls(cpu);
1161     ppc_hash64_update_vrma(cpu);
1162 }
1163 
1164 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1165 {
1166     PowerPCCPU *cpu = env_archcpu(env);
1167 
1168     ppc_store_lpcr(cpu, val);
1169 }
1170 
1171 void ppc_hash64_init(PowerPCCPU *cpu)
1172 {
1173     CPUPPCState *env = &cpu->env;
1174     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1175 
1176     if (!pcc->hash64_opts) {
1177         assert(!(env->mmu_model & POWERPC_MMU_64));
1178         return;
1179     }
1180 
1181     cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
1182 }
1183 
1184 void ppc_hash64_finalize(PowerPCCPU *cpu)
1185 {
1186     g_free(cpu->hash64_opts);
1187 }
1188 
1189 const PPCHash64Options ppc_hash64_opts_basic = {
1190     .flags = 0,
1191     .slb_size = 64,
1192     .sps = {
1193         { .page_shift = 12, /* 4K */
1194           .slb_enc = 0,
1195           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1196         },
1197         { .page_shift = 24, /* 16M */
1198           .slb_enc = 0x100,
1199           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1200         },
1201     },
1202 };
1203 
1204 const PPCHash64Options ppc_hash64_opts_POWER7 = {
1205     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
1206     .slb_size = 32,
1207     .sps = {
1208         {
1209             .page_shift = 12, /* 4K */
1210             .slb_enc = 0,
1211             .enc = { { .page_shift = 12, .pte_enc = 0 },
1212                      { .page_shift = 16, .pte_enc = 0x7 },
1213                      { .page_shift = 24, .pte_enc = 0x38 }, },
1214         },
1215         {
1216             .page_shift = 16, /* 64K */
1217             .slb_enc = SLB_VSID_64K,
1218             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1219                      { .page_shift = 24, .pte_enc = 0x8 }, },
1220         },
1221         {
1222             .page_shift = 24, /* 16M */
1223             .slb_enc = SLB_VSID_16M,
1224             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1225         },
1226         {
1227             .page_shift = 34, /* 16G */
1228             .slb_enc = SLB_VSID_16G,
1229             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1230         },
1231     }
1232 };
1233 
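/*
 * Filter the supported page size table: keep only the (segment,
 * actual) page size pairs for which @cb returns true, compacting each
 * row and the table itself.  PPC_HASH64_CI_LARGEPAGE is dropped if no
 * 64kiB-or-larger page size survives.
 */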
1234 void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
1235                                  bool (*cb)(void *, uint32_t, uint32_t),
1236                                  void *opaque)
1237 {
1238     PPCHash64Options *opts = cpu->hash64_opts;
1239     int i;
1240     int n = 0;
1241     bool ci_largepage = false;
1242 
1243     assert(opts);
1244 
1245     n = 0;
1246     for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
1247         PPCHash64SegmentPageSizes *sps = &opts->sps[i];
1248         int j;
1249         int m = 0;
1250 
1251         assert(n <= i);
1252 
1253         if (!sps->page_shift) {
1254             break;
1255         }
1256 
1257         for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
1258             PPCHash64PageSize *ps = &sps->enc[j];
1259 
1260             assert(m <= j);
1261             if (!ps->page_shift) {
1262                 break;
1263             }
1264 
1265             if (cb(opaque, sps->page_shift, ps->page_shift)) {
1266                 if (ps->page_shift >= 16) {
1267                     ci_largepage = true;
1268                 }
1269                 sps->enc[m++] = *ps;
1270             }
1271         }
1272 
1273         /* Clear rest of the row */
1274         for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
1275             memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
1276         }
1277 
1278         if (m) {
1279             n++;
1280         }
1281     }
1282 
1283     /* Clear the rest of the table */
1284     for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
1285         memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
1286     }
1287 
1288     if (!ci_largepage) {
1289         opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
1290     }
1291 }
1292