xref: /openbmc/qemu/target/ppc/mmu-hash64.c (revision f9e3e1a35e8fd63d61fae58bd98d24d7defa9316)
1fcf5ef2aSThomas Huth /*
2fcf5ef2aSThomas Huth  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3fcf5ef2aSThomas Huth  *
4fcf5ef2aSThomas Huth  *  Copyright (c) 2003-2007 Jocelyn Mayer
5fcf5ef2aSThomas Huth  *  Copyright (c) 2013 David Gibson, IBM Corporation
6fcf5ef2aSThomas Huth  *
7fcf5ef2aSThomas Huth  * This library is free software; you can redistribute it and/or
8fcf5ef2aSThomas Huth  * modify it under the terms of the GNU Lesser General Public
9fcf5ef2aSThomas Huth  * License as published by the Free Software Foundation; either
10fcf5ef2aSThomas Huth  * version 2 of the License, or (at your option) any later version.
11fcf5ef2aSThomas Huth  *
12fcf5ef2aSThomas Huth  * This library is distributed in the hope that it will be useful,
13fcf5ef2aSThomas Huth  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14fcf5ef2aSThomas Huth  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15fcf5ef2aSThomas Huth  * Lesser General Public License for more details.
16fcf5ef2aSThomas Huth  *
17fcf5ef2aSThomas Huth  * You should have received a copy of the GNU Lesser General Public
18fcf5ef2aSThomas Huth  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19fcf5ef2aSThomas Huth  */
20fcf5ef2aSThomas Huth #include "qemu/osdep.h"
21a864a6b3SDavid Gibson #include "qemu/units.h"
22fcf5ef2aSThomas Huth #include "cpu.h"
23fcf5ef2aSThomas Huth #include "exec/exec-all.h"
24fcf5ef2aSThomas Huth #include "exec/helper-proto.h"
25fcf5ef2aSThomas Huth #include "qemu/error-report.h"
26fad866daSMarkus Armbruster #include "qemu/qemu-print.h"
27b3946626SVincent Palatin #include "sysemu/hw_accel.h"
28fcf5ef2aSThomas Huth #include "kvm_ppc.h"
29fcf5ef2aSThomas Huth #include "mmu-hash64.h"
30fcf5ef2aSThomas Huth #include "exec/log.h"
317222b94aSDavid Gibson #include "hw/hw.h"
32b2899495SSuraj Jitindar Singh #include "mmu-book3s-v3.h"
33fcf5ef2aSThomas Huth 
34d75cbae8SDavid Gibson /* #define DEBUG_SLB */
35fcf5ef2aSThomas Huth 
36fcf5ef2aSThomas Huth #ifdef DEBUG_SLB
37fcf5ef2aSThomas Huth #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
38fcf5ef2aSThomas Huth #else
39fcf5ef2aSThomas Huth #  define LOG_SLB(...) do { } while (0)
40fcf5ef2aSThomas Huth #endif
41fcf5ef2aSThomas Huth 
42fcf5ef2aSThomas Huth /*
43fcf5ef2aSThomas Huth  * SLB handling
44fcf5ef2aSThomas Huth  */
45fcf5ef2aSThomas Huth 
46fcf5ef2aSThomas Huth static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
47fcf5ef2aSThomas Huth {
48fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
49fcf5ef2aSThomas Huth     uint64_t esid_256M, esid_1T;
50fcf5ef2aSThomas Huth     int n;
51fcf5ef2aSThomas Huth 
52fcf5ef2aSThomas Huth     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
53fcf5ef2aSThomas Huth 
54fcf5ef2aSThomas Huth     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
55fcf5ef2aSThomas Huth     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
56fcf5ef2aSThomas Huth 
5767d7d66fSDavid Gibson     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
58fcf5ef2aSThomas Huth         ppc_slb_t *slb = &env->slb[n];
59fcf5ef2aSThomas Huth 
60fcf5ef2aSThomas Huth         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
61fcf5ef2aSThomas Huth                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
62d75cbae8SDavid Gibson         /*
63d75cbae8SDavid Gibson          * We check for 1T matches on all MMUs here - if the MMU
64fcf5ef2aSThomas Huth          * doesn't have 1T segment support, we will have prevented 1T
65d75cbae8SDavid Gibson          * entries from being inserted in the slbmte code.
66d75cbae8SDavid Gibson          */
67fcf5ef2aSThomas Huth         if (((slb->esid == esid_256M) &&
68fcf5ef2aSThomas Huth              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
69fcf5ef2aSThomas Huth             || ((slb->esid == esid_1T) &&
70fcf5ef2aSThomas Huth                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
71fcf5ef2aSThomas Huth             return slb;
72fcf5ef2aSThomas Huth         }
73fcf5ef2aSThomas Huth     }
74fcf5ef2aSThomas Huth 
75fcf5ef2aSThomas Huth     return NULL;
76fcf5ef2aSThomas Huth }
77fcf5ef2aSThomas Huth 
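/*
 * Worked example of the match above (hypothetical eaddr, for illustration
 * only): with eaddr = 0x0000123456789abc, clearing the low 28 bits gives
 * esid_256M = 0x123450000000 | SLB_ESID_V, and clearing the low 40 bits
 * gives esid_1T = 0x120000000000 | SLB_ESID_V.  A slot hits when its esid
 * equals one of these and its SLB_VSID_B field selects the matching
 * segment size.
 */
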
78fad866daSMarkus Armbruster void dump_slb(PowerPCCPU *cpu)
79fcf5ef2aSThomas Huth {
80fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
81fcf5ef2aSThomas Huth     int i;
82fcf5ef2aSThomas Huth     uint64_t slbe, slbv;
83fcf5ef2aSThomas Huth 
84fcf5ef2aSThomas Huth     cpu_synchronize_state(CPU(cpu));
85fcf5ef2aSThomas Huth 
86fad866daSMarkus Armbruster     qemu_printf("SLB\tESID\t\t\tVSID\n");
8767d7d66fSDavid Gibson     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
88fcf5ef2aSThomas Huth         slbe = env->slb[i].esid;
89fcf5ef2aSThomas Huth         slbv = env->slb[i].vsid;
90fcf5ef2aSThomas Huth         if (slbe == 0 && slbv == 0) {
91fcf5ef2aSThomas Huth             continue;
92fcf5ef2aSThomas Huth         }
93fad866daSMarkus Armbruster         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
94fcf5ef2aSThomas Huth                     i, slbe, slbv);
95fcf5ef2aSThomas Huth     }
96fcf5ef2aSThomas Huth }
97fcf5ef2aSThomas Huth 
98fcf5ef2aSThomas Huth void helper_slbia(CPUPPCState *env)
99fcf5ef2aSThomas Huth {
100db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
101fcf5ef2aSThomas Huth     int n;
102fcf5ef2aSThomas Huth 
103*f9e3e1a3SNicholas Piggin     /*
104*f9e3e1a3SNicholas Piggin      * slbia must always flush all TLB (which is equivalent to ERAT in ppc
105*f9e3e1a3SNicholas Piggin      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
106*f9e3e1a3SNicholas Piggin      * can overwrite a valid SLB without flushing its lookaside information.
107*f9e3e1a3SNicholas Piggin      *
108*f9e3e1a3SNicholas Piggin      * It would be possible to keep the TLB in sync with the SLB by flushing
109*f9e3e1a3SNicholas Piggin      * when a valid entry is overwritten by slbmte, and therefore slbia would
110*f9e3e1a3SNicholas Piggin      * not have to flush unless it evicts a valid SLB entry. However it is
111*f9e3e1a3SNicholas Piggin      * expected that slbmte is more common than slbia, and slbia is usually
112*f9e3e1a3SNicholas Piggin      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
113*f9e3e1a3SNicholas Piggin      * good one.
114*f9e3e1a3SNicholas Piggin      */
115*f9e3e1a3SNicholas Piggin 
116fcf5ef2aSThomas Huth     /* XXX: Warning: slbia never invalidates the first segment */
11767d7d66fSDavid Gibson     for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
118fcf5ef2aSThomas Huth         ppc_slb_t *slb = &env->slb[n];
119fcf5ef2aSThomas Huth 
120fcf5ef2aSThomas Huth         if (slb->esid & SLB_ESID_V) {
121fcf5ef2aSThomas Huth             slb->esid &= ~SLB_ESID_V;
122*f9e3e1a3SNicholas Piggin         }
123*f9e3e1a3SNicholas Piggin     }
124*f9e3e1a3SNicholas Piggin 
125fcf5ef2aSThomas Huth     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
126fcf5ef2aSThomas Huth }
127fcf5ef2aSThomas Huth 
128a63f1dfcSNikunj A Dadhania static void __helper_slbie(CPUPPCState *env, target_ulong addr,
129a63f1dfcSNikunj A Dadhania                            target_ulong global)
130fcf5ef2aSThomas Huth {
131db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
132fcf5ef2aSThomas Huth     ppc_slb_t *slb;
133fcf5ef2aSThomas Huth 
134fcf5ef2aSThomas Huth     slb = slb_lookup(cpu, addr);
135fcf5ef2aSThomas Huth     if (!slb) {
136fcf5ef2aSThomas Huth         return;
137fcf5ef2aSThomas Huth     }
138fcf5ef2aSThomas Huth 
139fcf5ef2aSThomas Huth     if (slb->esid & SLB_ESID_V) {
140fcf5ef2aSThomas Huth         slb->esid &= ~SLB_ESID_V;
141fcf5ef2aSThomas Huth 
142d75cbae8SDavid Gibson         /*
143d75cbae8SDavid Gibson          * XXX: given that the segment size is 256 MB or 1 TB,
144fcf5ef2aSThomas Huth          *      and we still don't have a tlb_flush_mask(env, n, mask)
145fcf5ef2aSThomas Huth          *      in QEMU, we just invalidate all TLBs
146fcf5ef2aSThomas Huth          */
147a63f1dfcSNikunj A Dadhania         env->tlb_need_flush |=
148a63f1dfcSNikunj A Dadhania             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
149fcf5ef2aSThomas Huth     }
150fcf5ef2aSThomas Huth }
151fcf5ef2aSThomas Huth 
152a63f1dfcSNikunj A Dadhania void helper_slbie(CPUPPCState *env, target_ulong addr)
153a63f1dfcSNikunj A Dadhania {
154a63f1dfcSNikunj A Dadhania     __helper_slbie(env, addr, false);
155a63f1dfcSNikunj A Dadhania }
156a63f1dfcSNikunj A Dadhania 
157a63f1dfcSNikunj A Dadhania void helper_slbieg(CPUPPCState *env, target_ulong addr)
158a63f1dfcSNikunj A Dadhania {
159a63f1dfcSNikunj A Dadhania     __helper_slbie(env, addr, true);
160a63f1dfcSNikunj A Dadhania }
161a63f1dfcSNikunj A Dadhania 
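/*
 * The two wrappers above differ only in flush scope: helper_slbie requests
 * a local TLB flush, helper_slbieg a global one (TLB_NEED_GLOBAL_FLUSH),
 * as selected by the 'global' argument of __helper_slbie.
 */
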
162fcf5ef2aSThomas Huth int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
163fcf5ef2aSThomas Huth                   target_ulong esid, target_ulong vsid)
164fcf5ef2aSThomas Huth {
165fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
166fcf5ef2aSThomas Huth     ppc_slb_t *slb = &env->slb[slot];
167b07c59f7SDavid Gibson     const PPCHash64SegmentPageSizes *sps = NULL;
168fcf5ef2aSThomas Huth     int i;
169fcf5ef2aSThomas Huth 
17067d7d66fSDavid Gibson     if (slot >= cpu->hash64_opts->slb_size) {
171fcf5ef2aSThomas Huth         return -1; /* Bad slot number */
172fcf5ef2aSThomas Huth     }
173fcf5ef2aSThomas Huth     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
174fcf5ef2aSThomas Huth         return -1; /* Reserved bits set */
175fcf5ef2aSThomas Huth     }
176fcf5ef2aSThomas Huth     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
177fcf5ef2aSThomas Huth         return -1; /* Bad segment size */
178fcf5ef2aSThomas Huth     }
17958969eeeSDavid Gibson     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
180fcf5ef2aSThomas Huth         return -1; /* 1T segment on MMU that doesn't support it */
181fcf5ef2aSThomas Huth     }
182fcf5ef2aSThomas Huth 
183fcf5ef2aSThomas Huth     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
184b07c59f7SDavid Gibson         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
185fcf5ef2aSThomas Huth 
186fcf5ef2aSThomas Huth         if (!sps1->page_shift) {
187fcf5ef2aSThomas Huth             break;
188fcf5ef2aSThomas Huth         }
189fcf5ef2aSThomas Huth 
190fcf5ef2aSThomas Huth         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
191fcf5ef2aSThomas Huth             sps = sps1;
192fcf5ef2aSThomas Huth             break;
193fcf5ef2aSThomas Huth         }
194fcf5ef2aSThomas Huth     }
195fcf5ef2aSThomas Huth 
196fcf5ef2aSThomas Huth     if (!sps) {
197fcf5ef2aSThomas Huth         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
198fcf5ef2aSThomas Huth                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
199fcf5ef2aSThomas Huth                      slot, esid, vsid);
200fcf5ef2aSThomas Huth         return -1;
201fcf5ef2aSThomas Huth     }
202fcf5ef2aSThomas Huth 
203fcf5ef2aSThomas Huth     slb->esid = esid;
204fcf5ef2aSThomas Huth     slb->vsid = vsid;
205fcf5ef2aSThomas Huth     slb->sps = sps;
206fcf5ef2aSThomas Huth 
20776134d48SSuraj Jitindar Singh     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
20876134d48SSuraj Jitindar Singh             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
209fcf5ef2aSThomas Huth             slb->esid, slb->vsid);
210fcf5ef2aSThomas Huth 
211fcf5ef2aSThomas Huth     return 0;
212fcf5ef2aSThomas Huth }
213fcf5ef2aSThomas Huth 
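/*
 * Illustration of the LLP match above (hypothetical encoding): if
 * cpu->hash64_opts->sps[] contains an entry with slb_enc == 0 and
 * page_shift == 12, an slbmte whose vsid has (vsid & SLB_VSID_LLP_MASK) == 0
 * selects 4 KiB base pages for the segment; a vsid whose L||LP pattern
 * matches no entry is rejected with -1, so the SLB never holds an
 * undecodable page size.
 */
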
214fcf5ef2aSThomas Huth static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
215fcf5ef2aSThomas Huth                              target_ulong *rt)
216fcf5ef2aSThomas Huth {
217fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
218fcf5ef2aSThomas Huth     int slot = rb & 0xfff;
219fcf5ef2aSThomas Huth     ppc_slb_t *slb = &env->slb[slot];
220fcf5ef2aSThomas Huth 
22167d7d66fSDavid Gibson     if (slot >= cpu->hash64_opts->slb_size) {
222fcf5ef2aSThomas Huth         return -1;
223fcf5ef2aSThomas Huth     }
224fcf5ef2aSThomas Huth 
225fcf5ef2aSThomas Huth     *rt = slb->esid;
226fcf5ef2aSThomas Huth     return 0;
227fcf5ef2aSThomas Huth }
228fcf5ef2aSThomas Huth 
229fcf5ef2aSThomas Huth static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
230fcf5ef2aSThomas Huth                              target_ulong *rt)
231fcf5ef2aSThomas Huth {
232fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
233fcf5ef2aSThomas Huth     int slot = rb & 0xfff;
234fcf5ef2aSThomas Huth     ppc_slb_t *slb = &env->slb[slot];
235fcf5ef2aSThomas Huth 
23667d7d66fSDavid Gibson     if (slot >= cpu->hash64_opts->slb_size) {
237fcf5ef2aSThomas Huth         return -1;
238fcf5ef2aSThomas Huth     }
239fcf5ef2aSThomas Huth 
240fcf5ef2aSThomas Huth     *rt = slb->vsid;
241fcf5ef2aSThomas Huth     return 0;
242fcf5ef2aSThomas Huth }
243fcf5ef2aSThomas Huth 
244fcf5ef2aSThomas Huth static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
245fcf5ef2aSThomas Huth                              target_ulong *rt)
246fcf5ef2aSThomas Huth {
247fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
248fcf5ef2aSThomas Huth     ppc_slb_t *slb;
249fcf5ef2aSThomas Huth 
250fcf5ef2aSThomas Huth     if (!msr_is_64bit(env, env->msr)) {
251fcf5ef2aSThomas Huth         rb &= 0xffffffff;
252fcf5ef2aSThomas Huth     }
253fcf5ef2aSThomas Huth     slb = slb_lookup(cpu, rb);
254fcf5ef2aSThomas Huth     if (slb == NULL) {
255fcf5ef2aSThomas Huth         *rt = (target_ulong)-1ul;
256fcf5ef2aSThomas Huth     } else {
257fcf5ef2aSThomas Huth         *rt = slb->vsid;
258fcf5ef2aSThomas Huth     }
259fcf5ef2aSThomas Huth     return 0;
260fcf5ef2aSThomas Huth }
261fcf5ef2aSThomas Huth 
262fcf5ef2aSThomas Huth void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
263fcf5ef2aSThomas Huth {
264db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
265fcf5ef2aSThomas Huth 
266fcf5ef2aSThomas Huth     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
267fcf5ef2aSThomas Huth         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
268fcf5ef2aSThomas Huth                                POWERPC_EXCP_INVAL, GETPC());
269fcf5ef2aSThomas Huth     }
270fcf5ef2aSThomas Huth }
271fcf5ef2aSThomas Huth 
272fcf5ef2aSThomas Huth target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
273fcf5ef2aSThomas Huth {
274db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
275fcf5ef2aSThomas Huth     target_ulong rt = 0;
276fcf5ef2aSThomas Huth 
277fcf5ef2aSThomas Huth     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
278fcf5ef2aSThomas Huth         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
279fcf5ef2aSThomas Huth                                POWERPC_EXCP_INVAL, GETPC());
280fcf5ef2aSThomas Huth     }
281fcf5ef2aSThomas Huth     return rt;
282fcf5ef2aSThomas Huth }
283fcf5ef2aSThomas Huth 
284fcf5ef2aSThomas Huth target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
285fcf5ef2aSThomas Huth {
286db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
287fcf5ef2aSThomas Huth     target_ulong rt = 0;
288fcf5ef2aSThomas Huth 
289fcf5ef2aSThomas Huth     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
290fcf5ef2aSThomas Huth         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
291fcf5ef2aSThomas Huth                                POWERPC_EXCP_INVAL, GETPC());
292fcf5ef2aSThomas Huth     }
293fcf5ef2aSThomas Huth     return rt;
294fcf5ef2aSThomas Huth }
295fcf5ef2aSThomas Huth 
296fcf5ef2aSThomas Huth target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
297fcf5ef2aSThomas Huth {
298db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
299fcf5ef2aSThomas Huth     target_ulong rt = 0;
300fcf5ef2aSThomas Huth 
301fcf5ef2aSThomas Huth     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
302fcf5ef2aSThomas Huth         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
303fcf5ef2aSThomas Huth                                POWERPC_EXCP_INVAL, GETPC());
304fcf5ef2aSThomas Huth     }
305fcf5ef2aSThomas Huth     return rt;
306fcf5ef2aSThomas Huth }
307fcf5ef2aSThomas Huth 
30807a68f99SSuraj Jitindar Singh /* Check No-Execute or Guarded Storage */
30907a68f99SSuraj Jitindar Singh static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
31007a68f99SSuraj Jitindar Singh                                               ppc_hash_pte64_t pte)
31107a68f99SSuraj Jitindar Singh {
31207a68f99SSuraj Jitindar Singh     /* Exec permissions CANNOT take away read or write permissions */
31307a68f99SSuraj Jitindar Singh     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
31407a68f99SSuraj Jitindar Singh             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
31507a68f99SSuraj Jitindar Singh }
31607a68f99SSuraj Jitindar Singh 
31707a68f99SSuraj Jitindar Singh /* Check Basic Storage Protection */
318fcf5ef2aSThomas Huth static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
319fcf5ef2aSThomas Huth                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
320fcf5ef2aSThomas Huth {
321fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
322fcf5ef2aSThomas Huth     unsigned pp, key;
323d75cbae8SDavid Gibson     /*
324d75cbae8SDavid Gibson      * Some pp bit combinations have undefined behaviour, so default
325d75cbae8SDavid Gibson      * to no access in those cases
326d75cbae8SDavid Gibson      */
327fcf5ef2aSThomas Huth     int prot = 0;
328fcf5ef2aSThomas Huth 
329fcf5ef2aSThomas Huth     key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
330fcf5ef2aSThomas Huth              : (slb->vsid & SLB_VSID_KS));
331fcf5ef2aSThomas Huth     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
332fcf5ef2aSThomas Huth 
333fcf5ef2aSThomas Huth     if (key == 0) {
334fcf5ef2aSThomas Huth         switch (pp) {
335fcf5ef2aSThomas Huth         case 0x0:
336fcf5ef2aSThomas Huth         case 0x1:
337fcf5ef2aSThomas Huth         case 0x2:
338347a5c73SSuraj Jitindar Singh             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
339fcf5ef2aSThomas Huth             break;
340fcf5ef2aSThomas Huth 
341fcf5ef2aSThomas Huth         case 0x3:
342fcf5ef2aSThomas Huth         case 0x6:
343347a5c73SSuraj Jitindar Singh             prot = PAGE_READ | PAGE_EXEC;
344fcf5ef2aSThomas Huth             break;
345fcf5ef2aSThomas Huth         }
346fcf5ef2aSThomas Huth     } else {
347fcf5ef2aSThomas Huth         switch (pp) {
348fcf5ef2aSThomas Huth         case 0x0:
349fcf5ef2aSThomas Huth         case 0x6:
350fcf5ef2aSThomas Huth             break;
351fcf5ef2aSThomas Huth 
352fcf5ef2aSThomas Huth         case 0x1:
353fcf5ef2aSThomas Huth         case 0x3:
354347a5c73SSuraj Jitindar Singh             prot = PAGE_READ | PAGE_EXEC;
355fcf5ef2aSThomas Huth             break;
356fcf5ef2aSThomas Huth 
357fcf5ef2aSThomas Huth         case 0x2:
358347a5c73SSuraj Jitindar Singh             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
359fcf5ef2aSThomas Huth             break;
360fcf5ef2aSThomas Huth         }
361fcf5ef2aSThomas Huth     }
362fcf5ef2aSThomas Huth 
363fcf5ef2aSThomas Huth     return prot;
364fcf5ef2aSThomas Huth }
365fcf5ef2aSThomas Huth 
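/*
 * Quick reference for the mapping implemented above (pp = PP0||PP,
 * key = Kp in problem state, Ks otherwise):
 *   key == 0:  pp 0,1,2 -> RWX    pp 3,6 -> R-X    other -> no access
 *   key == 1:  pp 2     -> RWX    pp 1,3 -> R-X    pp 0,6 -> no access
 */
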
366a6152b52SSuraj Jitindar Singh /* Check the instruction access permissions specified in the IAMR */
367a6152b52SSuraj Jitindar Singh static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
368a6152b52SSuraj Jitindar Singh {
369a6152b52SSuraj Jitindar Singh     CPUPPCState *env = &cpu->env;
370a6152b52SSuraj Jitindar Singh     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
371a6152b52SSuraj Jitindar Singh 
372a6152b52SSuraj Jitindar Singh     /*
373a6152b52SSuraj Jitindar Singh      * An instruction fetch is permitted if the IAMR bit is 0.
374a6152b52SSuraj Jitindar Singh      * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
375a6152b52SSuraj Jitindar Singh      * can only take away EXEC permissions, not READ or WRITE permissions.
376a6152b52SSuraj Jitindar Singh      * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
377a6152b52SSuraj Jitindar Singh      * EXEC permissions are allowed.
378a6152b52SSuraj Jitindar Singh      */
379a6152b52SSuraj Jitindar Singh     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
380a6152b52SSuraj Jitindar Singh                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
381a6152b52SSuraj Jitindar Singh }
382a6152b52SSuraj Jitindar Singh 
383fcf5ef2aSThomas Huth static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
384fcf5ef2aSThomas Huth {
385fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
386fcf5ef2aSThomas Huth     int key, amrbits;
387fcf5ef2aSThomas Huth     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
388fcf5ef2aSThomas Huth 
389fcf5ef2aSThomas Huth     /* Only recent MMUs implement Virtual Page Class Key Protection */
39058969eeeSDavid Gibson     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
391fcf5ef2aSThomas Huth         return prot;
392fcf5ef2aSThomas Huth     }
393fcf5ef2aSThomas Huth 
394fcf5ef2aSThomas Huth     key = HPTE64_R_KEY(pte.pte1);
395fcf5ef2aSThomas Huth     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
396fcf5ef2aSThomas Huth 
397fcf5ef2aSThomas Huth     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
398fcf5ef2aSThomas Huth     /*         env->spr[SPR_AMR]); */
399fcf5ef2aSThomas Huth 
400fcf5ef2aSThomas Huth     /*
401fcf5ef2aSThomas Huth      * A store is permitted if the AMR bit is 0. Remove write
402fcf5ef2aSThomas Huth      * protection if it is set.
403fcf5ef2aSThomas Huth      */
404fcf5ef2aSThomas Huth     if (amrbits & 0x2) {
405fcf5ef2aSThomas Huth         prot &= ~PAGE_WRITE;
406fcf5ef2aSThomas Huth     }
407fcf5ef2aSThomas Huth     /*
408fcf5ef2aSThomas Huth      * A load is permitted if the AMR bit is 0. Remove read
409fcf5ef2aSThomas Huth      * protection if it is set.
410fcf5ef2aSThomas Huth      */
411fcf5ef2aSThomas Huth     if (amrbits & 0x1) {
412fcf5ef2aSThomas Huth         prot &= ~PAGE_READ;
413fcf5ef2aSThomas Huth     }
414fcf5ef2aSThomas Huth 
415a6152b52SSuraj Jitindar Singh     switch (env->mmu_model) {
416a6152b52SSuraj Jitindar Singh     /*
417a6152b52SSuraj Jitindar Singh      * MMU version 2.07 and later support IAMR
418a6152b52SSuraj Jitindar Singh      * Check whether the IAMR allows the instruction access: if it does not,
419a6152b52SSuraj Jitindar Singh      * the returned mask lacks PAGE_EXEC and the AND below clears that bit;
420a6152b52SSuraj Jitindar Singh      * if it does, PAGE_EXEC is included and prot is left unchanged.
421a6152b52SSuraj Jitindar Singh      */
422a6152b52SSuraj Jitindar Singh     case POWERPC_MMU_2_07:
423a6152b52SSuraj Jitindar Singh     case POWERPC_MMU_3_00:
424a6152b52SSuraj Jitindar Singh         prot &= ppc_hash64_iamr_prot(cpu, key);
425a6152b52SSuraj Jitindar Singh         break;
426a6152b52SSuraj Jitindar Singh     default:
427a6152b52SSuraj Jitindar Singh         break;
428a6152b52SSuraj Jitindar Singh     }
429a6152b52SSuraj Jitindar Singh 
430fcf5ef2aSThomas Huth     return prot;
431fcf5ef2aSThomas Huth }
432fcf5ef2aSThomas Huth 
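/*
 * Worked example for the AMR lookup above (hypothetical register values):
 * with HPTE64_R_KEY(pte1) == 2 and SPR_AMR == 0x0400000000000000ULL, the
 * shift is 2 * (31 - 2) = 58, so amrbits == 0x1: PAGE_READ is removed
 * while PAGE_WRITE survives because bit 0x2 of the pair is clear.
 */
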
4337222b94aSDavid Gibson const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
4347222b94aSDavid Gibson                                              hwaddr ptex, int n)
435fcf5ef2aSThomas Huth {
4367222b94aSDavid Gibson     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
4373367c62fSBenjamin Herrenschmidt     hwaddr base;
4387222b94aSDavid Gibson     hwaddr plen = n * HASH_PTE_SIZE_64;
439e57ca75cSDavid Gibson     const ppc_hash_pte64_t *hptes;
440e57ca75cSDavid Gibson 
441e57ca75cSDavid Gibson     if (cpu->vhyp) {
442e57ca75cSDavid Gibson         PPCVirtualHypervisorClass *vhc =
443e57ca75cSDavid Gibson             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
444e57ca75cSDavid Gibson         return vhc->map_hptes(cpu->vhyp, ptex, n);
445e57ca75cSDavid Gibson     }
4463367c62fSBenjamin Herrenschmidt     base = ppc_hash64_hpt_base(cpu);
447e57ca75cSDavid Gibson 
448e57ca75cSDavid Gibson     if (!base) {
449e57ca75cSDavid Gibson         return NULL;
450e57ca75cSDavid Gibson     }
451e57ca75cSDavid Gibson 
452f26404fbSPeter Maydell     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
453f26404fbSPeter Maydell                               MEMTXATTRS_UNSPECIFIED);
4547222b94aSDavid Gibson     if (plen < (n * HASH_PTE_SIZE_64)) {
4557222b94aSDavid Gibson         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
456fcf5ef2aSThomas Huth     }
4577222b94aSDavid Gibson     return hptes;
458fcf5ef2aSThomas Huth }
459fcf5ef2aSThomas Huth 
4607222b94aSDavid Gibson void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
4617222b94aSDavid Gibson                             hwaddr ptex, int n)
462fcf5ef2aSThomas Huth {
463e57ca75cSDavid Gibson     if (cpu->vhyp) {
464e57ca75cSDavid Gibson         PPCVirtualHypervisorClass *vhc =
465e57ca75cSDavid Gibson             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
466e57ca75cSDavid Gibson         vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
467e57ca75cSDavid Gibson         return;
468e57ca75cSDavid Gibson     }
469e57ca75cSDavid Gibson 
4707222b94aSDavid Gibson     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
4717222b94aSDavid Gibson                         false, n * HASH_PTE_SIZE_64);
472fcf5ef2aSThomas Huth }
473fcf5ef2aSThomas Huth 
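/*
 * Typical usage of the map/unmap pair above (see ppc_hash64_pteg_search
 * below): map one full PTEG (HPTES_PER_GROUP entries), read the HPTE
 * doublewords through ppc_hash64_hpte0/1(), then unmap.  With a virtual
 * hypervisor both operations are delegated to the PPCVirtualHypervisorClass
 * hooks instead of touching guest memory directly.
 */
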
474b07c59f7SDavid Gibson static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
475fcf5ef2aSThomas Huth                                 uint64_t pte0, uint64_t pte1)
476fcf5ef2aSThomas Huth {
477fcf5ef2aSThomas Huth     int i;
478fcf5ef2aSThomas Huth 
479fcf5ef2aSThomas Huth     if (!(pte0 & HPTE64_V_LARGE)) {
480fcf5ef2aSThomas Huth         if (sps->page_shift != 12) {
481fcf5ef2aSThomas Huth             /* 4kiB page in a non-4kiB segment */
482fcf5ef2aSThomas Huth             return 0;
483fcf5ef2aSThomas Huth         }
484fcf5ef2aSThomas Huth         /* Normal 4kiB page */
485fcf5ef2aSThomas Huth         return 12;
486fcf5ef2aSThomas Huth     }
487fcf5ef2aSThomas Huth 
488fcf5ef2aSThomas Huth     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
489b07c59f7SDavid Gibson         const PPCHash64PageSize *ps = &sps->enc[i];
490fcf5ef2aSThomas Huth         uint64_t mask;
491fcf5ef2aSThomas Huth 
492fcf5ef2aSThomas Huth         if (!ps->page_shift) {
493fcf5ef2aSThomas Huth             break;
494fcf5ef2aSThomas Huth         }
495fcf5ef2aSThomas Huth 
496fcf5ef2aSThomas Huth         if (ps->page_shift == 12) {
497fcf5ef2aSThomas Huth             /* L bit is set so this can't be a 4kiB page */
498fcf5ef2aSThomas Huth             continue;
499fcf5ef2aSThomas Huth         }
500fcf5ef2aSThomas Huth 
501fcf5ef2aSThomas Huth         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
502fcf5ef2aSThomas Huth 
503fcf5ef2aSThomas Huth         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
504fcf5ef2aSThomas Huth             return ps->page_shift;
505fcf5ef2aSThomas Huth         }
506fcf5ef2aSThomas Huth     }
507fcf5ef2aSThomas Huth 
508fcf5ef2aSThomas Huth     return 0; /* Bad page size encoding */
509fcf5ef2aSThomas Huth }
510fcf5ef2aSThomas Huth 
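/*
 * Example of the large-page decode above (hypothetical encoding): for a
 * segment whose sps lists a 16 MiB size (page_shift == 24, pte_enc == 0),
 * mask selects the RPN bits below bit 24 and the HPTE only matches if
 * those bits equal pte_enc << HPTE64_R_RPN_SHIFT, i.e. zero here.
 */
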
51134525595SBenjamin Herrenschmidt static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
51234525595SBenjamin Herrenschmidt {
51334525595SBenjamin Herrenschmidt     /* Insert B into pte0 */
51434525595SBenjamin Herrenschmidt     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
51534525595SBenjamin Herrenschmidt             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
51634525595SBenjamin Herrenschmidt              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
51734525595SBenjamin Herrenschmidt 
51834525595SBenjamin Herrenschmidt     /* Remove B from pte1 */
51934525595SBenjamin Herrenschmidt     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
52034525595SBenjamin Herrenschmidt }
52134525595SBenjamin Herrenschmidt 
52234525595SBenjamin Herrenschmidt 
523fcf5ef2aSThomas Huth static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
524b07c59f7SDavid Gibson                                      const PPCHash64SegmentPageSizes *sps,
525fcf5ef2aSThomas Huth                                      target_ulong ptem,
526fcf5ef2aSThomas Huth                                      ppc_hash_pte64_t *pte, unsigned *pshift)
527fcf5ef2aSThomas Huth {
528fcf5ef2aSThomas Huth     int i;
5297222b94aSDavid Gibson     const ppc_hash_pte64_t *pteg;
530fcf5ef2aSThomas Huth     target_ulong pte0, pte1;
5317222b94aSDavid Gibson     target_ulong ptex;
532fcf5ef2aSThomas Huth 
53336778660SDavid Gibson     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
5347222b94aSDavid Gibson     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
5357222b94aSDavid Gibson     if (!pteg) {
536fcf5ef2aSThomas Huth         return -1;
537fcf5ef2aSThomas Huth     }
538fcf5ef2aSThomas Huth     for (i = 0; i < HPTES_PER_GROUP; i++) {
5397222b94aSDavid Gibson         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
5403054b0caSBenjamin Herrenschmidt         /*
5413054b0caSBenjamin Herrenschmidt          * pte0 contains the valid bit and must be read before pte1,
5423054b0caSBenjamin Herrenschmidt          * otherwise we might see an old pte1 with a new valid bit and
5433054b0caSBenjamin Herrenschmidt          * thus an inconsistent hpte value
5443054b0caSBenjamin Herrenschmidt          */
5453054b0caSBenjamin Herrenschmidt         smp_rmb();
5467222b94aSDavid Gibson         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
547fcf5ef2aSThomas Huth 
54834525595SBenjamin Herrenschmidt         /* Convert format if necessary */
54934525595SBenjamin Herrenschmidt         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
55034525595SBenjamin Herrenschmidt             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
55134525595SBenjamin Herrenschmidt         }
55234525595SBenjamin Herrenschmidt 
553fcf5ef2aSThomas Huth         /* This compares V, B, H (secondary) and the AVPN */
554fcf5ef2aSThomas Huth         if (HPTE64_V_COMPARE(pte0, ptem)) {
555fcf5ef2aSThomas Huth             *pshift = hpte_page_shift(sps, pte0, pte1);
556fcf5ef2aSThomas Huth             /*
557fcf5ef2aSThomas Huth              * If there is no match, ignore the PTE, it could simply
558fcf5ef2aSThomas Huth              * be for a different segment size encoding and the
559fcf5ef2aSThomas Huth              * architecture specifies we should not match. Linux will
560fcf5ef2aSThomas Huth              * potentially leave behind PTEs for the wrong base page
561fcf5ef2aSThomas Huth              * size when demoting segments.
562fcf5ef2aSThomas Huth              */
563fcf5ef2aSThomas Huth             if (*pshift == 0) {
564fcf5ef2aSThomas Huth                 continue;
565fcf5ef2aSThomas Huth             }
566d75cbae8SDavid Gibson             /*
567d75cbae8SDavid Gibson              * We don't do anything with pshift yet as qemu TLB only
568d75cbae8SDavid Gibson              * deals with 4K pages anyway
569fcf5ef2aSThomas Huth              */
570fcf5ef2aSThomas Huth             pte->pte0 = pte0;
571fcf5ef2aSThomas Huth             pte->pte1 = pte1;
5727222b94aSDavid Gibson             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
5737222b94aSDavid Gibson             return ptex + i;
574fcf5ef2aSThomas Huth         }
575fcf5ef2aSThomas Huth     }
5767222b94aSDavid Gibson     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
577fcf5ef2aSThomas Huth     /*
578fcf5ef2aSThomas Huth      * We didn't find a valid entry.
579fcf5ef2aSThomas Huth      */
580fcf5ef2aSThomas Huth     return -1;
581fcf5ef2aSThomas Huth }
582fcf5ef2aSThomas Huth 
583fcf5ef2aSThomas Huth static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
584fcf5ef2aSThomas Huth                                      ppc_slb_t *slb, target_ulong eaddr,
585fcf5ef2aSThomas Huth                                      ppc_hash_pte64_t *pte, unsigned *pshift)
586fcf5ef2aSThomas Huth {
587fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
5887222b94aSDavid Gibson     hwaddr hash, ptex;
589fcf5ef2aSThomas Huth     uint64_t vsid, epnmask, epn, ptem;
590b07c59f7SDavid Gibson     const PPCHash64SegmentPageSizes *sps = slb->sps;
591fcf5ef2aSThomas Huth 
592d75cbae8SDavid Gibson     /*
593d75cbae8SDavid Gibson      * The SLB store path should prevent any bad page size encodings
594d75cbae8SDavid Gibson      * getting in there, so:
595d75cbae8SDavid Gibson      */
596fcf5ef2aSThomas Huth     assert(sps);
597fcf5ef2aSThomas Huth 
598fcf5ef2aSThomas Huth     /* If ISL is set in LPCR we need to clamp the page size to 4K */
599fcf5ef2aSThomas Huth     if (env->spr[SPR_LPCR] & LPCR_ISL) {
600fcf5ef2aSThomas Huth         /* We assume that when using TCG, 4k is the first entry of SPS */
601b07c59f7SDavid Gibson         sps = &cpu->hash64_opts->sps[0];
602fcf5ef2aSThomas Huth         assert(sps->page_shift == 12);
603fcf5ef2aSThomas Huth     }
604fcf5ef2aSThomas Huth 
605fcf5ef2aSThomas Huth     epnmask = ~((1ULL << sps->page_shift) - 1);
606fcf5ef2aSThomas Huth 
607fcf5ef2aSThomas Huth     if (slb->vsid & SLB_VSID_B) {
608fcf5ef2aSThomas Huth         /* 1TB segment */
609fcf5ef2aSThomas Huth         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
610fcf5ef2aSThomas Huth         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
611fcf5ef2aSThomas Huth         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
612fcf5ef2aSThomas Huth     } else {
613fcf5ef2aSThomas Huth         /* 256M segment */
614fcf5ef2aSThomas Huth         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
615fcf5ef2aSThomas Huth         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
616fcf5ef2aSThomas Huth         hash = vsid ^ (epn >> sps->page_shift);
617fcf5ef2aSThomas Huth     }
618fcf5ef2aSThomas Huth     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
619fcf5ef2aSThomas Huth     ptem |= HPTE64_V_VALID;
620fcf5ef2aSThomas Huth 
621fcf5ef2aSThomas Huth     /* Page address translation */
622fcf5ef2aSThomas Huth     qemu_log_mask(CPU_LOG_MMU,
623fcf5ef2aSThomas Huth             "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
624fcf5ef2aSThomas Huth             " hash " TARGET_FMT_plx "\n",
62536778660SDavid Gibson             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
626fcf5ef2aSThomas Huth 
627fcf5ef2aSThomas Huth     /* Primary PTEG lookup */
628fcf5ef2aSThomas Huth     qemu_log_mask(CPU_LOG_MMU,
629fcf5ef2aSThomas Huth             "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
630fcf5ef2aSThomas Huth             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
631fcf5ef2aSThomas Huth             " hash=" TARGET_FMT_plx "\n",
63236778660SDavid Gibson             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
63336778660SDavid Gibson             vsid, ptem,  hash);
6347222b94aSDavid Gibson     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
635fcf5ef2aSThomas Huth 
6367222b94aSDavid Gibson     if (ptex == -1) {
637fcf5ef2aSThomas Huth         /* Secondary PTEG lookup */
638fcf5ef2aSThomas Huth         ptem |= HPTE64_V_SECONDARY;
639fcf5ef2aSThomas Huth         qemu_log_mask(CPU_LOG_MMU,
640fcf5ef2aSThomas Huth                 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
641fcf5ef2aSThomas Huth                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
64236778660SDavid Gibson                 " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
64336778660SDavid Gibson                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
644fcf5ef2aSThomas Huth 
6457222b94aSDavid Gibson         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
646fcf5ef2aSThomas Huth     }
647fcf5ef2aSThomas Huth 
6487222b94aSDavid Gibson     return ptex;
649fcf5ef2aSThomas Huth }
650fcf5ef2aSThomas Huth 
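/*
 * Worked example of the primary hash above (hypothetical values): for a
 * 256 MiB segment with vsid == 0x12345, 4 KiB base pages and an offset of
 * 0x2000 within the segment, epn == 0x2000 and
 *     hash = 0x12345 ^ (0x2000 >> 12) = 0x12347
 * The first PTE of the group is then at index
 * (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP, and the secondary
 * search repeats the lookup at ~hash with HPTE64_V_SECONDARY set in ptem.
 */
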
651fcf5ef2aSThomas Huth unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
652fcf5ef2aSThomas Huth                                           uint64_t pte0, uint64_t pte1)
653fcf5ef2aSThomas Huth {
654fcf5ef2aSThomas Huth     int i;
655fcf5ef2aSThomas Huth 
656fcf5ef2aSThomas Huth     if (!(pte0 & HPTE64_V_LARGE)) {
657fcf5ef2aSThomas Huth         return 12;
658fcf5ef2aSThomas Huth     }
659fcf5ef2aSThomas Huth 
660fcf5ef2aSThomas Huth     /*
661fcf5ef2aSThomas Huth      * The encodings in cpu->hash64_opts->sps need to be carefully chosen so that
662fcf5ef2aSThomas Huth      * this gives an unambiguous result.
663fcf5ef2aSThomas Huth      */
664fcf5ef2aSThomas Huth     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
665b07c59f7SDavid Gibson         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
666fcf5ef2aSThomas Huth         unsigned shift;
667fcf5ef2aSThomas Huth 
668fcf5ef2aSThomas Huth         if (!sps->page_shift) {
669fcf5ef2aSThomas Huth             break;
670fcf5ef2aSThomas Huth         }
671fcf5ef2aSThomas Huth 
672fcf5ef2aSThomas Huth         shift = hpte_page_shift(sps, pte0, pte1);
673fcf5ef2aSThomas Huth         if (shift) {
674fcf5ef2aSThomas Huth             return shift;
675fcf5ef2aSThomas Huth         }
676fcf5ef2aSThomas Huth     }
677fcf5ef2aSThomas Huth 
678fcf5ef2aSThomas Huth     return 0;
679fcf5ef2aSThomas Huth }
680fcf5ef2aSThomas Huth 
6811b99e029SDavid Gibson static bool ppc_hash64_use_vrma(CPUPPCState *env)
6821b99e029SDavid Gibson {
6831b99e029SDavid Gibson     switch (env->mmu_model) {
6841b99e029SDavid Gibson     case POWERPC_MMU_3_00:
6851b99e029SDavid Gibson         /*
6861b99e029SDavid Gibson          * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
6871b99e029SDavid Gibson          * register no longer exist
6881b99e029SDavid Gibson          */
6891b99e029SDavid Gibson         return true;
6901b99e029SDavid Gibson 
6911b99e029SDavid Gibson     default:
6921b99e029SDavid Gibson         return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
6931b99e029SDavid Gibson     }
6941b99e029SDavid Gibson }
6951b99e029SDavid Gibson 
6968fe08facSDavid Gibson static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
697fcf5ef2aSThomas Huth {
6988fe08facSDavid Gibson     CPUPPCState *env = &POWERPC_CPU(cs)->env;
699fcf5ef2aSThomas Huth     bool vpm;
700fcf5ef2aSThomas Huth 
701fcf5ef2aSThomas Huth     if (msr_ir) {
702fcf5ef2aSThomas Huth         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
703fcf5ef2aSThomas Huth     } else {
7041b99e029SDavid Gibson         vpm = ppc_hash64_use_vrma(env);
705fcf5ef2aSThomas Huth     }
706fcf5ef2aSThomas Huth     if (vpm && !msr_hv) {
707fcf5ef2aSThomas Huth         cs->exception_index = POWERPC_EXCP_HISI;
708fcf5ef2aSThomas Huth     } else {
709fcf5ef2aSThomas Huth         cs->exception_index = POWERPC_EXCP_ISI;
710fcf5ef2aSThomas Huth     }
711fcf5ef2aSThomas Huth     env->error_code = error_code;
712fcf5ef2aSThomas Huth }
713fcf5ef2aSThomas Huth 
7148fe08facSDavid Gibson static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
715fcf5ef2aSThomas Huth {
7168fe08facSDavid Gibson     CPUPPCState *env = &POWERPC_CPU(cs)->env;
717fcf5ef2aSThomas Huth     bool vpm;
718fcf5ef2aSThomas Huth 
719fcf5ef2aSThomas Huth     if (msr_dr) {
720fcf5ef2aSThomas Huth         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
721fcf5ef2aSThomas Huth     } else {
7221b99e029SDavid Gibson         vpm = ppc_hash64_use_vrma(env);
723fcf5ef2aSThomas Huth     }
724fcf5ef2aSThomas Huth     if (vpm && !msr_hv) {
725fcf5ef2aSThomas Huth         cs->exception_index = POWERPC_EXCP_HDSI;
726fcf5ef2aSThomas Huth         env->spr[SPR_HDAR] = dar;
727fcf5ef2aSThomas Huth         env->spr[SPR_HDSISR] = dsisr;
728fcf5ef2aSThomas Huth     } else {
729fcf5ef2aSThomas Huth         cs->exception_index = POWERPC_EXCP_DSI;
730fcf5ef2aSThomas Huth         env->spr[SPR_DAR] = dar;
731fcf5ef2aSThomas Huth         env->spr[SPR_DSISR] = dsisr;
732fcf5ef2aSThomas Huth     }
733fcf5ef2aSThomas Huth     env->error_code = 0;
734fcf5ef2aSThomas Huth }
735fcf5ef2aSThomas Huth 
736fcf5ef2aSThomas Huth 
737a2dd4e83SBenjamin Herrenschmidt static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
738a2dd4e83SBenjamin Herrenschmidt {
739a2dd4e83SBenjamin Herrenschmidt     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
740a2dd4e83SBenjamin Herrenschmidt 
741a2dd4e83SBenjamin Herrenschmidt     if (cpu->vhyp) {
742a2dd4e83SBenjamin Herrenschmidt         PPCVirtualHypervisorClass *vhc =
743a2dd4e83SBenjamin Herrenschmidt             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
744a2dd4e83SBenjamin Herrenschmidt         vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
745a2dd4e83SBenjamin Herrenschmidt         return;
746a2dd4e83SBenjamin Herrenschmidt     }
747a2dd4e83SBenjamin Herrenschmidt     base = ppc_hash64_hpt_base(cpu);
748a2dd4e83SBenjamin Herrenschmidt 
749a2dd4e83SBenjamin Herrenschmidt 
750a2dd4e83SBenjamin Herrenschmidt     /* The HW performs a non-atomic byte update */
751a2dd4e83SBenjamin Herrenschmidt     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
752a2dd4e83SBenjamin Herrenschmidt }
753a2dd4e83SBenjamin Herrenschmidt 
754a2dd4e83SBenjamin Herrenschmidt static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
755a2dd4e83SBenjamin Herrenschmidt {
756a2dd4e83SBenjamin Herrenschmidt     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
757a2dd4e83SBenjamin Herrenschmidt 
758a2dd4e83SBenjamin Herrenschmidt     if (cpu->vhyp) {
759a2dd4e83SBenjamin Herrenschmidt         PPCVirtualHypervisorClass *vhc =
760a2dd4e83SBenjamin Herrenschmidt             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
761a2dd4e83SBenjamin Herrenschmidt         vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
762a2dd4e83SBenjamin Herrenschmidt         return;
763a2dd4e83SBenjamin Herrenschmidt     }
764a2dd4e83SBenjamin Herrenschmidt     base = ppc_hash64_hpt_base(cpu);
765a2dd4e83SBenjamin Herrenschmidt 
766a2dd4e83SBenjamin Herrenschmidt     /* The HW performs a non-atomic byte update */
767a2dd4e83SBenjamin Herrenschmidt     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
768a2dd4e83SBenjamin Herrenschmidt }
769a2dd4e83SBenjamin Herrenschmidt 
770a864a6b3SDavid Gibson static target_ulong rmls_limit(PowerPCCPU *cpu)
771a864a6b3SDavid Gibson {
772a864a6b3SDavid Gibson     CPUPPCState *env = &cpu->env;
773a864a6b3SDavid Gibson     /*
774d37b40daSDavid Gibson      * In theory the meanings of RMLS values are implementation
775d37b40daSDavid Gibson      * dependent.  In practice, this seems to have been the set from
776d37b40daSDavid Gibson      * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
777a864a6b3SDavid Gibson      *
778a864a6b3SDavid Gibson      * Unsupported values mean the OS has shot itself in the
779a864a6b3SDavid Gibson      * foot. Return a 0-sized RMA in this case, which we expect
780a864a6b3SDavid Gibson      * to trigger an immediate DSI or ISI
781a864a6b3SDavid Gibson      */
782a864a6b3SDavid Gibson     static const target_ulong rma_sizes[16] = {
783d37b40daSDavid Gibson         [0] = 256 * GiB,
784a864a6b3SDavid Gibson         [1] = 16 * GiB,
785a864a6b3SDavid Gibson         [2] = 1 * GiB,
786a864a6b3SDavid Gibson         [3] = 64 * MiB,
787a864a6b3SDavid Gibson         [4] = 256 * MiB,
788a864a6b3SDavid Gibson         [7] = 128 * MiB,
789a864a6b3SDavid Gibson         [8] = 32 * MiB,
790a864a6b3SDavid Gibson     };
791a864a6b3SDavid Gibson     target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
792a864a6b3SDavid Gibson 
793a864a6b3SDavid Gibson     return rma_sizes[rmls];
794a864a6b3SDavid Gibson }
795a864a6b3SDavid Gibson 
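/*
 * Example (hypothetical LPCR value): with LPCR[RMLS] == 3 the table above
 * yields a 64 MiB limit, so in RMO real mode any raddr at or beyond 64 MiB
 * takes the DSI/ISI path in ppc_hash64_handle_mmu_fault() below.
 */
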
7964c24a87fSDavid Gibson static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
7974c24a87fSDavid Gibson {
7984c24a87fSDavid Gibson     CPUPPCState *env = &cpu->env;
7994c24a87fSDavid Gibson     target_ulong lpcr = env->spr[SPR_LPCR];
8004c24a87fSDavid Gibson     uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
8014c24a87fSDavid Gibson     target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
8024c24a87fSDavid Gibson     int i;
8034c24a87fSDavid Gibson 
8044c24a87fSDavid Gibson     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
8054c24a87fSDavid Gibson         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
8064c24a87fSDavid Gibson 
8074c24a87fSDavid Gibson         if (!sps->page_shift) {
8084c24a87fSDavid Gibson             break;
8094c24a87fSDavid Gibson         }
8104c24a87fSDavid Gibson 
8114c24a87fSDavid Gibson         if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
8124c24a87fSDavid Gibson             slb->esid = SLB_ESID_V;
8134c24a87fSDavid Gibson             slb->vsid = vsid;
8144c24a87fSDavid Gibson             slb->sps = sps;
8154c24a87fSDavid Gibson             return 0;
8164c24a87fSDavid Gibson         }
8174c24a87fSDavid Gibson     }
8184c24a87fSDavid Gibson 
8194c24a87fSDavid Gibson     error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
8204c24a87fSDavid Gibson                  TARGET_FMT_lx"\n", lpcr);
8214c24a87fSDavid Gibson 
8224c24a87fSDavid Gibson     return -1;
8234c24a87fSDavid Gibson }
8244c24a87fSDavid Gibson 
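/*
 * The SLBE synthesised above is only used for real-mode accesses when
 * ppc_hash64_use_vrma() is true; its L||LP bits come from LPCR[VRMASD],
 * so an undecodable VRMASD encoding is reported and the callers treat it
 * as a machine check (fault path) or a failed debug translation.
 */
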
825fcf5ef2aSThomas Huth int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
826fcf5ef2aSThomas Huth                                 int rwx, int mmu_idx)
827fcf5ef2aSThomas Huth {
828fcf5ef2aSThomas Huth     CPUState *cs = CPU(cpu);
829fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
8304c24a87fSDavid Gibson     ppc_slb_t vrma_slbe;
831fcf5ef2aSThomas Huth     ppc_slb_t *slb;
832fcf5ef2aSThomas Huth     unsigned apshift;
8337222b94aSDavid Gibson     hwaddr ptex;
834fcf5ef2aSThomas Huth     ppc_hash_pte64_t pte;
83507a68f99SSuraj Jitindar Singh     int exec_prot, pp_prot, amr_prot, prot;
836fcf5ef2aSThomas Huth     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
837fcf5ef2aSThomas Huth     hwaddr raddr;
838fcf5ef2aSThomas Huth 
839fcf5ef2aSThomas Huth     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
840fcf5ef2aSThomas Huth 
841d75cbae8SDavid Gibson     /*
842d75cbae8SDavid Gibson      * Note on LPCR usage: 970 uses HID4, but our special variant of
843d75cbae8SDavid Gibson      * store_spr copies relevant fields into env->spr[SPR_LPCR].
844d75cbae8SDavid Gibson      * Similarly we filter unimplemented bits when storing into LPCR
845d75cbae8SDavid Gibson      * depending on the MMU version. This code can thus just use the
846d75cbae8SDavid Gibson      * LPCR "as-is".
847fcf5ef2aSThomas Huth      */
848fcf5ef2aSThomas Huth 
849fcf5ef2aSThomas Huth     /* 1. Handle real mode accesses */
850fcf5ef2aSThomas Huth     if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
851d75cbae8SDavid Gibson         /*
852d75cbae8SDavid Gibson          * Translation is supposedly "off", but in real mode the top 4
853d75cbae8SDavid Gibson          * effective address bits are (mostly) ignored
854d75cbae8SDavid Gibson          */
855fcf5ef2aSThomas Huth         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
856fcf5ef2aSThomas Huth 
857682c1dfbSDavid Gibson         if (cpu->vhyp) {
858682c1dfbSDavid Gibson             /*
859682c1dfbSDavid Gibson              * In virtual hypervisor mode, there's nothing to do:
860682c1dfbSDavid Gibson              *   EA == GPA == qemu guest address
861682c1dfbSDavid Gibson              */
862682c1dfbSDavid Gibson         } else if (msr_hv || !env->has_hv_mode) {
863fcf5ef2aSThomas Huth             /* In HV mode, add HRMOR if top EA bit is clear */
864fcf5ef2aSThomas Huth             if (!(eaddr >> 63)) {
865fcf5ef2aSThomas Huth                 raddr |= env->spr[SPR_HRMOR];
866fcf5ef2aSThomas Huth             }
8671b99e029SDavid Gibson         } else if (ppc_hash64_use_vrma(env)) {
868682c1dfbSDavid Gibson             /* Emulated VRMA mode */
8694c24a87fSDavid Gibson             slb = &vrma_slbe;
8704c24a87fSDavid Gibson             if (build_vrma_slbe(cpu, slb) != 0) {
871682c1dfbSDavid Gibson                 /* Invalid VRMA setup, machine check */
872fcf5ef2aSThomas Huth                 cs->exception_index = POWERPC_EXCP_MCHECK;
873fcf5ef2aSThomas Huth                 env->error_code = 0;
874fcf5ef2aSThomas Huth                 return 1;
875682c1dfbSDavid Gibson             }
876682c1dfbSDavid Gibson 
877682c1dfbSDavid Gibson             goto skip_slb_search;
878fcf5ef2aSThomas Huth         } else {
8793a56a55cSDavid Gibson             target_ulong limit = rmls_limit(cpu);
8803a56a55cSDavid Gibson 
881682c1dfbSDavid Gibson             /* Emulated old-style RMO mode, bounds check against RMLS */
8823a56a55cSDavid Gibson             if (raddr >= limit) {
883fcf5ef2aSThomas Huth                 if (rwx == 2) {
8848fe08facSDavid Gibson                     ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
885fcf5ef2aSThomas Huth                 } else {
886da82c73aSSuraj Jitindar Singh                     int dsisr = DSISR_PROTFAULT;
887fcf5ef2aSThomas Huth                     if (rwx == 1) {
888da82c73aSSuraj Jitindar Singh                         dsisr |= DSISR_ISSTORE;
889fcf5ef2aSThomas Huth                     }
8908fe08facSDavid Gibson                     ppc_hash64_set_dsi(cs, eaddr, dsisr);
891fcf5ef2aSThomas Huth                 }
892fcf5ef2aSThomas Huth                 return 1;
893fcf5ef2aSThomas Huth             }
894682c1dfbSDavid Gibson 
895682c1dfbSDavid Gibson             raddr |= env->spr[SPR_RMOR];
896fcf5ef2aSThomas Huth         }
897fcf5ef2aSThomas Huth         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
898fcf5ef2aSThomas Huth                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
899fcf5ef2aSThomas Huth                      TARGET_PAGE_SIZE);
900fcf5ef2aSThomas Huth         return 0;
901fcf5ef2aSThomas Huth     }
902fcf5ef2aSThomas Huth 
903fcf5ef2aSThomas Huth     /* 2. Translation is on, so look up the SLB */
904fcf5ef2aSThomas Huth     slb = slb_lookup(cpu, eaddr);
905fcf5ef2aSThomas Huth     if (!slb) {
906b2899495SSuraj Jitindar Singh         /* No entry found, check if in-memory segment tables are in use */
907ca79b3b7SDavid Gibson         if (ppc64_use_proc_tbl(cpu)) {
908b2899495SSuraj Jitindar Singh             /* TODO - Unsupported */
909b2899495SSuraj Jitindar Singh             error_report("Segment Table Support Unimplemented");
910b2899495SSuraj Jitindar Singh             exit(1);
911b2899495SSuraj Jitindar Singh         }
912b2899495SSuraj Jitindar Singh         /* Segment still not found, generate the appropriate interrupt */
913fcf5ef2aSThomas Huth         if (rwx == 2) {
914fcf5ef2aSThomas Huth             cs->exception_index = POWERPC_EXCP_ISEG;
915fcf5ef2aSThomas Huth             env->error_code = 0;
916fcf5ef2aSThomas Huth         } else {
917fcf5ef2aSThomas Huth             cs->exception_index = POWERPC_EXCP_DSEG;
918fcf5ef2aSThomas Huth             env->error_code = 0;
919fcf5ef2aSThomas Huth             env->spr[SPR_DAR] = eaddr;
920fcf5ef2aSThomas Huth         }
921fcf5ef2aSThomas Huth         return 1;
922fcf5ef2aSThomas Huth     }
923fcf5ef2aSThomas Huth 
924fcf5ef2aSThomas Huth skip_slb_search:
925fcf5ef2aSThomas Huth 
926fcf5ef2aSThomas Huth     /* 3. Check for segment level no-execute violation */
927fcf5ef2aSThomas Huth     if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
9288fe08facSDavid Gibson         ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
929fcf5ef2aSThomas Huth         return 1;
930fcf5ef2aSThomas Huth     }
931fcf5ef2aSThomas Huth 
932fcf5ef2aSThomas Huth     /* 4. Locate the PTE in the hash table */
9337222b94aSDavid Gibson     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
9347222b94aSDavid Gibson     if (ptex == -1) {
935fcf5ef2aSThomas Huth         if (rwx == 2) {
9368fe08facSDavid Gibson             ppc_hash64_set_isi(cs, SRR1_NOPTE);
937fcf5ef2aSThomas Huth         } else {
938da82c73aSSuraj Jitindar Singh             int dsisr = DSISR_NOPTE;
939fcf5ef2aSThomas Huth             if (rwx == 1) {
940da82c73aSSuraj Jitindar Singh                 dsisr |= DSISR_ISSTORE;
941fcf5ef2aSThomas Huth             }
9428fe08facSDavid Gibson             ppc_hash64_set_dsi(cs, eaddr, dsisr);
943fcf5ef2aSThomas Huth         }
944fcf5ef2aSThomas Huth         return 1;
945fcf5ef2aSThomas Huth     }
946fcf5ef2aSThomas Huth     qemu_log_mask(CPU_LOG_MMU,
9477222b94aSDavid Gibson                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
948fcf5ef2aSThomas Huth 
949fcf5ef2aSThomas Huth     /* 5. Check access permissions */
950fcf5ef2aSThomas Huth 
95107a68f99SSuraj Jitindar Singh     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
952fcf5ef2aSThomas Huth     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
953fcf5ef2aSThomas Huth     amr_prot = ppc_hash64_amr_prot(cpu, pte);
95407a68f99SSuraj Jitindar Singh     prot = exec_prot & pp_prot & amr_prot;
955fcf5ef2aSThomas Huth 
956fcf5ef2aSThomas Huth     if ((need_prot[rwx] & ~prot) != 0) {
957fcf5ef2aSThomas Huth         /* Access right violation */
958fcf5ef2aSThomas Huth         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
959fcf5ef2aSThomas Huth         if (rwx == 2) {
960a6152b52SSuraj Jitindar Singh             int srr1 = 0;
96107a68f99SSuraj Jitindar Singh             if (PAGE_EXEC & ~exec_prot) {
96207a68f99SSuraj Jitindar Singh                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
96307a68f99SSuraj Jitindar Singh             } else if (PAGE_EXEC & ~pp_prot) {
964a6152b52SSuraj Jitindar Singh                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
965a6152b52SSuraj Jitindar Singh             }
966a6152b52SSuraj Jitindar Singh             if (PAGE_EXEC & ~amr_prot) {
967a6152b52SSuraj Jitindar Singh                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
968a6152b52SSuraj Jitindar Singh             }
9698fe08facSDavid Gibson             ppc_hash64_set_isi(cs, srr1);
970fcf5ef2aSThomas Huth         } else {
971da82c73aSSuraj Jitindar Singh             int dsisr = 0;
972fcf5ef2aSThomas Huth             if (need_prot[rwx] & ~pp_prot) {
973da82c73aSSuraj Jitindar Singh                 dsisr |= DSISR_PROTFAULT;
974fcf5ef2aSThomas Huth             }
975fcf5ef2aSThomas Huth             if (rwx == 1) {
976da82c73aSSuraj Jitindar Singh                 dsisr |= DSISR_ISSTORE;
977fcf5ef2aSThomas Huth             }
978fcf5ef2aSThomas Huth             if (need_prot[rwx] & ~amr_prot) {
979da82c73aSSuraj Jitindar Singh                 dsisr |= DSISR_AMR;
980fcf5ef2aSThomas Huth             }
9818fe08facSDavid Gibson             ppc_hash64_set_dsi(cs, eaddr, dsisr);
982fcf5ef2aSThomas Huth         }
983fcf5ef2aSThomas Huth         return 1;
984fcf5ef2aSThomas Huth     }
985fcf5ef2aSThomas Huth 
986fcf5ef2aSThomas Huth     qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
987fcf5ef2aSThomas Huth 
988fcf5ef2aSThomas Huth     /* 6. Update PTE referenced and changed bits if necessary */
989fcf5ef2aSThomas Huth 
990a2dd4e83SBenjamin Herrenschmidt     if (!(pte.pte1 & HPTE64_R_R)) {
991a2dd4e83SBenjamin Herrenschmidt         ppc_hash64_set_r(cpu, ptex, pte.pte1);
992a2dd4e83SBenjamin Herrenschmidt     }
993a2dd4e83SBenjamin Herrenschmidt     if (!(pte.pte1 & HPTE64_R_C)) {
994fcf5ef2aSThomas Huth         if (rwx == 1) {
995a2dd4e83SBenjamin Herrenschmidt             ppc_hash64_set_c(cpu, ptex, pte.pte1);
996fcf5ef2aSThomas Huth         } else {
997d75cbae8SDavid Gibson             /*
998d75cbae8SDavid Gibson              * Treat the page as read-only for now, so that a later write
999d75cbae8SDavid Gibson              * will pass through this function again to set the C bit
1000d75cbae8SDavid Gibson              */
1001fcf5ef2aSThomas Huth             prot &= ~PAGE_WRITE;
1002fcf5ef2aSThomas Huth         }
1003fcf5ef2aSThomas Huth     }
1004fcf5ef2aSThomas Huth 
1005fcf5ef2aSThomas Huth     /* 7. Determine the real address from the PTE */
1006fcf5ef2aSThomas Huth 
1007fcf5ef2aSThomas Huth     raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
1008fcf5ef2aSThomas Huth 
1009fcf5ef2aSThomas Huth     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
1010fcf5ef2aSThomas Huth                  prot, mmu_idx, 1ULL << apshift);
1011fcf5ef2aSThomas Huth 
1012fcf5ef2aSThomas Huth     return 0;
1013fcf5ef2aSThomas Huth }
1014fcf5ef2aSThomas Huth 
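/*
 * Note on the rwx argument used in the fault path above: 0 is a data
 * load, 1 a data store and 2 an instruction fetch, matching the
 * need_prot[] table of PAGE_READ / PAGE_WRITE / PAGE_EXEC.
 */
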
1015fcf5ef2aSThomas Huth hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
1016fcf5ef2aSThomas Huth {
1017fcf5ef2aSThomas Huth     CPUPPCState *env = &cpu->env;
10184c24a87fSDavid Gibson     ppc_slb_t vrma_slbe;
1019fcf5ef2aSThomas Huth     ppc_slb_t *slb;
10207222b94aSDavid Gibson     hwaddr ptex, raddr;
1021fcf5ef2aSThomas Huth     ppc_hash_pte64_t pte;
1022fcf5ef2aSThomas Huth     unsigned apshift;
1023fcf5ef2aSThomas Huth 
1024fcf5ef2aSThomas Huth     /* Handle real mode */
1025fcf5ef2aSThomas Huth     if (msr_dr == 0) {
1026fcf5ef2aSThomas Huth         /* In real mode the top 4 effective address bits are ignored */
1027fcf5ef2aSThomas Huth         raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
1028fcf5ef2aSThomas Huth 
1029682c1dfbSDavid Gibson         if (cpu->vhyp) {
1030682c1dfbSDavid Gibson             /*
1031682c1dfbSDavid Gibson              * In virtual hypervisor mode, there's nothing to do:
1032682c1dfbSDavid Gibson              *   EA == GPA == qemu guest address
1033682c1dfbSDavid Gibson              */
1034682c1dfbSDavid Gibson             return raddr;
1035682c1dfbSDavid Gibson         } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
1036fcf5ef2aSThomas Huth             /* In HV mode, add HRMOR if top EA bit is clear */
1037fcf5ef2aSThomas Huth             return raddr | env->spr[SPR_HRMOR];
10381b99e029SDavid Gibson         } else if (ppc_hash64_use_vrma(env)) {
1039682c1dfbSDavid Gibson             /* Emulated VRMA mode */
10404c24a87fSDavid Gibson             slb = &vrma_slbe;
10414c24a87fSDavid Gibson             if (build_vrma_slbe(cpu, slb) != 0) {
1042fcf5ef2aSThomas Huth                 return -1;
1043fcf5ef2aSThomas Huth             }
1044fcf5ef2aSThomas Huth         } else {
10453a56a55cSDavid Gibson             target_ulong limit = rmls_limit(cpu);
10463a56a55cSDavid Gibson 
1047682c1dfbSDavid Gibson             /* Emulated old-style RMO mode, bounds check against RMLS */
10483a56a55cSDavid Gibson             if (raddr >= limit) {
1049fcf5ef2aSThomas Huth                 return -1;
1050fcf5ef2aSThomas Huth             }
1051682c1dfbSDavid Gibson             return raddr | env->spr[SPR_RMOR];
1052682c1dfbSDavid Gibson         }
1053fcf5ef2aSThomas Huth     } else {
1054fcf5ef2aSThomas Huth         slb = slb_lookup(cpu, addr);
1055fcf5ef2aSThomas Huth         if (!slb) {
1056fcf5ef2aSThomas Huth             return -1;
1057fcf5ef2aSThomas Huth         }
1058fcf5ef2aSThomas Huth     }
1059fcf5ef2aSThomas Huth 
10607222b94aSDavid Gibson     ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
10617222b94aSDavid Gibson     if (ptex == -1) {
1062fcf5ef2aSThomas Huth         return -1;
1063fcf5ef2aSThomas Huth     }
1064fcf5ef2aSThomas Huth 
1065fcf5ef2aSThomas Huth     return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
1066fcf5ef2aSThomas Huth         & TARGET_PAGE_MASK;
1067fcf5ef2aSThomas Huth }
1068fcf5ef2aSThomas Huth 
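/*
 * Called when an HPTE is removed or modified, so that any cached
 * translations derived from it are dropped from QEMU's TLB.  The ptex,
 * pte0 and pte1 arguments are currently unused because we simply flush
 * everything (see the comment below).
 */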
10697222b94aSDavid Gibson void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
1070fcf5ef2aSThomas Huth                                target_ulong pte0, target_ulong pte1)
1071fcf5ef2aSThomas Huth {
1072fcf5ef2aSThomas Huth     /*
1073fcf5ef2aSThomas Huth      * XXX: given that there are too many segments to invalidate
1074fcf5ef2aSThomas Huth      * individually, and QEMU still has no tlb_flush_mask(env, n,
1075fcf5ef2aSThomas Huth      * mask), we just invalidate all TLBs
1076fcf5ef2aSThomas Huth      */
1077fcf5ef2aSThomas Huth     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
1078fcf5ef2aSThomas Huth }
1079fcf5ef2aSThomas Huth 
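/*
 * Store a new value into the LPCR, masking off any bits that are not
 * implemented for this CPU class (pcc->lpcr_mask).
 */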
10805ad55315SDavid Gibson void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
1081fcf5ef2aSThomas Huth {
1082e232ecccSDavid Gibson     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
10835ad55315SDavid Gibson     CPUPPCState *env = &cpu->env;
1084fcf5ef2aSThomas Huth 
1085e232ecccSDavid Gibson     env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
1086fcf5ef2aSThomas Huth }
1087a059471dSDavid Gibson 
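/*
 * TCG helper behind guest mtspr writes to the LPCR; it applies the same
 * per-class masking as ppc_store_lpcr().
 */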
10885ad55315SDavid Gibson void helper_store_lpcr(CPUPPCState *env, target_ulong val)
10895ad55315SDavid Gibson {
1090db70b311SRichard Henderson     PowerPCCPU *cpu = env_archcpu(env);
10915ad55315SDavid Gibson 
10925ad55315SDavid Gibson     ppc_store_lpcr(cpu, val);
10935ad55315SDavid Gibson }
10945ad55315SDavid Gibson 
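/*
 * Give the CPU its own, mutable copy of the class's hash MMU options so
 * that later code (e.g. ppc_hash64_filter_pagesizes()) can adjust them
 * per CPU.  CPU models without a 64-bit hash MMU have no options to copy
 * and are left with cpu->hash64_opts == NULL.
 */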
1095a059471dSDavid Gibson void ppc_hash64_init(PowerPCCPU *cpu)
1096a059471dSDavid Gibson {
1097a059471dSDavid Gibson     CPUPPCState *env = &cpu->env;
1098a059471dSDavid Gibson     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1099a059471dSDavid Gibson 
110021e405f1SDavid Gibson     if (!pcc->hash64_opts) {
110121e405f1SDavid Gibson         assert(!(env->mmu_model & POWERPC_MMU_64));
110221e405f1SDavid Gibson         return;
110321e405f1SDavid Gibson     }
110421e405f1SDavid Gibson 
110521e405f1SDavid Gibson     cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
110621e405f1SDavid Gibson }
110721e405f1SDavid Gibson 
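/* Free the per-CPU copy of the hash MMU options allocated above */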
110821e405f1SDavid Gibson void ppc_hash64_finalize(PowerPCCPU *cpu)
110921e405f1SDavid Gibson {
111021e405f1SDavid Gibson     g_free(cpu->hash64_opts);
111121e405f1SDavid Gibson }
111221e405f1SDavid Gibson 
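/*
 * Baseline hash MMU options: 64 SLB entries, no optional features, and
 * only the 4K and 16M page sizes.
 */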
111321e405f1SDavid Gibson const PPCHash64Options ppc_hash64_opts_basic = {
111458969eeeSDavid Gibson     .flags = 0,
111567d7d66fSDavid Gibson     .slb_size = 64,
1116a059471dSDavid Gibson     .sps = {
1117a059471dSDavid Gibson         { .page_shift = 12, /* 4K */
1118a059471dSDavid Gibson           .slb_enc = 0,
1119a059471dSDavid Gibson           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1120a059471dSDavid Gibson         },
1121a059471dSDavid Gibson         { .page_shift = 24, /* 16M */
1122a059471dSDavid Gibson           .slb_enc = 0x100,
1123a059471dSDavid Gibson           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1124a059471dSDavid Gibson         },
1125a059471dSDavid Gibson     },
1126a059471dSDavid Gibson };
1127b07c59f7SDavid Gibson 
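/*
 * POWER7-style hash MMU options: 1T segments, the AMR and cache-inhibited
 * large pages, 32 SLB entries, and 4K/64K/16M/16G base page sizes with
 * their MPSS (multiple page sizes per segment) encodings.
 */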
1128b07c59f7SDavid Gibson const PPCHash64Options ppc_hash64_opts_POWER7 = {
112926cd35b8SDavid Gibson     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
113067d7d66fSDavid Gibson     .slb_size = 32,
1131b07c59f7SDavid Gibson     .sps = {
1132b07c59f7SDavid Gibson         {
1133b07c59f7SDavid Gibson             .page_shift = 12, /* 4K */
1134b07c59f7SDavid Gibson             .slb_enc = 0,
1135b07c59f7SDavid Gibson             .enc = { { .page_shift = 12, .pte_enc = 0 },
1136b07c59f7SDavid Gibson                      { .page_shift = 16, .pte_enc = 0x7 },
1137b07c59f7SDavid Gibson                      { .page_shift = 24, .pte_enc = 0x38 }, },
1138b07c59f7SDavid Gibson         },
1139b07c59f7SDavid Gibson         {
1140b07c59f7SDavid Gibson             .page_shift = 16, /* 64K */
1141b07c59f7SDavid Gibson             .slb_enc = SLB_VSID_64K,
1142b07c59f7SDavid Gibson             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1143b07c59f7SDavid Gibson                      { .page_shift = 24, .pte_enc = 0x8 }, },
1144b07c59f7SDavid Gibson         },
1145b07c59f7SDavid Gibson         {
1146b07c59f7SDavid Gibson             .page_shift = 24, /* 16M */
1147b07c59f7SDavid Gibson             .slb_enc = SLB_VSID_16M,
1148b07c59f7SDavid Gibson             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1149b07c59f7SDavid Gibson         },
1150b07c59f7SDavid Gibson         {
1151b07c59f7SDavid Gibson             .page_shift = 34, /* 16G */
1152b07c59f7SDavid Gibson             .slb_enc = SLB_VSID_16G,
1153b07c59f7SDavid Gibson             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1154b07c59f7SDavid Gibson         },
1155b07c59f7SDavid Gibson     }
1156b07c59f7SDavid Gibson };
115727f00f0aSDavid Gibson 
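/*
 * Filter the (segment size, page size) combinations advertised by this
 * CPU, keeping only those for which cb() returns true.  Surviving entries
 * are compacted to the front of the tables and the remainder is cleared.
 * If no page size of 64K or larger survives, the PPC_HASH64_CI_LARGEPAGE
 * flag is cleared as well.
 */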
115827f00f0aSDavid Gibson void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
115927f00f0aSDavid Gibson                                  bool (*cb)(void *, uint32_t, uint32_t),
116027f00f0aSDavid Gibson                                  void *opaque)
116127f00f0aSDavid Gibson {
116227f00f0aSDavid Gibson     PPCHash64Options *opts = cpu->hash64_opts;
116327f00f0aSDavid Gibson     int i;
116427f00f0aSDavid Gibson     int n = 0;
116527f00f0aSDavid Gibson     bool ci_largepage = false;
116627f00f0aSDavid Gibson 
116727f00f0aSDavid Gibson     assert(opts);
116827f00f0aSDavid Gibson 
116927f00f0aSDavid Gibson     n = 0;
117027f00f0aSDavid Gibson     for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
117127f00f0aSDavid Gibson         PPCHash64SegmentPageSizes *sps = &opts->sps[i];
117227f00f0aSDavid Gibson         int j;
117327f00f0aSDavid Gibson         int m = 0;
117427f00f0aSDavid Gibson 
117527f00f0aSDavid Gibson         assert(n <= i);
117627f00f0aSDavid Gibson 
117727f00f0aSDavid Gibson         if (!sps->page_shift) {
117827f00f0aSDavid Gibson             break;
117927f00f0aSDavid Gibson         }
118027f00f0aSDavid Gibson 
118127f00f0aSDavid Gibson         for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
118227f00f0aSDavid Gibson             PPCHash64PageSize *ps = &sps->enc[j];
118327f00f0aSDavid Gibson 
118427f00f0aSDavid Gibson             assert(m <= j);
118527f00f0aSDavid Gibson             if (!ps->page_shift) {
118627f00f0aSDavid Gibson                 break;
118727f00f0aSDavid Gibson             }
118827f00f0aSDavid Gibson 
118927f00f0aSDavid Gibson             if (cb(opaque, sps->page_shift, ps->page_shift)) {
119027f00f0aSDavid Gibson                 if (ps->page_shift >= 16) {
119127f00f0aSDavid Gibson                     ci_largepage = true;
119227f00f0aSDavid Gibson                 }
119327f00f0aSDavid Gibson                 sps->enc[m++] = *ps;
119427f00f0aSDavid Gibson             }
119527f00f0aSDavid Gibson         }
119627f00f0aSDavid Gibson 
119727f00f0aSDavid Gibson         /* Clear rest of the row */
119827f00f0aSDavid Gibson         for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
119927f00f0aSDavid Gibson             memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
120027f00f0aSDavid Gibson         }
120127f00f0aSDavid Gibson 
120227f00f0aSDavid Gibson         if (m) {
120327f00f0aSDavid Gibson             n++;
120427f00f0aSDavid Gibson         }
120527f00f0aSDavid Gibson     }
120627f00f0aSDavid Gibson 
120727f00f0aSDavid Gibson     /* Clear the rest of the table */
120827f00f0aSDavid Gibson     for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
120927f00f0aSDavid Gibson         memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
121027f00f0aSDavid Gibson     }
121127f00f0aSDavid Gibson 
121227f00f0aSDavid Gibson     if (!ci_largepage) {
121327f00f0aSDavid Gibson         opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
121427f00f0aSDavid Gibson     }
121527f00f0aSDavid Gibson }
1216