/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "mmu-book3s-v3.h"

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

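/*
 * Find the SLB entry (if any) that maps effective address @eaddr,
 * trying both the 256MB and the 1TB ESID forms against each valid
 * entry.  Returns NULL when no entry matches.
 */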
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /*
             * XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

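/*
 * slbie invalidates a single SLB entry; the "global" variant (used by
 * slbieg) additionally requests a global TLB flush instead of a
 * purely local one.
 */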
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

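/*
 * PTE protection checks.  The access permissions eventually granted
 * are the intersection of three masks: the no-execute/guarded bits
 * in the PTE, the basic PP/key storage protection, and the AMR/IAMR
 * virtual page class key protection.
 */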
/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If the bit is cleared, return PAGE_READ | PAGE_WRITE | PAGE_EXEC
     * since EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR.
     * Check whether the IAMR allows the instruction access - if it
     * does not, the mask returned by ppc_hash64_iamr_prot() lacks
     * PAGE_EXEC and that bit is cleared from prot; otherwise prot is
     * left unchanged, indicating that execution is permitted.
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

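/*
 * Map @n consecutive HPTEs starting at index @ptex for reading.  With
 * a virtual hypervisor the hash page table is accessed through its
 * map_hptes callback; otherwise it is mapped directly from guest
 * memory at the HPT base address.
 */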
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

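/*
 * ISA v3.00 (POWER9) moved the segment size (B) field from the first
 * HPTE doubleword into the second; convert such entries back to the
 * older layout so that the common lookup code below needs no changes.
 */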
static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}


static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

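    /*
     * If the primary PTEG has no matching entry, retry with the
     * secondary hash (the ones-complement of the primary hash) and
     * the H bit set in the compare value.
     */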
    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in cpu->hash64_opts->sps need to be carefully
     * chosen so that this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

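/*
 * As above, but for data accesses: depending on VPM and MSR[HV] the
 * fault is delivered either as a DSI to the OS or as an HDSI to the
 * hypervisor, with the faulting address and reason in (H)DAR/(H)DSISR.
 */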
static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}


int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

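    /*
     * Translation proceeds in the numbered steps below: real mode
     * handling, SLB lookup, segment no-execute check, hash table
     * search, permission checks, R/C bit update and finally the
     * QEMU TLB fill.
     */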
    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the appropriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
                }
                return 1;
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

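    /* The VRMA real mode path above jumps here with a synthetic SLB entry */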
skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

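    /*
     * Real hardware updates the R (referenced) and C (changed) bits
     * in the HPTE as a side effect of the access; since we emulate
     * that in software, do it here before installing the translation.
     */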
    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit
         */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            return raddr | env->spr[SPR_HRMOR];
        }

        /* Otherwise, check VPM for RMA vs VRMA */
        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
            slb = &env->vrma_slb;
            if (!slb->sps) {
                return -1;
            }
        } else if (raddr < env->rmls) {
            /* RMA. Check bounds in RMLS */
            return raddr | env->spr[SPR_RMOR];
        } else {
            return -1;
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                           uint64_t pte0, uint64_t pte1)
{
    hwaddr base;
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    stq_phys(CPU(cpu)->as, base + offset, pte0);
    stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

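/*
 * Recompute the real mode area limit (env->rmls) from LPCR[RMLS];
 * real mode accesses are bounds-checked against it when VPM0 is
 * clear.
 */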
static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = env->spr[SPR_LPCR];

    /*
     * This is the full 4 bits encoding of POWER8. Previous
     * CPUs only support a subset of these but the filtering
     * is done when writing LPCR
     */
    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
    case 0x8: /* 32MB */
        env->rmls = 0x2000000ull;
        break;
    case 0x3: /* 64MB */
        env->rmls = 0x4000000ull;
        break;
    case 0x7: /* 128MB */
        env->rmls = 0x8000000ull;
        break;
    case 0x4: /* 256MB */
        env->rmls = 0x10000000ull;
        break;
    case 0x2: /* 1GB */
        env->rmls = 0x40000000ull;
        break;
    case 0x1: /* 16GB */
        env->rmls = 0x400000000ull;
        break;
    default:
        /* What to do here ??? */
        env->rmls = 0;
    }
}

static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    const PPCHash64SegmentPageSizes *sps = NULL;
    target_ulong esid, vsid, lpcr;
    ppc_slb_t *slb = &env->vrma_slb;
    uint32_t vrmasd;
    int i;

    /* First clear it */
    slb->esid = slb->vsid = 0;
    slb->sps = NULL;

    /* Is VRMA enabled ? */
    lpcr = env->spr[SPR_LPCR];
    if (!(lpcr & LPCR_VPM0)) {
        return;
    }

    /*
     * Make one up. Mostly ignore the ESID which will not be needed
     * for translation
     */
    vsid = SLB_VSID_VRMA;
    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
    esid = SLB_ESID_V;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
        return;
    }

    slb->vsid = vsid;
    slb->esid = esid;
    slb->sps = sps;
}

void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (env->mmu_model) {
    case POWERPC_MMU_64B: /* 970 */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /*
         * XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it
         * it doesn't actually matter
         *
         * XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    case POWERPC_MMU_3_00: /* P9 */
        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
        /*
         * If we have a virtual hypervisor, we need to bring back RMLS. It
         * doesn't exist on an actual P9 but that's all we know how to
         * configure with softmmu at the moment
         */
        if (cpu->vhyp) {
            lpcr |= (val & LPCR_RMLS);
        }
        break;
    default:
        ;
    }
    env->spr[SPR_LPCR] = lpcr;
    ppc_hash64_update_rmls(cpu);
    ppc_hash64_update_vrma(cpu);
}

void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    ppc_store_lpcr(cpu, val);
}

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!(env->mmu_model & POWERPC_MMU_64));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

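/*
 * POWER7 and later additionally support 1TB segments, AMR protection
 * and cache-inhibited large pages, as well as 64kiB and 16GiB base
 * page sizes.
 */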
const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};

void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
                                 bool (*cb)(void *, uint32_t, uint32_t),
                                 void *opaque)
{
    PPCHash64Options *opts = cpu->hash64_opts;
    int i;
    int n = 0;
    bool ci_largepage = false;

    assert(opts);

    n = 0;
    for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
        PPCHash64SegmentPageSizes *sps = &opts->sps[i];
        int j;
        int m = 0;

        assert(n <= i);

        if (!sps->page_shift) {
            break;
        }

        for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
            PPCHash64PageSize *ps = &sps->enc[j];

            assert(m <= j);
            if (!ps->page_shift) {
                break;
            }

            if (cb(opaque, sps->page_shift, ps->page_shift)) {
                if (ps->page_shift >= 16) {
                    ci_largepage = true;
                }
                sps->enc[m++] = *ps;
            }
        }

        /* Clear rest of the row */
        for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
            memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
        }

        if (m) {
            n++;
        }
    }

    /* Clear the rest of the table */
    for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
        memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
    }

    if (!ci_largepage) {
        opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
    }
}