/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory loads and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}
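/*
 * Illustrative note (not from the original source): in 32-bit mode the
 * (uint32_t) cast above makes effective addresses wrap modulo 2^32, so
 * e.g. addr_add(env, 0xfffffffc, 8) yields 0x00000004 rather than
 * carrying into bits 63:32.
 */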
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}
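/*
 * Worked example (illustrative, assuming 4 KiB target pages, so
 * TARGET_PAGE_MASK == ~0xfff): for addr = 0x1ffd and nb = 8,
 *     nb_pg1 = -(0x1ffd | ~0xfff) = -(-3) = 3
 * i.e. 3 bytes remain on the first page and nb_pg2 = 5 spill onto the
 * next one, so both pages are probed before any byte is transferred.
 */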
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires I/O.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}
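/*
 * Worked example (illustrative): a final nb == 3 with memory bytes
 * B0 B1 B2 leaves the target register holding
 *     (B0 << 24) | (B1 << 16) | (B2 << 8)
 * with the least-significant byte zeroed, matching the lsw rule that
 * unfilled low-order bytes of the last register are cleared.
 */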
/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires I/O.  */
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}
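/*
 * Worked examples (illustrative): with a byte count of 9 in XER, lswx
 * touches DIV_ROUND_UP(9, 4) = 3 registers, so an rA or rB falling
 * inside that three-register window is rejected as an invalid form.
 * For stsw with a final nb == 3 and gpr[reg] = 0xaabbccdd, the bytes
 * stored are aa, bb, cc -- the three most-significant bytes.
 */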
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}
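/*
 * Worked example (illustrative): with a 128-byte cache line,
 * mask = ~(128 - 1) = ~0x7f, so dcbz at addr = 0x12345 clears the 128
 * bytes starting at 0x12300 and kills any lwarx/ldarx reservation on
 * that same line.
 */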
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}
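/*
 * Note (illustrative): a 128-bit lq result does not fit a single
 * helper return value, so the two halves travel separately -- the low
 * 64 bits through the normal return register and the high 64 bits
 * through env->retxh, which the translator then copies into the
 * second target GPR.
 */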
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
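/*
 * Note (illustrative): stqcx. succeeds only when the address still
 * matches the lqarx reservation and the 128-bit compare-and-swap finds
 * the previously loaded reserve_val/reserve_val2 pair in memory; any
 * intervening store to the quadword makes the cmpxchg miss and the
 * store-conditional fail. The reservation is consumed either way.
 */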
uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to
 * take into account endianness of the target. This is done for the
 * little-endian PPC64 user-mode target.
 */

#define LVE(name, access, swap, element)                    \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,      \
                       target_ulong addr)                   \
    {                                                       \
        size_t n_elems = ARRAY_SIZE(r->element);            \
        int adjust = HI_IDX * (n_elems - 1);                \
        int sh = sizeof(r->element[0]) >> 1;                \
        int index = (addr & 0xf) >> sh;                     \
        if (msr_le) {                                       \
            index = n_elems - index - 1;                    \
        }                                                   \
                                                            \
        if (needs_byteswap(env)) {                          \
            r->element[LO_IDX ? index : (adjust - index)] = \
                swap(access(env, addr, GETPC()));           \
        } else {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = \
                access(env, addr, GETPC());                 \
        }                                                   \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE
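/*
 * Worked example (illustrative): lvehx with EA & 0xf == 6 gives sh = 1
 * and index = 3; with MSR[LE] set the index is mirrored to
 * n_elems - 3 - 1 = 4, so the same memory halfword lands in the
 * element the guest expects regardless of its endianness.
 */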
#define STVE(name, access, swap, element)                             \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                \
                       target_ulong addr)                             \
    {                                                                 \
        size_t n_elems = ARRAY_SIZE(r->element);                      \
        int adjust = HI_IDX * (n_elems - 1);                          \
        int sh = sizeof(r->element[0]) >> 1;                          \
        int index = (addr & 0xf) >> sh;                               \
        if (msr_le) {                                                 \
            index = n_elems - index - 1;                              \
        }                                                             \
                                                                      \
        if (needs_byteswap(env)) {                                    \
            access(env, addr, swap(r->element[LO_IDX ? index :        \
                                              (adjust - index)]),     \
                   GETPC());                                          \
        } else {                                                      \
            access(env, addr, r->element[LO_IDX ? index :             \
                                         (adjust - index)], GETPC()); \
        }                                                             \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,               \
                   ppc_vsr_t *xt, target_ulong rb)                    \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL
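/*
 * Worked example (illustrative): lxvl encodes the byte count in the
 * top byte of rB, so rb = 0x0500000000000000 gives GET_NB(rb) = 5 and
 * loads five bytes into the most-significant end of XT, leaving the
 * remaining eleven bytes zero; counts of 16 or more are clamped to 16.
 */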
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX
void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}