/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

//#define DEBUG_OP

/*
 * Return true when guest data accesses need byte-swapping, i.e. when the
 * current MSR[LE] setting disagrees with the endianness this target
 * binary was compiled for.
 */
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

/*
 * Add a (signed) offset to an effective address, truncating the result
 * to 32 bits when the CPU is not currently in 64-bit mode.
 */
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

/*
 * lmw: Load Multiple Word.  Loads consecutive words from addr into
 * GPRs reg..31.  GETPC() is forwarded so a faulting access can be
 * unwound to the guest instruction.
 */
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

/* stmw: Store Multiple Word.  Stores GPRs reg..31 to consecutive words. */
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

/*
 * Common code for lswi/lswx: load nb bytes starting at addr into
 * successive registers beginning with reg, wrapping from r31 back to r0.
 * A trailing partial word is loaded left-justified, with the register's
 * remaining low-order bytes cleared.
 */
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

/* lswi: Load String Word Immediate (byte count known at translate time). */
void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * lswx: Load String Word Indexed (byte count taken from XER at run time).
 *
 * The PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  On the other hand, IBM says this
 * is valid, but rA won't be loaded.  For now, follow the spec and raise a
 * program-interrupt/invalid exception.
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

/*
 * stswi/stswx common code: store nb bytes from successive registers
 * starting at reg, wrapping mod 32; a trailing partial word is stored
 * from the register's high-order bytes.
 */
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

/*
 * Common code for dcbz/dcbzep: zero one data cache line at addr.
 *
 * When epid is set, stores go through the external-PID translation
 * regime (PPC_TLB_EPID_STORE) instead of the normal data MMU index.
 * Zeroing the line invalidates any matching lwarx/ldarx reservation.
 * The fast path memsets through a host pointer obtained from the TLB;
 * when no host mapping is available, the line is cleared 8 bytes at a
 * time via the slow path so page faults are handled normally.
 */
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /*
     * Check for dcbz vs dcbzl on 970: with the HID5 DCBZ_SIZE control
     * set, plain dcbz (bit 21 clear in the opcode) clears only 32 bytes
     * while dcbzl still clears a full line.
     */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address to the cache-line boundary */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation: writing the line breaks a matching reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            if (epid) {
#if !defined(CONFIG_USER_ONLY)
                /* Does not make sense on USER_ONLY config */
                cpu_stq_eps_ra(env, addr + i, 0, retaddr);
#endif
            } else {
                cpu_stq_data_ra(env, addr + i, 0, retaddr);
            }
        }
    }
}
189*50728199SRoman Kapl void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode) 190*50728199SRoman Kapl { 191*50728199SRoman Kapl dcbz_common(env, addr, opcode, false, GETPC()); 192*50728199SRoman Kapl } 193*50728199SRoman Kapl 194*50728199SRoman Kapl void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode) 195*50728199SRoman Kapl { 196*50728199SRoman Kapl dcbz_common(env, addr, opcode, true, GETPC()); 197*50728199SRoman Kapl } 198fcf5ef2aSThomas Huth 199fcf5ef2aSThomas Huth void helper_icbi(CPUPPCState *env, target_ulong addr) 200fcf5ef2aSThomas Huth { 201fcf5ef2aSThomas Huth addr &= ~(env->dcache_line_size - 1); 202fcf5ef2aSThomas Huth /* Invalidate one cache line : 203fcf5ef2aSThomas Huth * PowerPC specification says this is to be treated like a load 204fcf5ef2aSThomas Huth * (not a fetch) by the MMU. To be sure it will be so, 205fcf5ef2aSThomas Huth * do the load "by hand". 206fcf5ef2aSThomas Huth */ 207fcf5ef2aSThomas Huth cpu_ldl_data_ra(env, addr, GETPC()); 208fcf5ef2aSThomas Huth } 209fcf5ef2aSThomas Huth 210*50728199SRoman Kapl void helper_icbiep(CPUPPCState *env, target_ulong addr) 211*50728199SRoman Kapl { 212*50728199SRoman Kapl #if !defined(CONFIG_USER_ONLY) 213*50728199SRoman Kapl /* See comments above */ 214*50728199SRoman Kapl addr &= ~(env->dcache_line_size - 1); 215*50728199SRoman Kapl cpu_ldl_epl_ra(env, addr, GETPC()); 216*50728199SRoman Kapl #endif 217*50728199SRoman Kapl } 218*50728199SRoman Kapl 219fcf5ef2aSThomas Huth /* XXX: to be tested */ 220fcf5ef2aSThomas Huth target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, 221fcf5ef2aSThomas Huth uint32_t ra, uint32_t rb) 222fcf5ef2aSThomas Huth { 223fcf5ef2aSThomas Huth int i, c, d; 224fcf5ef2aSThomas Huth 225fcf5ef2aSThomas Huth d = 24; 226fcf5ef2aSThomas Huth for (i = 0; i < xer_bc; i++) { 227fcf5ef2aSThomas Huth c = cpu_ldub_data_ra(env, addr, GETPC()); 228fcf5ef2aSThomas Huth addr = addr_add(env, addr, 1); 229fcf5ef2aSThomas Huth /* ra 
(if not 0) and rb are never modified */ 230fcf5ef2aSThomas Huth if (likely(reg != rb && (ra == 0 || reg != ra))) { 231fcf5ef2aSThomas Huth env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); 232fcf5ef2aSThomas Huth } 233fcf5ef2aSThomas Huth if (unlikely(c == xer_cmp)) { 234fcf5ef2aSThomas Huth break; 235fcf5ef2aSThomas Huth } 236fcf5ef2aSThomas Huth if (likely(d != 0)) { 237fcf5ef2aSThomas Huth d -= 8; 238fcf5ef2aSThomas Huth } else { 239fcf5ef2aSThomas Huth d = 24; 240fcf5ef2aSThomas Huth reg++; 241fcf5ef2aSThomas Huth reg = reg & 0x1F; 242fcf5ef2aSThomas Huth } 243fcf5ef2aSThomas Huth } 244fcf5ef2aSThomas Huth return i; 245fcf5ef2aSThomas Huth } 246fcf5ef2aSThomas Huth 247f34ec0f6SRichard Henderson #ifdef TARGET_PPC64 24894bf2658SRichard Henderson uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, 24994bf2658SRichard Henderson uint32_t opidx) 25094bf2658SRichard Henderson { 251f34ec0f6SRichard Henderson Int128 ret; 252f34ec0f6SRichard Henderson 253f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. */ 254f34ec0f6SRichard Henderson assert(HAVE_ATOMIC128); 255f34ec0f6SRichard Henderson ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); 25694bf2658SRichard Henderson env->retxh = int128_gethi(ret); 25794bf2658SRichard Henderson return int128_getlo(ret); 25894bf2658SRichard Henderson } 25994bf2658SRichard Henderson 26094bf2658SRichard Henderson uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, 26194bf2658SRichard Henderson uint32_t opidx) 26294bf2658SRichard Henderson { 263f34ec0f6SRichard Henderson Int128 ret; 264f34ec0f6SRichard Henderson 265f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. 
*/ 266f34ec0f6SRichard Henderson assert(HAVE_ATOMIC128); 267f34ec0f6SRichard Henderson ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); 26894bf2658SRichard Henderson env->retxh = int128_gethi(ret); 26994bf2658SRichard Henderson return int128_getlo(ret); 27094bf2658SRichard Henderson } 271f89ced5fSRichard Henderson 272f89ced5fSRichard Henderson void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr, 273f89ced5fSRichard Henderson uint64_t lo, uint64_t hi, uint32_t opidx) 274f89ced5fSRichard Henderson { 275f34ec0f6SRichard Henderson Int128 val; 276f34ec0f6SRichard Henderson 277f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. */ 278f34ec0f6SRichard Henderson assert(HAVE_ATOMIC128); 279f34ec0f6SRichard Henderson val = int128_make128(lo, hi); 280f89ced5fSRichard Henderson helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); 281f89ced5fSRichard Henderson } 282f89ced5fSRichard Henderson 283f89ced5fSRichard Henderson void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, 284f89ced5fSRichard Henderson uint64_t lo, uint64_t hi, uint32_t opidx) 285f89ced5fSRichard Henderson { 286f34ec0f6SRichard Henderson Int128 val; 287f34ec0f6SRichard Henderson 288f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. */ 289f34ec0f6SRichard Henderson assert(HAVE_ATOMIC128); 290f34ec0f6SRichard Henderson val = int128_make128(lo, hi); 291f89ced5fSRichard Henderson helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); 292f89ced5fSRichard Henderson } 2934a9b3c5dSRichard Henderson 2944a9b3c5dSRichard Henderson uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr, 2954a9b3c5dSRichard Henderson uint64_t new_lo, uint64_t new_hi, 2964a9b3c5dSRichard Henderson uint32_t opidx) 2974a9b3c5dSRichard Henderson { 2984a9b3c5dSRichard Henderson bool success = false; 2994a9b3c5dSRichard Henderson 300f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. 
*/ 301f34ec0f6SRichard Henderson assert(HAVE_CMPXCHG128); 302f34ec0f6SRichard Henderson 3034a9b3c5dSRichard Henderson if (likely(addr == env->reserve_addr)) { 3044a9b3c5dSRichard Henderson Int128 oldv, cmpv, newv; 3054a9b3c5dSRichard Henderson 3064a9b3c5dSRichard Henderson cmpv = int128_make128(env->reserve_val2, env->reserve_val); 3074a9b3c5dSRichard Henderson newv = int128_make128(new_lo, new_hi); 3084a9b3c5dSRichard Henderson oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, 3094a9b3c5dSRichard Henderson opidx, GETPC()); 3104a9b3c5dSRichard Henderson success = int128_eq(oldv, cmpv); 3114a9b3c5dSRichard Henderson } 3124a9b3c5dSRichard Henderson env->reserve_addr = -1; 3134a9b3c5dSRichard Henderson return env->so + success * CRF_EQ_BIT; 3144a9b3c5dSRichard Henderson } 3154a9b3c5dSRichard Henderson 3164a9b3c5dSRichard Henderson uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, 3174a9b3c5dSRichard Henderson uint64_t new_lo, uint64_t new_hi, 3184a9b3c5dSRichard Henderson uint32_t opidx) 3194a9b3c5dSRichard Henderson { 3204a9b3c5dSRichard Henderson bool success = false; 3214a9b3c5dSRichard Henderson 322f34ec0f6SRichard Henderson /* We will have raised EXCP_ATOMIC from the translator. 
*/ 323f34ec0f6SRichard Henderson assert(HAVE_CMPXCHG128); 324f34ec0f6SRichard Henderson 3254a9b3c5dSRichard Henderson if (likely(addr == env->reserve_addr)) { 3264a9b3c5dSRichard Henderson Int128 oldv, cmpv, newv; 3274a9b3c5dSRichard Henderson 3284a9b3c5dSRichard Henderson cmpv = int128_make128(env->reserve_val2, env->reserve_val); 3294a9b3c5dSRichard Henderson newv = int128_make128(new_lo, new_hi); 3304a9b3c5dSRichard Henderson oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, 3314a9b3c5dSRichard Henderson opidx, GETPC()); 3324a9b3c5dSRichard Henderson success = int128_eq(oldv, cmpv); 3334a9b3c5dSRichard Henderson } 3344a9b3c5dSRichard Henderson env->reserve_addr = -1; 3354a9b3c5dSRichard Henderson return env->so + success * CRF_EQ_BIT; 3364a9b3c5dSRichard Henderson } 33794bf2658SRichard Henderson #endif 33894bf2658SRichard Henderson 339fcf5ef2aSThomas Huth /*****************************************************************************/ 340fcf5ef2aSThomas Huth /* Altivec extension helpers */ 341fcf5ef2aSThomas Huth #if defined(HOST_WORDS_BIGENDIAN) 342fcf5ef2aSThomas Huth #define HI_IDX 0 343fcf5ef2aSThomas Huth #define LO_IDX 1 344fcf5ef2aSThomas Huth #else 345fcf5ef2aSThomas Huth #define HI_IDX 1 346fcf5ef2aSThomas Huth #define LO_IDX 0 347fcf5ef2aSThomas Huth #endif 348fcf5ef2aSThomas Huth 349fcf5ef2aSThomas Huth /* We use msr_le to determine index ordering in a vector. However, 350fcf5ef2aSThomas Huth byteswapping is not simply controlled by msr_le. We also need to take 351fcf5ef2aSThomas Huth into account endianness of the target. This is done for the little-endian 352fcf5ef2aSThomas Huth PPC64 user-mode target. 
*/ 353fcf5ef2aSThomas Huth 354fcf5ef2aSThomas Huth #define LVE(name, access, swap, element) \ 355fcf5ef2aSThomas Huth void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ 356fcf5ef2aSThomas Huth target_ulong addr) \ 357fcf5ef2aSThomas Huth { \ 358fcf5ef2aSThomas Huth size_t n_elems = ARRAY_SIZE(r->element); \ 359fcf5ef2aSThomas Huth int adjust = HI_IDX*(n_elems - 1); \ 360fcf5ef2aSThomas Huth int sh = sizeof(r->element[0]) >> 1; \ 361fcf5ef2aSThomas Huth int index = (addr & 0xf) >> sh; \ 362fcf5ef2aSThomas Huth if (msr_le) { \ 363fcf5ef2aSThomas Huth index = n_elems - index - 1; \ 364fcf5ef2aSThomas Huth } \ 365fcf5ef2aSThomas Huth \ 366fcf5ef2aSThomas Huth if (needs_byteswap(env)) { \ 367fcf5ef2aSThomas Huth r->element[LO_IDX ? index : (adjust - index)] = \ 368fcf5ef2aSThomas Huth swap(access(env, addr, GETPC())); \ 369fcf5ef2aSThomas Huth } else { \ 370fcf5ef2aSThomas Huth r->element[LO_IDX ? index : (adjust - index)] = \ 371fcf5ef2aSThomas Huth access(env, addr, GETPC()); \ 372fcf5ef2aSThomas Huth } \ 373fcf5ef2aSThomas Huth } 374fcf5ef2aSThomas Huth #define I(x) (x) 375fcf5ef2aSThomas Huth LVE(lvebx, cpu_ldub_data_ra, I, u8) 376fcf5ef2aSThomas Huth LVE(lvehx, cpu_lduw_data_ra, bswap16, u16) 377fcf5ef2aSThomas Huth LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) 378fcf5ef2aSThomas Huth #undef I 379fcf5ef2aSThomas Huth #undef LVE 380fcf5ef2aSThomas Huth 381fcf5ef2aSThomas Huth #define STVE(name, access, swap, element) \ 382fcf5ef2aSThomas Huth void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ 383fcf5ef2aSThomas Huth target_ulong addr) \ 384fcf5ef2aSThomas Huth { \ 385fcf5ef2aSThomas Huth size_t n_elems = ARRAY_SIZE(r->element); \ 386fcf5ef2aSThomas Huth int adjust = HI_IDX * (n_elems - 1); \ 387fcf5ef2aSThomas Huth int sh = sizeof(r->element[0]) >> 1; \ 388fcf5ef2aSThomas Huth int index = (addr & 0xf) >> sh; \ 389fcf5ef2aSThomas Huth if (msr_le) { \ 390fcf5ef2aSThomas Huth index = n_elems - index - 1; \ 391fcf5ef2aSThomas Huth } \ 392fcf5ef2aSThomas Huth \ 
393fcf5ef2aSThomas Huth if (needs_byteswap(env)) { \ 394fcf5ef2aSThomas Huth access(env, addr, swap(r->element[LO_IDX ? index : \ 395fcf5ef2aSThomas Huth (adjust - index)]), \ 396fcf5ef2aSThomas Huth GETPC()); \ 397fcf5ef2aSThomas Huth } else { \ 398fcf5ef2aSThomas Huth access(env, addr, r->element[LO_IDX ? index : \ 399fcf5ef2aSThomas Huth (adjust - index)], GETPC()); \ 400fcf5ef2aSThomas Huth } \ 401fcf5ef2aSThomas Huth } 402fcf5ef2aSThomas Huth #define I(x) (x) 403fcf5ef2aSThomas Huth STVE(stvebx, cpu_stb_data_ra, I, u8) 404fcf5ef2aSThomas Huth STVE(stvehx, cpu_stw_data_ra, bswap16, u16) 405fcf5ef2aSThomas Huth STVE(stvewx, cpu_stl_data_ra, bswap32, u32) 406fcf5ef2aSThomas Huth #undef I 407fcf5ef2aSThomas Huth #undef LVE 408fcf5ef2aSThomas Huth 4096914bc4fSNikunj A Dadhania #ifdef TARGET_PPC64 4106914bc4fSNikunj A Dadhania #define GET_NB(rb) ((rb >> 56) & 0xFF) 4116914bc4fSNikunj A Dadhania 4126914bc4fSNikunj A Dadhania #define VSX_LXVL(name, lj) \ 4136914bc4fSNikunj A Dadhania void helper_##name(CPUPPCState *env, target_ulong addr, \ 4146914bc4fSNikunj A Dadhania target_ulong xt_num, target_ulong rb) \ 4156914bc4fSNikunj A Dadhania { \ 4166914bc4fSNikunj A Dadhania int i; \ 4176914bc4fSNikunj A Dadhania ppc_vsr_t xt; \ 4186914bc4fSNikunj A Dadhania uint64_t nb = GET_NB(rb); \ 4196914bc4fSNikunj A Dadhania \ 4206914bc4fSNikunj A Dadhania xt.s128 = int128_zero(); \ 4216914bc4fSNikunj A Dadhania if (nb) { \ 4226914bc4fSNikunj A Dadhania nb = (nb >= 16) ? 
16 : nb; \ 4236914bc4fSNikunj A Dadhania if (msr_le && !lj) { \ 4246914bc4fSNikunj A Dadhania for (i = 16; i > 16 - nb; i--) { \ 4256914bc4fSNikunj A Dadhania xt.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \ 4266914bc4fSNikunj A Dadhania addr = addr_add(env, addr, 1); \ 4276914bc4fSNikunj A Dadhania } \ 4286914bc4fSNikunj A Dadhania } else { \ 4296914bc4fSNikunj A Dadhania for (i = 0; i < nb; i++) { \ 4306914bc4fSNikunj A Dadhania xt.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \ 4316914bc4fSNikunj A Dadhania addr = addr_add(env, addr, 1); \ 4326914bc4fSNikunj A Dadhania } \ 4336914bc4fSNikunj A Dadhania } \ 4346914bc4fSNikunj A Dadhania } \ 4356914bc4fSNikunj A Dadhania putVSR(xt_num, &xt, env); \ 4366914bc4fSNikunj A Dadhania } 4376914bc4fSNikunj A Dadhania 4386914bc4fSNikunj A Dadhania VSX_LXVL(lxvl, 0) 439176e44e7SNikunj A Dadhania VSX_LXVL(lxvll, 1) 4406914bc4fSNikunj A Dadhania #undef VSX_LXVL 441681c2478SNikunj A Dadhania 442681c2478SNikunj A Dadhania #define VSX_STXVL(name, lj) \ 443681c2478SNikunj A Dadhania void helper_##name(CPUPPCState *env, target_ulong addr, \ 444681c2478SNikunj A Dadhania target_ulong xt_num, target_ulong rb) \ 445681c2478SNikunj A Dadhania { \ 446681c2478SNikunj A Dadhania int i; \ 447681c2478SNikunj A Dadhania ppc_vsr_t xt; \ 448681c2478SNikunj A Dadhania target_ulong nb = GET_NB(rb); \ 449681c2478SNikunj A Dadhania \ 450681c2478SNikunj A Dadhania if (!nb) { \ 451681c2478SNikunj A Dadhania return; \ 452681c2478SNikunj A Dadhania } \ 453681c2478SNikunj A Dadhania getVSR(xt_num, &xt, env); \ 454681c2478SNikunj A Dadhania nb = (nb >= 16) ? 
16 : nb; \ 455681c2478SNikunj A Dadhania if (msr_le && !lj) { \ 456681c2478SNikunj A Dadhania for (i = 16; i > 16 - nb; i--) { \ 457681c2478SNikunj A Dadhania cpu_stb_data_ra(env, addr, xt.VsrB(i - 1), GETPC()); \ 458681c2478SNikunj A Dadhania addr = addr_add(env, addr, 1); \ 459681c2478SNikunj A Dadhania } \ 460681c2478SNikunj A Dadhania } else { \ 461681c2478SNikunj A Dadhania for (i = 0; i < nb; i++) { \ 462681c2478SNikunj A Dadhania cpu_stb_data_ra(env, addr, xt.VsrB(i), GETPC()); \ 463681c2478SNikunj A Dadhania addr = addr_add(env, addr, 1); \ 464681c2478SNikunj A Dadhania } \ 465681c2478SNikunj A Dadhania } \ 466681c2478SNikunj A Dadhania } 467681c2478SNikunj A Dadhania 468681c2478SNikunj A Dadhania VSX_STXVL(stxvl, 0) 469e122090dSNikunj A Dadhania VSX_STXVL(stxvll, 1) 470681c2478SNikunj A Dadhania #undef VSX_STXVL 4716914bc4fSNikunj A Dadhania #undef GET_NB 4726914bc4fSNikunj A Dadhania #endif /* TARGET_PPC64 */ 4736914bc4fSNikunj A Dadhania 474fcf5ef2aSThomas Huth #undef HI_IDX 475fcf5ef2aSThomas Huth #undef LO_IDX 476fcf5ef2aSThomas Huth 477fcf5ef2aSThomas Huth void helper_tbegin(CPUPPCState *env) 478fcf5ef2aSThomas Huth { 479fcf5ef2aSThomas Huth /* As a degenerate implementation, always fail tbegin. The reason 480fcf5ef2aSThomas Huth * given is "Nesting overflow". The "persistent" bit is set, 481fcf5ef2aSThomas Huth * providing a hint to the error handler to not retry. The TFIAR 482fcf5ef2aSThomas Huth * captures the address of the failure, which is this tbegin 483fcf5ef2aSThomas Huth * instruction. Instruction execution will continue with the 484fcf5ef2aSThomas Huth * next instruction in memory, which is precisely what we want. 
485fcf5ef2aSThomas Huth */ 486fcf5ef2aSThomas Huth 487fcf5ef2aSThomas Huth env->spr[SPR_TEXASR] = 488fcf5ef2aSThomas Huth (1ULL << TEXASR_FAILURE_PERSISTENT) | 489fcf5ef2aSThomas Huth (1ULL << TEXASR_NESTING_OVERFLOW) | 490fcf5ef2aSThomas Huth (msr_hv << TEXASR_PRIVILEGE_HV) | 491fcf5ef2aSThomas Huth (msr_pr << TEXASR_PRIVILEGE_PR) | 492fcf5ef2aSThomas Huth (1ULL << TEXASR_FAILURE_SUMMARY) | 493fcf5ef2aSThomas Huth (1ULL << TEXASR_TFIAR_EXACT); 494fcf5ef2aSThomas Huth env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr; 495fcf5ef2aSThomas Huth env->spr[SPR_TFHAR] = env->nip + 4; 496fcf5ef2aSThomas Huth env->crf[0] = 0xB; /* 0b1010 = transaction failure */ 497fcf5ef2aSThomas Huth } 498