/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18*fcf5ef2aSThomas Huth */ 19*fcf5ef2aSThomas Huth #include "qemu/osdep.h" 20*fcf5ef2aSThomas Huth #include "cpu.h" 21*fcf5ef2aSThomas Huth #include "exec/exec-all.h" 22*fcf5ef2aSThomas Huth #include "qemu/host-utils.h" 23*fcf5ef2aSThomas Huth #include "exec/helper-proto.h" 24*fcf5ef2aSThomas Huth 25*fcf5ef2aSThomas Huth #include "helper_regs.h" 26*fcf5ef2aSThomas Huth #include "exec/cpu_ldst.h" 27*fcf5ef2aSThomas Huth 28*fcf5ef2aSThomas Huth //#define DEBUG_OP 29*fcf5ef2aSThomas Huth 30*fcf5ef2aSThomas Huth static inline bool needs_byteswap(const CPUPPCState *env) 31*fcf5ef2aSThomas Huth { 32*fcf5ef2aSThomas Huth #if defined(TARGET_WORDS_BIGENDIAN) 33*fcf5ef2aSThomas Huth return msr_le; 34*fcf5ef2aSThomas Huth #else 35*fcf5ef2aSThomas Huth return !msr_le; 36*fcf5ef2aSThomas Huth #endif 37*fcf5ef2aSThomas Huth } 38*fcf5ef2aSThomas Huth 39*fcf5ef2aSThomas Huth /*****************************************************************************/ 40*fcf5ef2aSThomas Huth /* Memory load and stores */ 41*fcf5ef2aSThomas Huth 42*fcf5ef2aSThomas Huth static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr, 43*fcf5ef2aSThomas Huth target_long arg) 44*fcf5ef2aSThomas Huth { 45*fcf5ef2aSThomas Huth #if defined(TARGET_PPC64) 46*fcf5ef2aSThomas Huth if (!msr_is_64bit(env, env->msr)) { 47*fcf5ef2aSThomas Huth return (uint32_t)(addr + arg); 48*fcf5ef2aSThomas Huth } else 49*fcf5ef2aSThomas Huth #endif 50*fcf5ef2aSThomas Huth { 51*fcf5ef2aSThomas Huth return addr + arg; 52*fcf5ef2aSThomas Huth } 53*fcf5ef2aSThomas Huth } 54*fcf5ef2aSThomas Huth 55*fcf5ef2aSThomas Huth void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) 56*fcf5ef2aSThomas Huth { 57*fcf5ef2aSThomas Huth for (; reg < 32; reg++) { 58*fcf5ef2aSThomas Huth if (needs_byteswap(env)) { 59*fcf5ef2aSThomas Huth env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC())); 60*fcf5ef2aSThomas Huth } else { 61*fcf5ef2aSThomas Huth env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC()); 62*fcf5ef2aSThomas 
Huth } 63*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 4); 64*fcf5ef2aSThomas Huth } 65*fcf5ef2aSThomas Huth } 66*fcf5ef2aSThomas Huth 67*fcf5ef2aSThomas Huth void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) 68*fcf5ef2aSThomas Huth { 69*fcf5ef2aSThomas Huth for (; reg < 32; reg++) { 70*fcf5ef2aSThomas Huth if (needs_byteswap(env)) { 71*fcf5ef2aSThomas Huth cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]), 72*fcf5ef2aSThomas Huth GETPC()); 73*fcf5ef2aSThomas Huth } else { 74*fcf5ef2aSThomas Huth cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC()); 75*fcf5ef2aSThomas Huth } 76*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 4); 77*fcf5ef2aSThomas Huth } 78*fcf5ef2aSThomas Huth } 79*fcf5ef2aSThomas Huth 80*fcf5ef2aSThomas Huth static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, 81*fcf5ef2aSThomas Huth uint32_t reg, uintptr_t raddr) 82*fcf5ef2aSThomas Huth { 83*fcf5ef2aSThomas Huth int sh; 84*fcf5ef2aSThomas Huth 85*fcf5ef2aSThomas Huth for (; nb > 3; nb -= 4) { 86*fcf5ef2aSThomas Huth env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr); 87*fcf5ef2aSThomas Huth reg = (reg + 1) % 32; 88*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 4); 89*fcf5ef2aSThomas Huth } 90*fcf5ef2aSThomas Huth if (unlikely(nb > 0)) { 91*fcf5ef2aSThomas Huth env->gpr[reg] = 0; 92*fcf5ef2aSThomas Huth for (sh = 24; nb > 0; nb--, sh -= 8) { 93*fcf5ef2aSThomas Huth env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh; 94*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 1); 95*fcf5ef2aSThomas Huth } 96*fcf5ef2aSThomas Huth } 97*fcf5ef2aSThomas Huth } 98*fcf5ef2aSThomas Huth 99*fcf5ef2aSThomas Huth void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg) 100*fcf5ef2aSThomas Huth { 101*fcf5ef2aSThomas Huth do_lsw(env, addr, nb, reg, GETPC()); 102*fcf5ef2aSThomas Huth } 103*fcf5ef2aSThomas Huth 104*fcf5ef2aSThomas Huth /* PPC32 specification says we must generate an exception if 105*fcf5ef2aSThomas Huth * rA is in the 
range of registers to be loaded. 106*fcf5ef2aSThomas Huth * In an other hand, IBM says this is valid, but rA won't be loaded. 107*fcf5ef2aSThomas Huth * For now, I'll follow the spec... 108*fcf5ef2aSThomas Huth */ 109*fcf5ef2aSThomas Huth void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg, 110*fcf5ef2aSThomas Huth uint32_t ra, uint32_t rb) 111*fcf5ef2aSThomas Huth { 112*fcf5ef2aSThomas Huth if (likely(xer_bc != 0)) { 113*fcf5ef2aSThomas Huth int num_used_regs = (xer_bc + 3) / 4; 114*fcf5ef2aSThomas Huth if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) || 115*fcf5ef2aSThomas Huth lsw_reg_in_range(reg, num_used_regs, rb))) { 116*fcf5ef2aSThomas Huth raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, 117*fcf5ef2aSThomas Huth POWERPC_EXCP_INVAL | 118*fcf5ef2aSThomas Huth POWERPC_EXCP_INVAL_LSWX, GETPC()); 119*fcf5ef2aSThomas Huth } else { 120*fcf5ef2aSThomas Huth do_lsw(env, addr, xer_bc, reg, GETPC()); 121*fcf5ef2aSThomas Huth } 122*fcf5ef2aSThomas Huth } 123*fcf5ef2aSThomas Huth } 124*fcf5ef2aSThomas Huth 125*fcf5ef2aSThomas Huth void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, 126*fcf5ef2aSThomas Huth uint32_t reg) 127*fcf5ef2aSThomas Huth { 128*fcf5ef2aSThomas Huth int sh; 129*fcf5ef2aSThomas Huth 130*fcf5ef2aSThomas Huth for (; nb > 3; nb -= 4) { 131*fcf5ef2aSThomas Huth cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC()); 132*fcf5ef2aSThomas Huth reg = (reg + 1) % 32; 133*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 4); 134*fcf5ef2aSThomas Huth } 135*fcf5ef2aSThomas Huth if (unlikely(nb > 0)) { 136*fcf5ef2aSThomas Huth for (sh = 24; nb > 0; nb--, sh -= 8) { 137*fcf5ef2aSThomas Huth cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC()); 138*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 1); 139*fcf5ef2aSThomas Huth } 140*fcf5ef2aSThomas Huth } 141*fcf5ef2aSThomas Huth } 142*fcf5ef2aSThomas Huth 143*fcf5ef2aSThomas Huth void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode) 
144*fcf5ef2aSThomas Huth { 145*fcf5ef2aSThomas Huth target_ulong mask, dcbz_size = env->dcache_line_size; 146*fcf5ef2aSThomas Huth uint32_t i; 147*fcf5ef2aSThomas Huth void *haddr; 148*fcf5ef2aSThomas Huth 149*fcf5ef2aSThomas Huth #if defined(TARGET_PPC64) 150*fcf5ef2aSThomas Huth /* Check for dcbz vs dcbzl on 970 */ 151*fcf5ef2aSThomas Huth if (env->excp_model == POWERPC_EXCP_970 && 152*fcf5ef2aSThomas Huth !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) { 153*fcf5ef2aSThomas Huth dcbz_size = 32; 154*fcf5ef2aSThomas Huth } 155*fcf5ef2aSThomas Huth #endif 156*fcf5ef2aSThomas Huth 157*fcf5ef2aSThomas Huth /* Align address */ 158*fcf5ef2aSThomas Huth mask = ~(dcbz_size - 1); 159*fcf5ef2aSThomas Huth addr &= mask; 160*fcf5ef2aSThomas Huth 161*fcf5ef2aSThomas Huth /* Check reservation */ 162*fcf5ef2aSThomas Huth if ((env->reserve_addr & mask) == (addr & mask)) { 163*fcf5ef2aSThomas Huth env->reserve_addr = (target_ulong)-1ULL; 164*fcf5ef2aSThomas Huth } 165*fcf5ef2aSThomas Huth 166*fcf5ef2aSThomas Huth /* Try fast path translate */ 167*fcf5ef2aSThomas Huth haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, env->dmmu_idx); 168*fcf5ef2aSThomas Huth if (haddr) { 169*fcf5ef2aSThomas Huth memset(haddr, 0, dcbz_size); 170*fcf5ef2aSThomas Huth } else { 171*fcf5ef2aSThomas Huth /* Slow path */ 172*fcf5ef2aSThomas Huth for (i = 0; i < dcbz_size; i += 8) { 173*fcf5ef2aSThomas Huth cpu_stq_data_ra(env, addr + i, 0, GETPC()); 174*fcf5ef2aSThomas Huth } 175*fcf5ef2aSThomas Huth } 176*fcf5ef2aSThomas Huth } 177*fcf5ef2aSThomas Huth 178*fcf5ef2aSThomas Huth void helper_icbi(CPUPPCState *env, target_ulong addr) 179*fcf5ef2aSThomas Huth { 180*fcf5ef2aSThomas Huth addr &= ~(env->dcache_line_size - 1); 181*fcf5ef2aSThomas Huth /* Invalidate one cache line : 182*fcf5ef2aSThomas Huth * PowerPC specification says this is to be treated like a load 183*fcf5ef2aSThomas Huth * (not a fetch) by the MMU. 
To be sure it will be so, 184*fcf5ef2aSThomas Huth * do the load "by hand". 185*fcf5ef2aSThomas Huth */ 186*fcf5ef2aSThomas Huth cpu_ldl_data_ra(env, addr, GETPC()); 187*fcf5ef2aSThomas Huth } 188*fcf5ef2aSThomas Huth 189*fcf5ef2aSThomas Huth /* XXX: to be tested */ 190*fcf5ef2aSThomas Huth target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, 191*fcf5ef2aSThomas Huth uint32_t ra, uint32_t rb) 192*fcf5ef2aSThomas Huth { 193*fcf5ef2aSThomas Huth int i, c, d; 194*fcf5ef2aSThomas Huth 195*fcf5ef2aSThomas Huth d = 24; 196*fcf5ef2aSThomas Huth for (i = 0; i < xer_bc; i++) { 197*fcf5ef2aSThomas Huth c = cpu_ldub_data_ra(env, addr, GETPC()); 198*fcf5ef2aSThomas Huth addr = addr_add(env, addr, 1); 199*fcf5ef2aSThomas Huth /* ra (if not 0) and rb are never modified */ 200*fcf5ef2aSThomas Huth if (likely(reg != rb && (ra == 0 || reg != ra))) { 201*fcf5ef2aSThomas Huth env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); 202*fcf5ef2aSThomas Huth } 203*fcf5ef2aSThomas Huth if (unlikely(c == xer_cmp)) { 204*fcf5ef2aSThomas Huth break; 205*fcf5ef2aSThomas Huth } 206*fcf5ef2aSThomas Huth if (likely(d != 0)) { 207*fcf5ef2aSThomas Huth d -= 8; 208*fcf5ef2aSThomas Huth } else { 209*fcf5ef2aSThomas Huth d = 24; 210*fcf5ef2aSThomas Huth reg++; 211*fcf5ef2aSThomas Huth reg = reg & 0x1F; 212*fcf5ef2aSThomas Huth } 213*fcf5ef2aSThomas Huth } 214*fcf5ef2aSThomas Huth return i; 215*fcf5ef2aSThomas Huth } 216*fcf5ef2aSThomas Huth 217*fcf5ef2aSThomas Huth /*****************************************************************************/ 218*fcf5ef2aSThomas Huth /* Altivec extension helpers */ 219*fcf5ef2aSThomas Huth #if defined(HOST_WORDS_BIGENDIAN) 220*fcf5ef2aSThomas Huth #define HI_IDX 0 221*fcf5ef2aSThomas Huth #define LO_IDX 1 222*fcf5ef2aSThomas Huth #else 223*fcf5ef2aSThomas Huth #define HI_IDX 1 224*fcf5ef2aSThomas Huth #define LO_IDX 0 225*fcf5ef2aSThomas Huth #endif 226*fcf5ef2aSThomas Huth 227*fcf5ef2aSThomas Huth /* We use msr_le to determine index 
ordering in a vector. However, 228*fcf5ef2aSThomas Huth byteswapping is not simply controlled by msr_le. We also need to take 229*fcf5ef2aSThomas Huth into account endianness of the target. This is done for the little-endian 230*fcf5ef2aSThomas Huth PPC64 user-mode target. */ 231*fcf5ef2aSThomas Huth 232*fcf5ef2aSThomas Huth #define LVE(name, access, swap, element) \ 233*fcf5ef2aSThomas Huth void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ 234*fcf5ef2aSThomas Huth target_ulong addr) \ 235*fcf5ef2aSThomas Huth { \ 236*fcf5ef2aSThomas Huth size_t n_elems = ARRAY_SIZE(r->element); \ 237*fcf5ef2aSThomas Huth int adjust = HI_IDX*(n_elems - 1); \ 238*fcf5ef2aSThomas Huth int sh = sizeof(r->element[0]) >> 1; \ 239*fcf5ef2aSThomas Huth int index = (addr & 0xf) >> sh; \ 240*fcf5ef2aSThomas Huth if (msr_le) { \ 241*fcf5ef2aSThomas Huth index = n_elems - index - 1; \ 242*fcf5ef2aSThomas Huth } \ 243*fcf5ef2aSThomas Huth \ 244*fcf5ef2aSThomas Huth if (needs_byteswap(env)) { \ 245*fcf5ef2aSThomas Huth r->element[LO_IDX ? index : (adjust - index)] = \ 246*fcf5ef2aSThomas Huth swap(access(env, addr, GETPC())); \ 247*fcf5ef2aSThomas Huth } else { \ 248*fcf5ef2aSThomas Huth r->element[LO_IDX ? 
index : (adjust - index)] = \ 249*fcf5ef2aSThomas Huth access(env, addr, GETPC()); \ 250*fcf5ef2aSThomas Huth } \ 251*fcf5ef2aSThomas Huth } 252*fcf5ef2aSThomas Huth #define I(x) (x) 253*fcf5ef2aSThomas Huth LVE(lvebx, cpu_ldub_data_ra, I, u8) 254*fcf5ef2aSThomas Huth LVE(lvehx, cpu_lduw_data_ra, bswap16, u16) 255*fcf5ef2aSThomas Huth LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) 256*fcf5ef2aSThomas Huth #undef I 257*fcf5ef2aSThomas Huth #undef LVE 258*fcf5ef2aSThomas Huth 259*fcf5ef2aSThomas Huth #define STVE(name, access, swap, element) \ 260*fcf5ef2aSThomas Huth void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ 261*fcf5ef2aSThomas Huth target_ulong addr) \ 262*fcf5ef2aSThomas Huth { \ 263*fcf5ef2aSThomas Huth size_t n_elems = ARRAY_SIZE(r->element); \ 264*fcf5ef2aSThomas Huth int adjust = HI_IDX * (n_elems - 1); \ 265*fcf5ef2aSThomas Huth int sh = sizeof(r->element[0]) >> 1; \ 266*fcf5ef2aSThomas Huth int index = (addr & 0xf) >> sh; \ 267*fcf5ef2aSThomas Huth if (msr_le) { \ 268*fcf5ef2aSThomas Huth index = n_elems - index - 1; \ 269*fcf5ef2aSThomas Huth } \ 270*fcf5ef2aSThomas Huth \ 271*fcf5ef2aSThomas Huth if (needs_byteswap(env)) { \ 272*fcf5ef2aSThomas Huth access(env, addr, swap(r->element[LO_IDX ? index : \ 273*fcf5ef2aSThomas Huth (adjust - index)]), \ 274*fcf5ef2aSThomas Huth GETPC()); \ 275*fcf5ef2aSThomas Huth } else { \ 276*fcf5ef2aSThomas Huth access(env, addr, r->element[LO_IDX ? 
index : \ 277*fcf5ef2aSThomas Huth (adjust - index)], GETPC()); \ 278*fcf5ef2aSThomas Huth } \ 279*fcf5ef2aSThomas Huth } 280*fcf5ef2aSThomas Huth #define I(x) (x) 281*fcf5ef2aSThomas Huth STVE(stvebx, cpu_stb_data_ra, I, u8) 282*fcf5ef2aSThomas Huth STVE(stvehx, cpu_stw_data_ra, bswap16, u16) 283*fcf5ef2aSThomas Huth STVE(stvewx, cpu_stl_data_ra, bswap32, u32) 284*fcf5ef2aSThomas Huth #undef I 285*fcf5ef2aSThomas Huth #undef LVE 286*fcf5ef2aSThomas Huth 287*fcf5ef2aSThomas Huth #undef HI_IDX 288*fcf5ef2aSThomas Huth #undef LO_IDX 289*fcf5ef2aSThomas Huth 290*fcf5ef2aSThomas Huth void helper_tbegin(CPUPPCState *env) 291*fcf5ef2aSThomas Huth { 292*fcf5ef2aSThomas Huth /* As a degenerate implementation, always fail tbegin. The reason 293*fcf5ef2aSThomas Huth * given is "Nesting overflow". The "persistent" bit is set, 294*fcf5ef2aSThomas Huth * providing a hint to the error handler to not retry. The TFIAR 295*fcf5ef2aSThomas Huth * captures the address of the failure, which is this tbegin 296*fcf5ef2aSThomas Huth * instruction. Instruction execution will continue with the 297*fcf5ef2aSThomas Huth * next instruction in memory, which is precisely what we want. 298*fcf5ef2aSThomas Huth */ 299*fcf5ef2aSThomas Huth 300*fcf5ef2aSThomas Huth env->spr[SPR_TEXASR] = 301*fcf5ef2aSThomas Huth (1ULL << TEXASR_FAILURE_PERSISTENT) | 302*fcf5ef2aSThomas Huth (1ULL << TEXASR_NESTING_OVERFLOW) | 303*fcf5ef2aSThomas Huth (msr_hv << TEXASR_PRIVILEGE_HV) | 304*fcf5ef2aSThomas Huth (msr_pr << TEXASR_PRIVILEGE_PR) | 305*fcf5ef2aSThomas Huth (1ULL << TEXASR_FAILURE_SUMMARY) | 306*fcf5ef2aSThomas Huth (1ULL << TEXASR_TFIAR_EXACT); 307*fcf5ef2aSThomas Huth env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr; 308*fcf5ef2aSThomas Huth env->spr[SPR_TFHAR] = env->nip + 4; 309*fcf5ef2aSThomas Huth env->crf[0] = 0xB; /* 0b1010 = transaction failure */ 310*fcf5ef2aSThomas Huth } 311