/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

/*
 * True when guest data accesses must be byteswapped relative to the
 * host: i.e. when the guest's current MSR[LE] setting disagrees with
 * the compiled-in target endianness.
 */
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

/*
 * Advance @addr by @arg, truncating the result to 32 bits when the CPU
 * is a 64-bit implementation currently running in 32-bit mode.
 */
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

/*
 * Probe @nb bytes at @addr for @access_type.  Returns a host pointer
 * only if the entire range is directly addressable host RAM (handling
 * the case where the range spans two guest pages that happen to be
 * contiguous in host memory); otherwise returns NULL and the caller
 * must fall back to per-access slow-path loads/stores.  May longjmp
 * out via the TLB fill path using @raddr as the return address.
 */
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    /* Bytes remaining on the first page. */
    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}

/* lmw: load words into consecutive GPRs reg..31 from @addr. */
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

/* stmw: store consecutive GPRs reg..31 to @addr. */
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

/*
 * Load string word: load @nb bytes from @addr into GPRs starting at
 * @reg (wrapping 31 -> 0).  A final partial word is placed left
 * justified in the last register, with its low bytes left as zero
 * via the shifts below.
 */
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            /* nb == 0: no partial word remains. */
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            /* nb == 0: no partial word remains. */
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  In an other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

/*
 * Store string word: store @nb bytes to @addr from GPRs starting at
 * @reg (wrapping 31 -> 0); a trailing partial word stores the high
 * bytes of the final register.
 */
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}

/*
 * Zero one data cache block at @addr.  Also kills any lwarx/ldarx
 * reservation covering the block, since dcbz is a store.
 * @epid selects the external-PID (dcbzep) store MMU index.
 */
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false);

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

/*
 * lscbx: load string and compare byte.  Loads up to XER[bc] bytes into
 * GPRs starting at @reg, stopping early on a byte equal to XER[cmp];
 * returns the number of bytes actually loaded (to be written back to
 * XER by the caller).  rA (if nonzero) and rB are never overwritten.
 */
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

#ifdef TARGET_PPC64
/*
 * 128-bit atomic load/store helpers for lq/stq/stqcx. under MTTCG.
 * The high 64 bits of a loaded quadword are returned via env->retxh.
 */
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}

/*
 * stqcx.: store quadword conditional.  Succeeds only if the reservation
 * address matches and the memory still holds the reserved value
 * (reserve_val/reserve_val2), via a 128-bit compare-and-swap.  Returns
 * the CR0 field to set (SO | EQ-on-success); always clears the
 * reservation.
 */
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by msr_le.  We also need to
 * take into account endianness of the target.  This is done for the
 * little-endian PPC64 user-mode target.
 */

/* lvebx/lvehx/lvewx: load one element into the vector register slot
 * selected by the low address bits. */
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

/* stvebx/stvehx/stvewx: store one element from the vector register
 * slot selected by the low address bits. */
#define STVE(name, access, swap, element)                           \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                               \
        size_t n_elems = ARRAY_SIZE(r->element);                    \
        int adjust = HI_IDX * (n_elems - 1);                        \
        int sh = sizeof(r->element[0]) >> 1;                        \
        int index = (addr & 0xf) >> sh;                             \
        if (msr_le) {                                               \
            index = n_elems - index - 1;                            \
        }                                                           \
                                                                    \
        if (needs_byteswap(env)) {                                  \
            access(env, addr, swap(r->element[LO_IDX ? index :      \
                                             (adjust - index)]),    \
                   GETPC());                                        \
        } else {                                                    \
            access(env, addr, r->element[LO_IDX ? index :           \
                                         (adjust - index)],         \
                   GETPC());                                        \
        }                                                           \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
/* NOTE(review): presumably this was meant to be "#undef STVE"; as
 * written it re-undefines the already-undefined LVE, which is harmless
 * but leaves STVE defined. */
#undef LVE

#ifdef TARGET_PPC64
/* Byte count for lxvl/stxvl lives in the top byte of rB. */
#define GET_NB(rb) ((rb >> 56) & 0xFF)

/*
 * lxvl/lxvll: load up to 16 bytes (count from rB) into a VSX register,
 * zeroing the remainder.  @lj forces left-justified (big-endian) byte
 * order regardless of MSR[LE].
 */
#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,               \
                   ppc_vsr_t *xt, target_ulong rb)                    \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

/*
 * stxvl/stxvll: store up to 16 bytes (count from rB) from a VSX
 * register; byte-order handling mirrors VSX_LXVL above.
 */
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    /* NOTE(review): 0xB is 0b1011, but the comment below says 0b1010 —
     * one of the two is wrong; verify the intended CR0 value against
     * the Power ISA TM chapter before changing either. */
    env->crf[0] = 0xB; /* 0b1010 = transaction failure */
}