/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"

//#define DEBUG_OP

static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        /* In 32-bit mode, effective addresses wrap at 4 GiB. */
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}

static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}
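/* Worked example (an illustration only, not emulation code): do_lsw
 * with nb = 6 and reg = 5 loads one full word into r5, then packs the
 * two remaining bytes into the high-order byte lanes of r6,
 * zero-filling the rest:
 *     r5 = M[EA] || M[EA+1] || M[EA+2] || M[EA+3]
 *     r6 = M[EA+4] || M[EA+5] || 0x00 || 0x00
 */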
/* The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
            addr = addr_add(env, addr, 1);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, env->dmmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_data_ra(env, addr + i, 0, GETPC());
        }
    }
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * The PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To make sure it is, do the load
     * "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to take
 * into account the endianness of the target. This is done for the
 * little-endian PPC64 user-mode target.
 */
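/* Worked example (an illustration only): for lvehx, r->element is u16,
 * so n_elems = 8 and sh = 1. With addr & 0xf == 6, index = 3. In
 * big-endian mode (msr_le == 0) on a little-endian host (LO_IDX == 0,
 * adjust = 7), the halfword lands in r->u16[7 - 3] = r->u16[4], i.e.
 * element 3 counting from the most-significant end of the vector, as
 * the big-endian element numbering requires.
 */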
#define LVE(name, access, swap, element)                            \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                               \
        size_t n_elems = ARRAY_SIZE(r->element);                    \
        int adjust = HI_IDX * (n_elems - 1);                        \
        int sh = sizeof(r->element[0]) >> 1;                        \
        int index = (addr & 0xf) >> sh;                             \
        if (msr_le) {                                               \
            index = n_elems - index - 1;                            \
        }                                                           \
                                                                    \
        if (needs_byteswap(env)) {                                  \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                swap(access(env, addr, GETPC()));                   \
        } else {                                                    \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                access(env, addr, GETPC());                         \
        }                                                           \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                           \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                               \
        size_t n_elems = ARRAY_SIZE(r->element);                    \
        int adjust = HI_IDX * (n_elems - 1);                        \
        int sh = sizeof(r->element[0]) >> 1;                        \
        int index = (addr & 0xf) >> sh;                             \
        if (msr_le) {                                               \
            index = n_elems - index - 1;                            \
        }                                                           \
                                                                    \
        if (needs_byteswap(env)) {                                  \
            access(env, addr, swap(r->element[LO_IDX ? index :      \
                                              (adjust - index)]),   \
                   GETPC());                                        \
        } else {                                                    \
            access(env, addr, r->element[LO_IDX ? index :           \
                                         (adjust - index)],         \
                   GETPC());                                        \
        }                                                           \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
/* The byte count lives in bits 0:7 (the most-significant byte) of rb. */
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   target_ulong xt_num, target_ulong rb)                \
{                                                                       \
    int i;                                                              \
    ppc_vsr_t xt;                                                       \
    uint64_t nb = GET_NB(rb);                                           \
                                                                        \
    xt.s128 = int128_zero();                                            \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (msr_le && !lj) {                                            \
            for (i = 16; i > 16 - nb; i--) {                            \
                xt.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());  \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                xt.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());      \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    putVSR(xt_num, &xt, env);                                           \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                             \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   target_ulong xt_num, target_ulong rb)                \
{                                                                       \
    int i;                                                              \
    ppc_vsr_t xt;                                                       \
    target_ulong nb = GET_NB(rb);                                       \
                                                                        \
    if (!nb) {                                                          \
        return;                                                         \
    }                                                                   \
    getVSR(xt_num, &xt, env);                                           \
    nb = (nb >= 16) ? 16 : nb;                                          \
    if (msr_le && !lj) {                                                \
        for (i = 16; i > 16 - nb; i--) {                                \
            cpu_stb_data_ra(env, addr, xt.VsrB(i - 1), GETPC());        \
            addr = addr_add(env, addr, 1);                              \
        }                                                               \
    } else {                                                            \
        for (i = 0; i < nb; i++) {                                      \
            cpu_stb_data_ra(env, addr, xt.VsrB(i), GETPC());            \
            addr = addr_add(env, addr, 1);                              \
        }                                                               \
    }                                                                   \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX
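/* Worked example (an illustration only): for lxvl with
 * rb = 0x0300000000000000 (so GET_NB(rb) == 3), the helper above loads
 * three bytes from memory into the three most-significant bytes of the
 * target VSR (VsrB(0..2) in big-endian mode) and leaves the remaining
 * thirteen bytes zeroed; nb == 0 leaves the register all zeroes, and
 * any nb >= 16 is clamped to a full 16-byte load.
 */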
void helper_tbegin(CPUPPCState *env)
{
    /* As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the
     * next instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}
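/* Guest-side view (a sketch, not emulation code): with this degenerate
 * implementation, the usual transactional idiom
 *
 *     tbegin.
 *     beq-   failure_handler    # CR0[EQ] is set on failure entry
 *
 * always branches to failure_handler, and because the "persistent" bit
 * is set in TEXASR the handler is expected to take its
 * non-transactional fallback path rather than retry.
 */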