/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/*** Altivec vector extension ***/
/* Altivec register moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
    return r;
}

#define GEN_VR_LDX(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_i64 avr;                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    avr = tcg_temp_new_i64();                                           \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
    /*                                                                  \
     * We only need to swap high and low halves. gen_qemu_ld64_i64      \
     * does the necessary 64-bit byteswap already.                      \
     */                                                                 \
    if (ctx->le_mode) {                                                 \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, false);                         \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, true);                          \
    } else {                                                            \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, true);                          \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        gen_qemu_ld64_i64(ctx, avr, EA);                                \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    }                                                                   \
    tcg_temp_free(EA);                                                  \
    tcg_temp_free_i64(avr);                                             \
}

#define GEN_VR_STX(name, opc2, opc3)                                    \
static void gen_st##name(DisasContext *ctx)                             \
{                                                                       \
    TCGv EA;                                                            \
    TCGv_i64 avr;                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                               \
    avr = tcg_temp_new_i64();                                           \
    EA = tcg_temp_new();                                                \
    gen_addr_reg_index(ctx, EA);                                        \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                      \
    /*                                                                  \
     * We only need to swap high and low halves. gen_qemu_st64_i64      \
     * does the necessary 64-bit byteswap already.                      \
     */                                                                 \
    if (ctx->le_mode) {                                                 \
        get_avr64(avr, rD(ctx->opcode), false);                         \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        get_avr64(avr, rD(ctx->opcode), true);                          \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
    } else {                                                            \
        get_avr64(avr, rD(ctx->opcode), true);                          \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                     \
        get_avr64(avr, rD(ctx->opcode), false);                         \
        gen_qemu_st64_i64(ctx, avr, EA);                                \
    }                                                                   \
    tcg_temp_free(EA);                                                  \
    tcg_temp_free_i64(avr);                                             \
}

#define GEN_VR_LVE(name, opc2, opc3, size)                              \
static void gen_lve##name(DisasContext *ctx)                            \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_lve##name(cpu_env, rs, EA);                          \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

#define GEN_VR_STVE(name, opc2, opc3, size)                             \
static void gen_stve##name(DisasContext *ctx)                           \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_stve##name(cpu_env, rs, EA);                         \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);

GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);

GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);

GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, cpu_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
    tcg_temp_free_i32(t);
    tcg_temp_free_i64(avr);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
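    /*
     * VSCR is copied from the least-significant word of vB; on a
     * big-endian host that word sits at byte offset 3 * 4 within the
     * 16-byte register.
     */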
#ifdef HOST_WORDS_BIGENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, cpu_env, bofs);
    gen_helper_mtvscr(cpu_env, val);
    tcg_temp_free_i32(val);
}

#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_i64 t0;                                                        \
    TCGv_i64 t1;                                                        \
    TCGv_i64 t2;                                                        \
    TCGv_i64 avr;                                                       \
    TCGv_i64 ten, z;                                                    \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    t0 = tcg_temp_new_i64();                                            \
    t1 = tcg_temp_new_i64();                                            \
    t2 = tcg_temp_new_i64();                                            \
    avr = tcg_temp_new_i64();                                           \
    ten = tcg_const_i64(10);                                            \
    z = tcg_const_i64(0);                                               \
                                                                        \
    if (add_cin) {                                                      \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        get_avr64(avr, rB(ctx->opcode), false);                         \
        tcg_gen_andi_i64(t2, avr, 0xF);                                 \
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(avr, t2, avr, ten);                           \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    }                                                                   \
                                                                        \
    if (ret_carry) {                                                    \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
        set_avr64(rD(ctx->opcode), z, true);                            \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mul_i64(t0, avr, ten);                                  \
        tcg_gen_add_i64(avr, t0, t2);                                   \
        set_avr64(rD(ctx->opcode), avr, true);                          \
    }                                                                   \
                                                                        \
    tcg_temp_free_i64(t0);                                              \
    tcg_temp_free_i64(t1);                                              \
    tcg_temp_free_i64(t2);                                              \
    tcg_temp_free_i64(avr);                                             \
    tcg_temp_free_i64(ten);                                             \
    tcg_temp_free_i64(z);                                               \
}

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

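/*
 * The vmul10 family multiplies the 128-bit value in vA by 10 using only
 * 64-bit TCG ops: mulu2 produces the 128-bit product of the low
 * doubleword, the "e" variants (add_cin) add in the low nibble of vB as
 * a carry-in, and t2 carries into the high doubleword. The "c" variants
 * (ret_carry) write the carry out of bit 127 to vD instead of the
 * product.
 */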
#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    tcg_op(vece,                                                        \
           avr_full_offset(rD(ctx->opcode)),                            \
           avr_full_offset(rA(ctx->opcode)),                            \
           avr_full_offset(rB(ctx->opcode)),                            \
           16, 16);                                                     \
}

/* Logical operations */
GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);

#define GEN_VXFORM(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb);                                      \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    trans_##name(ctx);                                                  \
}

#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, ra, rb);                             \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM3(name, opc2, opc3)                                   \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rc, rd;                                            \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb, rc);                                  \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rc);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit. In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)       \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

/*
 * We use this macro if one instruction is realized with direct
 * translation, and the second one with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        trans_##name0(ctx);                                             \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

/* Adds support for providing an invalid mask */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
                            name1, flg1, flg2_1, inval1)                \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
        !(ctx->opcode & inval0)) {                                      \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) &&  \
        !(ctx->opcode & inval1)) {                                      \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
    tcg_temp_free_ptr(rb);                                              \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
GEN_VXFORM(vavgub, 1, 16);
GEN_VXFORM(vabsdub, 1, 16);
GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
                vabsdub, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguh, 1, 17);
GEN_VXFORM(vabsduh, 1, 17);
GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
                vabsduh, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguw, 1, 18);
GEN_VXFORM(vabsduw, 1, 18);
GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
                vabsduw, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavgsb, 1, 20);
GEN_VXFORM(vavgsh, 1, 21);
GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

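/*
 * vmrgew/vmrgow below merge the even/odd-indexed words of vA and vB:
 * each doubleword of the result pairs one word of vA with the
 * corresponding word of vB, built from shift and deposit ops on the
 * 64-bit halves.
 */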
static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(avr);
}

static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(avr);
}

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
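/*
 * The index bytes are computed arithmetically: multiplying sh by
 * 0x0101010101010101 replicates it into every byte lane, so adding
 * 0x0001020304050607 yields byte k = sh + k. E.g. for sh = 3 the high
 * doubleword of vD becomes 0x030405060708090A. trans_lvsr uses the same
 * trick with a subtraction.
 */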
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from description) and place them in
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ULL);
    set_avr64(VT, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from description) and place them in
     * the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from description) and place
     * them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from description) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shift the 128-bit value of vA left by the value specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical or the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save highest 'sh' bits of lower doubleword element of vA in variable
     * 'carry' and perform shift on lower doubleword.
     */
    get_avr64(avr, VA, false);
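    /*
     * The extraction is done as two shifts (by 32, then by 32 - sh) so
     * that every shift amount stays below 64 even when sh is 0;
     * trans_vsr below splits its shifts the same way.
     */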
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform shift on higher doubleword element of vA and replace lowest
     * 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shift the 128-bit value of vA right by the value specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical or the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save lowest 'sh' bits of higher doubleword element of vA in variable
     * 'carry' and perform shift on higher doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform shift on lower doubleword element of vA and replace highest
     * 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All i-th bits (i in range 1 to 8) of each byte of each doubleword
 * element in the source register are concatenated and placed into the
 * i-th byte of the corresponding doubleword element of the destination
 * register.
 *
 * The following solution is applied to both doubleword elements of the
 * source register in parallel, in order to reduce the number of
 * instructions needed (that's why arrays are used):
 * First, both doubleword elements of source register vB are placed in
 * the appropriate elements of array avr. Bits are gathered in 2x8
 * iterations (two for loops). In the first iteration, bit 1 of byte 1,
 * bit 2 of byte 2, ... bit 8 of byte 8 are already in their final spots,
 * so avr[i], i={0,1} can be ANDed with tcg_mask. For each following
 * iteration, both avr[i] and tcg_mask have to be shifted right by 7 and
 * 8 places, respectively, to get bit 1 of byte 2, bit 2 of byte 3, ...
 * bit 7 of byte 8 into their final spots, so the shifted avr values
 * (saved in tmp) can be ANDed with the new value of tcg_mask, and so on.
 * After the first 8 iterations (the first loop), all the first bits are
 * in their final places, all second bits except the second bit of the
 * eighth byte are in their places, ... and only one eighth bit (the one
 * from the eighth byte) is in its place. In the second loop we do all
 * operations symmetrically, in order to get the other half of the bits
 * into their final spots. Results for the first and second doubleword
 * elements are saved in result[0] and result[1] respectively. In the end
 * those results are saved in the appropriate doubleword elements of the
 * destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tcg_mask);
    tcg_temp_free_i64(result[0]);
    tcg_temp_free_i64(result[1]);
    tcg_temp_free_i64(avr[0]);
    tcg_temp_free_i64(avr[1]);
}

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Count the number of leading zero bits of each word element in the
 * source register and place the result in the corresponding word
 * element of the destination register.
 */
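/*
 * Note: the i * 4 offsets below address all four words of the 16-byte
 * register starting from u64[0]; since each word is loaded and stored
 * back at the same offset, the host byte order of the two halves does
 * not affect the result.
 */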
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
                       offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
                       offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }

    tcg_temp_free_i32(tmp);
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Count the number of leading zero bits of each doubleword element in
 * the source register and place the result in the corresponding
 * doubleword element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
}

GEN_VXFORM(vmuloub, 4, 0);
GEN_VXFORM(vmulouh, 4, 1);
GEN_VXFORM(vmulouw, 4, 2);
GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
                vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vmulosb, 4, 4);
GEN_VXFORM(vmulosh, 4, 5);
GEN_VXFORM(vmulosw, 4, 6);
GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7);
GEN_VXFORM(vmuleub, 4, 8);
GEN_VXFORM(vmuleuh, 4, 9);
GEN_VXFORM(vmuleuw, 4, 10);
GEN_VXFORM(vmulhuw, 4, 10);
GEN_VXFORM(vmulhud, 4, 11);
GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE,
                vmulhuw, PPC_NONE, PPC2_ISA310);
GEN_VXFORM(vmulesb, 4, 12);
GEN_VXFORM(vmulesh, 4, 13);
GEN_VXFORM(vmulesw, 4, 14);
GEN_VXFORM(vmulhsw, 4, 14);
GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE,
                vmulhsw, PPC_NONE, PPC2_ISA310);
GEN_VXFORM(vmulhsd, 4, 15);
GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
GEN_VXFORM(vrlwnm, 2, 6);
GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
                vrlwnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23);
GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8);
GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9);
GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10);
GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27);
GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12);
GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13);
GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14);
GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);
GEN_VXFORM(vaddcuw, 0, 6);
GEN_VXFORM(vsubcuw, 0, 22);

#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                         TCGv_vec sat, TCGv_vec a,      \
                                         TCGv_vec b)                    \
{                                                                       \
    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
    tcg_temp_free_vec(x);                                               \
}                                                                       \
static void glue(gen_, NAME)(DisasContext *ctx)                         \
{                                                                       \
    static const TCGOpcode vecop_list[] = {                             \
        glue(glue(INDEX_op_, NORM), _vec),                              \
        glue(glue(INDEX_op_, SAT), _vec),                               \
        INDEX_op_cmp_vec, 0                                             \
    };                                                                  \
    static const GVecGen4 g = {                                         \
        .fniv = glue(glue(gen_, NAME), _vec),                           \
        .fno = glue(gen_helper_, NAME),                                 \
        .opt_opc = vecop_list,                                          \
        .write_aofs = true,                                             \
        .vece = VECE,                                                   \
    };                                                                  \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
                   offsetof(CPUPPCState, vscr_sat),                     \
                   avr_full_offset(rA(ctx->opcode)),                    \
                   avr_full_offset(rB(ctx->opcode)),                    \
                   16, 16, &g);                                         \
}

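/*
 * Each saturating operation is computed twice, modulo (NORM) and
 * saturating (SAT); lanes where the two results differ have saturated,
 * and that difference mask is ORed into vscr_sat, which write_aofs
 * exposes as an in/out operand of the gvec expansion.
 */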
GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM(vadduqm, 0, 4);
GEN_VXFORM(vaddcuq, 0, 5);
GEN_VXFORM3(vaddeuqm, 30, 0);
GEN_VXFORM3(vaddecuq, 30, 0);
GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
                vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vsubuqm, 0, 20);
GEN_VXFORM(vsubcuq, 0, 21);
GEN_VXFORM3(vsubeuqm, 31, 0);
GEN_VXFORM3(vsubecuq, 31, 0);
GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
                vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0);
GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1);
GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2);
GEN_VXFORM(vrlwmi, 2, 2);
GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
                vrlwmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3);
GEN_VXFORM(vrldmi, 2, 3);
GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
                vrldmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM(vrldnm, 2, 7);
GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
                vrldnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                      vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr ra, rb, rd;                                            \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##opname(cpu_env, rd, ra, rb);                       \
        tcg_temp_free_ptr(ra);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXRFORM(name, opc2, opc3)                                   \
    GEN_VXRFORM1(name, name, #name, opc2, opc3)                         \
    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))

/*
 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
 * bit but also use bit 21 as an actual Rc bit. In general, these pairs
 * come from different versions of the ISA, so we must also support a
 * pair of flags for each instruction.
 */
#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)      \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) {  \
        if (Rc21(ctx->opcode) == 0) {                                   \
            gen_##name0(ctx);                                           \
        } else {                                                        \
            gen_##name0##_(ctx);                                        \
        }                                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) {  \
        if (Rc21(ctx->opcode) == 0) {                                   \
            gen_##name1(ctx);                                           \
        } else {                                                        \
            gen_##name1##_(ctx);                                        \
        }                                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

GEN_VXRFORM(vcmpequb, 3, 0)
GEN_VXRFORM(vcmpequh, 3, 1)
GEN_VXRFORM(vcmpequw, 3, 2)
GEN_VXRFORM(vcmpequd, 3, 3)
GEN_VXRFORM(vcmpnezb, 3, 4)
GEN_VXRFORM(vcmpnezh, 3, 5)
GEN_VXRFORM(vcmpnezw, 3, 6)
GEN_VXRFORM(vcmpgtsb, 3, 12)
GEN_VXRFORM(vcmpgtsh, 3, 13)
GEN_VXRFORM(vcmpgtsw, 3, 14)
GEN_VXRFORM(vcmpgtsd, 3, 15)
GEN_VXRFORM(vcmpgtub, 3, 8)
GEN_VXRFORM(vcmpgtuh, 3, 9)
GEN_VXRFORM(vcmpgtuw, 3, 10)
GEN_VXRFORM(vcmpgtud, 3, 11)
GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)
GEN_VXRFORM(vcmpneb, 3, 0)
GEN_VXRFORM(vcmpneh, 3, 1)
GEN_VXRFORM(vcmpnew, 3, 2)

GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
                 vcmpneb, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
                 vcmpneh, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
                 vcmpnew, PPC_NONE, PPC2_ISA300)
GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
                 vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)

static void gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3)                       \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(cpu_env, rd, rb);                             \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
        tcg_temp_free_ptr(rb);                                          \
    }
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
GEN_VXFORM_NOA(vupklsb, 7, 10);
GEN_VXFORM_NOA(vupklsh, 7, 11);
GEN_VXFORM_NOA(vupklsw, 7, 27);
GEN_VXFORM_NOA(vupkhpx, 7, 13);
GEN_VXFORM_NOA(vupklpx, 7, 15);
GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
GEN_VXFORM_NOA(vprtybw, 1, 24);
GEN_VXFORM_NOA(vprtybd, 1, 24);
GEN_VXFORM_NOA(vprtybq, 1, 24);

static void gen_vsplt(DisasContext *ctx, int vece)
{
    int uimm, dofs, bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    uimm = UIMM5(ctx->opcode);
    bofs = avr_full_offset(rB(ctx->opcode));
    dofs = avr_full_offset(rD(ctx->opcode));

    /* Experimental testing shows that hardware masks the immediate. */
    bofs += (uimm << vece) & 15;
#ifndef HOST_WORDS_BIGENDIAN
    bofs ^= 15;
    bofs &= ~((1 << vece) - 1);
#endif

    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
}

#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3)                        \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }

#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        TCGv_i32 uimm;                                                  \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        uimm = tcg_const_i32(UIMM5(ctx->opcode));                       \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(cpu_env, rd, rb, uimm);                       \
        tcg_temp_free_i32(uimm);                                        \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        uint8_t uimm = UIMM4(ctx->opcode);                              \
        TCGv_i32 t0;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        if (uimm > splat_max) {                                         \
            uimm = 0;                                                   \
        }                                                               \
        t0 = tcg_temp_new_i32();                                        \
        tcg_gen_movi_i32(t0, uimm);                                     \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb, t0);                                  \
        tcg_temp_free_i32(t0);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
                vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
                vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
                vextractuw, PPC_NONE, PPC2_ISA300);

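/*
 * For VEXTD*, vrA || vrB forms a 32-byte value indexed from the left;
 * the right-indexed (*VRX) variants convert their GPR index with
 * (32 - size) - rc, which lets both directions share the left-index
 * helpers.
 */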
static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
                      void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv))
{
    TCGv_ptr vrt, vra, vrb;
    TCGv rc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    rc = tcg_temp_new();

    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
    if (right) {
        tcg_gen_subfi_tl(rc, 32 - size, rc);
    }
    gen_helper(cpu_env, vrt, vra, vrb, rc);

    tcg_temp_free_ptr(vrt);
    tcg_temp_free_ptr(vra);
    tcg_temp_free_ptr(vrb);
    tcg_temp_free(rc);
    return true;
}

TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)

TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)

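/*
 * The VINS* family inserts an element at a byte index counted from the
 * left of the target vector; the right-indexed variants remap the index
 * as (16 - size) - idx so that the left-index helpers can be reused.
 */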
static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
                     TCGv_i64 rb,
                     void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_ptr t;
    TCGv idx;

    t = gen_avr_ptr(vrt);
    idx = tcg_temp_new();

    tcg_gen_andi_tl(idx, ra, 0xF);
    if (right) {
        tcg_gen_subfi_tl(idx, 16 - size, idx);
    }

    gen_helper(cpu_env, t, rb, idx);

    tcg_temp_free_ptr(t);
    tcg_temp_free(idx);

    return true;
}

static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
                      int vrb,
                      void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    val = tcg_temp_new_i64();
    get_avr64(val, vrb, true);
    ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
                     gen_helper);
}

static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                            void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        /*
         * PowerISA v3.1 says that the resulting value is undefined in this
         * case, so just log a guest error and leave VRT unchanged. The
         * real hardware would do a partial insert, e.g. if VRT is zeroed and
         * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
         * VRT = 0x0000...00001234, but we don't bother to reproduce this
         * behavior as software shouldn't rely on it.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
                  gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
                     gen_helper);
}

TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)

TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)

TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)

TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)

TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)

TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)

static void gen_vsldoi(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rd;
    TCGv_i32 sh;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    sh = tcg_const_i32(VSH(ctx->opcode));
    gen_helper_vsldoi(rd, ra, rb, sh);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rd);
    tcg_temp_free_i32(sh);
}

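/*
 * VSLDBI/VSRDBI shift the vrA || vrB doubleword pair by a 3-bit
 * immediate; tcg_gen_extract2_i64 extracts a 64-bit window from the
 * concatenation of two adjacent doublewords, which implements the
 * cross-doubleword shift without explicit carry handling.
 */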
static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vra, false);

    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vrb, true);

        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);

        tcg_temp_free_i64(t2);
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t2, t1, t0;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vrb, false);
    get_avr64(t1, a->vrb, true);

    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vra, false);

        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
        tcg_gen_extract2_i64(t1, t1, t2, a->sh);

        tcg_temp_free_i64(t2);
    }

    set_avr64(a->vrt, t0, false);
    set_avr64(a->vrt, t1, true);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

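/*
 * Expanding a mask from the most significant bit of each element is a
 * single gvec arithmetic shift right by (element width - 1), which
 * replicates the sign bit across the whole element.
 */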
static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                      (8 << vece) - 1, 16, 16);

    return true;
}

TRANS(VEXPANDBM, do_vexpand, MO_8)
TRANS(VEXPANDHM, do_vexpand, MO_16)
TRANS(VEXPANDWM, do_vexpand, MO_32)
TRANS(VEXPANDDM, do_vexpand, MO_64)

static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_sari_i64(tmp, tmp, 63);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);

    tcg_temp_free_i64(tmp);
    return true;
}

static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
                   mask = dup_const(vece, 1 << (elem_width - 1));
    uint64_t i, j;
    TCGv_i64 lo, hi, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(lo, a->vrb, false);
    get_avr64(hi, a->vrb, true);

    tcg_gen_andi_i64(lo, lo, mask);
    tcg_gen_andi_i64(hi, hi, mask);

    /*
     * Gather the most significant bit of each element into the highest
     * bits of the doubleword. E.g. for bytes:
     * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
     *     & dup(1 << (elem_width - 1))
     * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
     *     << 32 - 4
     * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
     *     |
     * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
     *     << 16 - 2
     * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
     *     |
     * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
     *     << 8 - 1
     * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
     *     |
     * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

    tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
    tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(VEXTRACTBM, do_vextractm, MO_8)
TRANS(VEXTRACTHM, do_vextractm, MO_16)
TRANS(VEXTRACTWM, do_vextractm, MO_32)
TRANS(VEXTRACTDM, do_vextractm, MO_64)

static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_shri_i64(tmp, tmp, 63);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);

    tcg_temp_free_i64(tmp);

    return true;
}

static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
    uint64_t c;
    int i, j;
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
    tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
    tcg_gen_extract_i64(lo, t0, 0, elem_count_half);

    /*
     * Spread the bits into their respective elements.
     * E.g. for bytes:
     * 00000000000000000000000000000000000000000000000000000000abcdefgh
     *     << 32 - 4
     * 0000000000000000000000000000abcdefgh0000000000000000000000000000
     *     |
     * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
     *     << 16 - 2
     * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
     *     |
     * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
     *     << 8 - 1
     * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
     *     |
     * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
     *     & dup(1)
     * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
     *     * 0xff
     * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

    c = dup_const(vece, 1);
    tcg_gen_andi_i64(hi, hi, c);
    tcg_gen_andi_i64(lo, lo, c);

    c = MAKE_64BIT_MASK(0, elem_width);
    tcg_gen_muli_i64(hi, hi, c);
    tcg_gen_muli_i64(lo, lo, c);

    set_avr64(a->vrt, lo, false);
    set_avr64(a->vrt, hi, true);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(MTVSRBM, do_mtvsrm, MO_8)
TRANS(MTVSRHM, do_mtvsrm, MO_16)
TRANS(MTVSRWM, do_mtvsrm, MO_32)
TRANS(MTVSRDM, do_mtvsrm, MO_64)

static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
    tcg_gen_sextract_i64(tmp, tmp, 0, 1);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);

    tcg_temp_free_i64(tmp);

    return true;
}

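/*
 * MTVSRBMI takes its mask from a 16-bit immediate instead of a GPR, so
 * the same bit spreading as in do_mtvsrm can be done at translation
 * time, folding the result into two 64-bit constants.
 */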
static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
{
    const uint64_t mask = dup_const(MO_8, 1);
    uint64_t hi, lo;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = extract16(a->b, 8, 8);
    lo = extract16(a->b, 0, 8);

    for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
        hi |= hi << (j - i);
        lo |= lo << (j - i);
    }

    hi = (hi & mask) * 0xFF;
    lo = (lo & mask) * 0xFF;

    set_avr64(a->vrt, tcg_constant_i64(hi), true);
    set_avr64(a->vrt, tcg_constant_i64(lo), false);

    return true;
}

#define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
    {                                                                   \
        TCGv_ptr ra, rb, rc, rd;                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rc = gen_avr_ptr(rC(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        if (Rc(ctx->opcode)) {                                          \
            gen_helper_##name1(cpu_env, rd, ra, rb, rc);                \
        } else {                                                        \
            gen_helper_##name0(cpu_env, rd, ra, rb, rc);                \
        }                                                               \
        tcg_temp_free_ptr(ra);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rc);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)

static void gen_vmladduhm(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rc, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rc = gen_avr_ptr(rC(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vmladduhm(rd, ra, rb, rc);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rc);
    tcg_temp_free_ptr(rd);
}

static void gen_vpermr(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rc, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rc = gen_avr_ptr(rC(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rc);
    tcg_temp_free_ptr(rd);
}

GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
GEN_VAFORM_PAIRED(vsel, vperm, 21)
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)

GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_TRANS(vclzd, 1, 31)
GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
GEN_VXFORM_NOA(vpopcntd, 1, 31)
GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)
GEN_VXFORM(vpmsumd, 4, 19)

#define GEN_BCD(op)                                                     \
static void gen_##op(DisasContext *ctx)                                 \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    TCGv_i32 ps;                                                        \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
                                                                        \
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0);                     \
                                                                        \
    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);                        \
                                                                        \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
    tcg_temp_free_i32(ps);                                              \
}

#define GEN_BCD2(op)                                                    \
static void gen_##op(DisasContext *ctx)                                 \
{                                                                       \
    TCGv_ptr rd, rb;                                                    \
    TCGv_i32 ps;                                                        \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \

#define GEN_BCD2(op)                                \
static void gen_##op(DisasContext *ctx)             \
{                                                   \
    TCGv_ptr rd, rb;                                \
    TCGv_i32 ps;                                    \
                                                    \
    if (unlikely(!ctx->altivec_enabled)) {          \
        gen_exception(ctx, POWERPC_EXCP_VPU);       \
        return;                                     \
    }                                               \
                                                    \
    rb = gen_avr_ptr(rB(ctx->opcode));              \
    rd = gen_avr_ptr(rD(ctx->opcode));              \
                                                    \
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
                                                    \
    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
                                                    \
    tcg_temp_free_ptr(rb);                          \
    tcg_temp_free_ptr(rd);                          \
    tcg_temp_free_i32(ps);                          \
}

GEN_BCD(bcdadd)
GEN_BCD(bcdsub)
GEN_BCD2(bcdcfn)
GEN_BCD2(bcdctn)
GEN_BCD2(bcdcfz)
GEN_BCD2(bcdctz)
GEN_BCD2(bcdcfsq)
GEN_BCD2(bcdctsq)
GEN_BCD2(bcdsetsgn)
GEN_BCD(bcdcpsgn);
GEN_BCD(bcds);
GEN_BCD(bcdus);
GEN_BCD(bcdsr);
GEN_BCD(bcdtrunc);
GEN_BCD(bcdutrunc);

static void gen_xpnd04_1(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 5:
        gen_bcdctn(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

/* As gen_xpnd04_1, but without the bcdctn encoding (case 5) */
static void gen_xpnd04_2(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_1, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdutrunc, PPC_NONE, PPC2_ISA300)

static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rd);
}

GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
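
/*
 * Note on the AES helpers above: vcipher applies one full encryption
 * round (SubBytes, ShiftRows, MixColumns, AddRoundKey) to the 128-bit
 * operand, vcipherlast omits MixColumns as required for the final
 * round, vncipher/vncipherlast are the corresponding inverse-cipher
 * rounds, and vsbox applies SubBytes alone.
 */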

#define VSHASIGMA(op)                             \
static void gen_##op(DisasContext *ctx)           \
{                                                 \
    TCGv_ptr ra, rd;                              \
    TCGv_i32 st_six;                              \
    if (unlikely(!ctx->altivec_enabled)) {        \
        gen_exception(ctx, POWERPC_EXCP_VPU);     \
        return;                                   \
    }                                             \
    ra = gen_avr_ptr(rA(ctx->opcode));            \
    rd = gen_avr_ptr(rD(ctx->opcode));            \
    st_six = tcg_const_i32(rB(ctx->opcode));      \
    gen_helper_##op(rd, ra, st_six);              \
    tcg_temp_free_ptr(ra);                        \
    tcg_temp_free_ptr(rd);                        \
    tcg_temp_free_i32(st_six);                    \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_CFUGED,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, false, &g);

    return true;
}

static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, true, &g);

    return true;
}

static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PDEPD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PEXTD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
#undef GEN_VR_STVE

#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2
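
/*
 * Illustrative sketch (not part of the build): VPDEPD and VPEXTD above
 * defer to gen_helper_PDEPD/gen_helper_PEXTD, which perform a
 * per-doubleword bit deposit/extract analogous to x86 PDEP/PEXT.
 * Portable equivalents, written out only to document the semantics
 * (the function names here are hypothetical):
 */
#if 0
#include <stdint.h>

/* Scatter the low bits of src into the positions selected by mask */
static uint64_t pdep64(uint64_t src, uint64_t mask)
{
    uint64_t dst = 0;

    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t lowest = mask & -mask; /* lowest set bit of mask */
        if (src & bit) {
            dst |= lowest;
        }
        mask &= mask - 1;               /* clear that bit */
    }
    return dst;
}

/* Gather the bits of src selected by mask into the low bits of dst */
static uint64_t pext64(uint64_t src, uint64_t mask)
{
    uint64_t dst = 0;

    for (uint64_t bit = 1; mask; bit <<= 1) {
        uint64_t lowest = mask & -mask;
        if (src & lowest) {
            dst |= bit;
        }
        mask &= mask - 1;
    }
    return dst;
}

/*
 * VCFUGED's centrifuge can then be expressed (assumed, per the ISA 3.1
 * description) as the mask-selected bits gathered to the right with the
 * remaining bits packed to the left:
 *   cfuged(src, mask) ==
 *       (pext64(src, ~mask) << __builtin_popcountll(mask))
 *       | pext64(src, mask)
 */
#endif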