1/* 2 * translate/vmx-impl.c 3 * 4 * Altivec/VMX translation 5 */ 6 7/*** Altivec vector extension ***/ 8/* Altivec registers moves */ 9 10static inline TCGv_ptr gen_avr_ptr(int reg) 11{ 12 TCGv_ptr r = tcg_temp_new_ptr(); 13 tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg)); 14 return r; 15} 16 17#define GEN_VR_LDX(name, opc2, opc3) \ 18static void glue(gen_, name)(DisasContext *ctx) \ 19{ \ 20 TCGv EA; \ 21 TCGv_i64 avr; \ 22 if (unlikely(!ctx->altivec_enabled)) { \ 23 gen_exception(ctx, POWERPC_EXCP_VPU); \ 24 return; \ 25 } \ 26 gen_set_access_type(ctx, ACCESS_INT); \ 27 avr = tcg_temp_new_i64(); \ 28 EA = tcg_temp_new(); \ 29 gen_addr_reg_index(ctx, EA); \ 30 tcg_gen_andi_tl(EA, EA, ~0xf); \ 31 /* \ 32 * We only need to swap high and low halves. gen_qemu_ld64_i64 \ 33 * does necessary 64-bit byteswap already. \ 34 */ \ 35 if (ctx->le_mode) { \ 36 gen_qemu_ld64_i64(ctx, avr, EA); \ 37 set_avr64(rD(ctx->opcode), avr, false); \ 38 tcg_gen_addi_tl(EA, EA, 8); \ 39 gen_qemu_ld64_i64(ctx, avr, EA); \ 40 set_avr64(rD(ctx->opcode), avr, true); \ 41 } else { \ 42 gen_qemu_ld64_i64(ctx, avr, EA); \ 43 set_avr64(rD(ctx->opcode), avr, true); \ 44 tcg_gen_addi_tl(EA, EA, 8); \ 45 gen_qemu_ld64_i64(ctx, avr, EA); \ 46 set_avr64(rD(ctx->opcode), avr, false); \ 47 } \ 48 tcg_temp_free(EA); \ 49 tcg_temp_free_i64(avr); \ 50} 51 52#define GEN_VR_STX(name, opc2, opc3) \ 53static void gen_st##name(DisasContext *ctx) \ 54{ \ 55 TCGv EA; \ 56 TCGv_i64 avr; \ 57 if (unlikely(!ctx->altivec_enabled)) { \ 58 gen_exception(ctx, POWERPC_EXCP_VPU); \ 59 return; \ 60 } \ 61 gen_set_access_type(ctx, ACCESS_INT); \ 62 avr = tcg_temp_new_i64(); \ 63 EA = tcg_temp_new(); \ 64 gen_addr_reg_index(ctx, EA); \ 65 tcg_gen_andi_tl(EA, EA, ~0xf); \ 66 /* \ 67 * We only need to swap high and low halves. gen_qemu_st64_i64 \ 68 * does necessary 64-bit byteswap already. 
\ 69 */ \ 70 if (ctx->le_mode) { \ 71 get_avr64(avr, rD(ctx->opcode), false); \ 72 gen_qemu_st64_i64(ctx, avr, EA); \ 73 tcg_gen_addi_tl(EA, EA, 8); \ 74 get_avr64(avr, rD(ctx->opcode), true); \ 75 gen_qemu_st64_i64(ctx, avr, EA); \ 76 } else { \ 77 get_avr64(avr, rD(ctx->opcode), true); \ 78 gen_qemu_st64_i64(ctx, avr, EA); \ 79 tcg_gen_addi_tl(EA, EA, 8); \ 80 get_avr64(avr, rD(ctx->opcode), false); \ 81 gen_qemu_st64_i64(ctx, avr, EA); \ 82 } \ 83 tcg_temp_free(EA); \ 84 tcg_temp_free_i64(avr); \ 85} 86 87#define GEN_VR_LVE(name, opc2, opc3, size) \ 88static void gen_lve##name(DisasContext *ctx) \ 89 { \ 90 TCGv EA; \ 91 TCGv_ptr rs; \ 92 if (unlikely(!ctx->altivec_enabled)) { \ 93 gen_exception(ctx, POWERPC_EXCP_VPU); \ 94 return; \ 95 } \ 96 gen_set_access_type(ctx, ACCESS_INT); \ 97 EA = tcg_temp_new(); \ 98 gen_addr_reg_index(ctx, EA); \ 99 if (size > 1) { \ 100 tcg_gen_andi_tl(EA, EA, ~(size - 1)); \ 101 } \ 102 rs = gen_avr_ptr(rS(ctx->opcode)); \ 103 gen_helper_lve##name(cpu_env, rs, EA); \ 104 tcg_temp_free(EA); \ 105 tcg_temp_free_ptr(rs); \ 106 } 107 108#define GEN_VR_STVE(name, opc2, opc3, size) \ 109static void gen_stve##name(DisasContext *ctx) \ 110 { \ 111 TCGv EA; \ 112 TCGv_ptr rs; \ 113 if (unlikely(!ctx->altivec_enabled)) { \ 114 gen_exception(ctx, POWERPC_EXCP_VPU); \ 115 return; \ 116 } \ 117 gen_set_access_type(ctx, ACCESS_INT); \ 118 EA = tcg_temp_new(); \ 119 gen_addr_reg_index(ctx, EA); \ 120 if (size > 1) { \ 121 tcg_gen_andi_tl(EA, EA, ~(size - 1)); \ 122 } \ 123 rs = gen_avr_ptr(rS(ctx->opcode)); \ 124 gen_helper_stve##name(cpu_env, rs, EA); \ 125 tcg_temp_free(EA); \ 126 tcg_temp_free_ptr(rs); \ 127 } 128 129GEN_VR_LDX(lvx, 0x07, 0x03); 130/* As we don't emulate the cache, lvxl is stricly equivalent to lvx */ 131GEN_VR_LDX(lvxl, 0x07, 0x0B); 132 133GEN_VR_LVE(bx, 0x07, 0x00, 1); 134GEN_VR_LVE(hx, 0x07, 0x01, 2); 135GEN_VR_LVE(wx, 0x07, 0x02, 4); 136 137GEN_VR_STX(svx, 0x07, 0x07); 138/* As we don't emulate the cache, stvxl is stricly 
equivalent to stvx */ 139GEN_VR_STX(svxl, 0x07, 0x0F); 140 141GEN_VR_STVE(bx, 0x07, 0x04, 1); 142GEN_VR_STVE(hx, 0x07, 0x05, 2); 143GEN_VR_STVE(wx, 0x07, 0x06, 4); 144 145static void gen_mfvscr(DisasContext *ctx) 146{ 147 TCGv_i32 t; 148 TCGv_i64 avr; 149 if (unlikely(!ctx->altivec_enabled)) { 150 gen_exception(ctx, POWERPC_EXCP_VPU); 151 return; 152 } 153 avr = tcg_temp_new_i64(); 154 tcg_gen_movi_i64(avr, 0); 155 set_avr64(rD(ctx->opcode), avr, true); 156 t = tcg_temp_new_i32(); 157 gen_helper_mfvscr(t, cpu_env); 158 tcg_gen_extu_i32_i64(avr, t); 159 set_avr64(rD(ctx->opcode), avr, false); 160 tcg_temp_free_i32(t); 161 tcg_temp_free_i64(avr); 162} 163 164static void gen_mtvscr(DisasContext *ctx) 165{ 166 TCGv_i32 val; 167 int bofs; 168 169 if (unlikely(!ctx->altivec_enabled)) { 170 gen_exception(ctx, POWERPC_EXCP_VPU); 171 return; 172 } 173 174 val = tcg_temp_new_i32(); 175 bofs = avr_full_offset(rB(ctx->opcode)); 176#ifdef HOST_WORDS_BIGENDIAN 177 bofs += 3 * 4; 178#endif 179 180 tcg_gen_ld_i32(val, cpu_env, bofs); 181 gen_helper_mtvscr(cpu_env, val); 182 tcg_temp_free_i32(val); 183} 184 185#define GEN_VX_VMUL10(name, add_cin, ret_carry) \ 186static void glue(gen_, name)(DisasContext *ctx) \ 187{ \ 188 TCGv_i64 t0; \ 189 TCGv_i64 t1; \ 190 TCGv_i64 t2; \ 191 TCGv_i64 avr; \ 192 TCGv_i64 ten, z; \ 193 \ 194 if (unlikely(!ctx->altivec_enabled)) { \ 195 gen_exception(ctx, POWERPC_EXCP_VPU); \ 196 return; \ 197 } \ 198 \ 199 t0 = tcg_temp_new_i64(); \ 200 t1 = tcg_temp_new_i64(); \ 201 t2 = tcg_temp_new_i64(); \ 202 avr = tcg_temp_new_i64(); \ 203 ten = tcg_const_i64(10); \ 204 z = tcg_const_i64(0); \ 205 \ 206 if (add_cin) { \ 207 get_avr64(avr, rA(ctx->opcode), false); \ 208 tcg_gen_mulu2_i64(t0, t1, avr, ten); \ 209 get_avr64(avr, rB(ctx->opcode), false); \ 210 tcg_gen_andi_i64(t2, avr, 0xF); \ 211 tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \ 212 set_avr64(rD(ctx->opcode), avr, false); \ 213 } else { \ 214 get_avr64(avr, rA(ctx->opcode), false); \ 215 
tcg_gen_mulu2_i64(avr, t2, avr, ten); \ 216 set_avr64(rD(ctx->opcode), avr, false); \ 217 } \ 218 \ 219 if (ret_carry) { \ 220 get_avr64(avr, rA(ctx->opcode), true); \ 221 tcg_gen_mulu2_i64(t0, t1, avr, ten); \ 222 tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \ 223 set_avr64(rD(ctx->opcode), avr, false); \ 224 set_avr64(rD(ctx->opcode), z, true); \ 225 } else { \ 226 get_avr64(avr, rA(ctx->opcode), true); \ 227 tcg_gen_mul_i64(t0, avr, ten); \ 228 tcg_gen_add_i64(avr, t0, t2); \ 229 set_avr64(rD(ctx->opcode), avr, true); \ 230 } \ 231 \ 232 tcg_temp_free_i64(t0); \ 233 tcg_temp_free_i64(t1); \ 234 tcg_temp_free_i64(t2); \ 235 tcg_temp_free_i64(avr); \ 236 tcg_temp_free_i64(ten); \ 237 tcg_temp_free_i64(z); \ 238} \ 239 240GEN_VX_VMUL10(vmul10uq, 0, 0); 241GEN_VX_VMUL10(vmul10euq, 1, 0); 242GEN_VX_VMUL10(vmul10cuq, 0, 1); 243GEN_VX_VMUL10(vmul10ecuq, 1, 1); 244 245#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3) \ 246static void glue(gen_, name)(DisasContext *ctx) \ 247{ \ 248 if (unlikely(!ctx->altivec_enabled)) { \ 249 gen_exception(ctx, POWERPC_EXCP_VPU); \ 250 return; \ 251 } \ 252 \ 253 tcg_op(vece, \ 254 avr_full_offset(rD(ctx->opcode)), \ 255 avr_full_offset(rA(ctx->opcode)), \ 256 avr_full_offset(rB(ctx->opcode)), \ 257 16, 16); \ 258} 259 260/* Logical operations */ 261GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16); 262GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17); 263GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18); 264GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19); 265GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20); 266GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26); 267GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22); 268GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21); 269 270#define GEN_VXFORM(name, opc2, opc3) \ 271static void glue(gen_, name)(DisasContext *ctx) \ 272{ \ 273 TCGv_ptr ra, rb, rd; \ 274 if (unlikely(!ctx->altivec_enabled)) { \ 275 gen_exception(ctx, POWERPC_EXCP_VPU); \ 276 return; \ 277 } \ 278 ra = 
gen_avr_ptr(rA(ctx->opcode)); \ 279 rb = gen_avr_ptr(rB(ctx->opcode)); \ 280 rd = gen_avr_ptr(rD(ctx->opcode)); \ 281 gen_helper_##name(rd, ra, rb); \ 282 tcg_temp_free_ptr(ra); \ 283 tcg_temp_free_ptr(rb); \ 284 tcg_temp_free_ptr(rd); \ 285} 286 287#define GEN_VXFORM_TRANS(name, opc2, opc3) \ 288static void glue(gen_, name)(DisasContext *ctx) \ 289{ \ 290 if (unlikely(!ctx->altivec_enabled)) { \ 291 gen_exception(ctx, POWERPC_EXCP_VPU); \ 292 return; \ 293 } \ 294 trans_##name(ctx); \ 295} 296 297#define GEN_VXFORM_ENV(name, opc2, opc3) \ 298static void glue(gen_, name)(DisasContext *ctx) \ 299{ \ 300 TCGv_ptr ra, rb, rd; \ 301 if (unlikely(!ctx->altivec_enabled)) { \ 302 gen_exception(ctx, POWERPC_EXCP_VPU); \ 303 return; \ 304 } \ 305 ra = gen_avr_ptr(rA(ctx->opcode)); \ 306 rb = gen_avr_ptr(rB(ctx->opcode)); \ 307 rd = gen_avr_ptr(rD(ctx->opcode)); \ 308 gen_helper_##name(cpu_env, rd, ra, rb); \ 309 tcg_temp_free_ptr(ra); \ 310 tcg_temp_free_ptr(rb); \ 311 tcg_temp_free_ptr(rd); \ 312} 313 314#define GEN_VXFORM3(name, opc2, opc3) \ 315static void glue(gen_, name)(DisasContext *ctx) \ 316{ \ 317 TCGv_ptr ra, rb, rc, rd; \ 318 if (unlikely(!ctx->altivec_enabled)) { \ 319 gen_exception(ctx, POWERPC_EXCP_VPU); \ 320 return; \ 321 } \ 322 ra = gen_avr_ptr(rA(ctx->opcode)); \ 323 rb = gen_avr_ptr(rB(ctx->opcode)); \ 324 rc = gen_avr_ptr(rC(ctx->opcode)); \ 325 rd = gen_avr_ptr(rD(ctx->opcode)); \ 326 gen_helper_##name(rd, ra, rb, rc); \ 327 tcg_temp_free_ptr(ra); \ 328 tcg_temp_free_ptr(rb); \ 329 tcg_temp_free_ptr(rc); \ 330 tcg_temp_free_ptr(rd); \ 331} 332 333/* 334 * Support for Altivec instruction pairs that use bit 31 (Rc) as 335 * an opcode bit. In general, these pairs come from different 336 * versions of the ISA, so we must also support a pair of flags for 337 * each instruction. 
338 */ 339#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \ 340static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ 341{ \ 342 if ((Rc(ctx->opcode) == 0) && \ 343 ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ 344 gen_##name0(ctx); \ 345 } else if ((Rc(ctx->opcode) == 1) && \ 346 ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ 347 gen_##name1(ctx); \ 348 } else { \ 349 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ 350 } \ 351} 352 353/* 354 * We use this macro if one instruction is realized with direct 355 * translation, and second one with helper. 356 */ 357#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\ 358static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ 359{ \ 360 if ((Rc(ctx->opcode) == 0) && \ 361 ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ 362 if (unlikely(!ctx->altivec_enabled)) { \ 363 gen_exception(ctx, POWERPC_EXCP_VPU); \ 364 return; \ 365 } \ 366 trans_##name0(ctx); \ 367 } else if ((Rc(ctx->opcode) == 1) && \ 368 ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ 369 gen_##name1(ctx); \ 370 } else { \ 371 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ 372 } \ 373} 374 375/* Adds support to provide invalid mask */ 376#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \ 377 name1, flg1, flg2_1, inval1) \ 378static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ 379{ \ 380 if ((Rc(ctx->opcode) == 0) && \ 381 ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \ 382 !(ctx->opcode & inval0)) { \ 383 gen_##name0(ctx); \ 384 } else if ((Rc(ctx->opcode) == 1) && \ 385 ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \ 386 !(ctx->opcode & inval1)) { \ 387 gen_##name1(ctx); \ 388 } else { \ 389 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ 390 } \ 391} 392 393#define GEN_VXFORM_HETRO(name, opc2, opc3) \ 394static void glue(gen_, name)(DisasContext *ctx) \ 395{ \ 396 
TCGv_ptr rb; \ 397 if (unlikely(!ctx->altivec_enabled)) { \ 398 gen_exception(ctx, POWERPC_EXCP_VPU); \ 399 return; \ 400 } \ 401 rb = gen_avr_ptr(rB(ctx->opcode)); \ 402 gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \ 403 tcg_temp_free_ptr(rb); \ 404} 405 406GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0); 407GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \ 408 vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800) 409GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1); 410GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \ 411 vmul10ecuq, PPC_NONE, PPC2_ISA300) 412GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2); 413GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3); 414GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16); 415GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17); 416GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18); 417GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19); 418GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0); 419GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1); 420GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2); 421GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3); 422GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4); 423GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5); 424GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6); 425GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7); 426GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8); 427GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9); 428GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10); 429GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11); 430GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12); 431GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13); 432GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14); 433GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15); 434GEN_VXFORM(vavgub, 1, 16); 435GEN_VXFORM(vabsdub, 1, 16); 436GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \ 437 vabsdub, PPC_NONE, 
                PPC2_ISA300)
GEN_VXFORM(vavguh, 1, 17);
GEN_VXFORM(vabsduh, 1, 17);
GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
                vabsduh, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguw, 1, 18);
GEN_VXFORM(vabsduw, 1, 18);
GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
                vabsduw, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavgsb, 1, 20);
GEN_VXFORM(vavgsh, 1, 21);
GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

/*
 * vmrgew VRT,VRA,VRB - Vector Merge Even Word
 *
 * In each doubleword of vD, the high word is the even (high) word of vA
 * and the low word is the even (high) word of vB.
 */
static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    /* High doubleword: tmp = vB.hi >> 32, deposited into low word of vA.hi */
    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    /* Low doubleword: same construction on the lower halves. */
    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(avr);
}

/*
 * vmrgow VRT,VRA,VRB - Vector Merge Odd Word
 *
 * In each doubleword of vD, the high word is the odd (low) word of vA
 * and the low word is the odd (low) word of vB.
 */
static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    /* High doubleword: vA's low word deposited into high word over vB. */
    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    /* Low doubleword: same construction on the lower halves. */
    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(avr);
}

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* sh = EA & 0xf (the low four bits of the effective address). */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Replicate sh into every byte lane, then add the byte indices
     * 0x00..0x07: this produces bytes sh..sh+7 of X for the higher
     * doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
    set_avr64(VT, result, true);
    /*
     * Bytes sh+8..sh+15 of X form the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* sh = EA & 0xf (the low four bits of the effective address). */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Replicate sh into every byte lane and subtract it from the byte
     * indices 0x10..0x17: this produces bytes (16-sh)..(23-sh) of X for
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Bytes (24-sh)..(31-sh) of X form the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB.
 * Lowest 3 bits in each byte element of register vB must be identical or
 * result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword of vA in 'carry'.
     * The two right shifts (32, then 32-sh) total 64-sh; doing it in one
     * step would be a shift by 64 when sh == 0, which is undefined for a
     * 64-bit TCG shift.  Then shift the lower doubleword left by sh.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Shift the higher doubleword of vA and OR in the bits carried out of
     * the lower doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shifting right 128 bit value of vA by value specified in bits 125-127 of vB.
 * Lowest 3 bits in each byte element of register vB must be identical or
 * result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword of vA in 'carry'
     * (as the top bits of 'carry').  The two left shifts (32, then 32-sh)
     * total 64-sh; a single shift by 64 when sh == 0 would be undefined
     * for a 64-bit TCG shift.  Then shift the higher doubleword right.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Shift the lower doubleword of vA and OR in the bits carried in from
     * the higher doubleword.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * All ith bits (i in range 1 to 8) of each byte of doubleword element in source
 * register are concatenated and placed into ith byte of appropriate doubleword
 * element in destination register.
 *
 * Following solution is done for both doubleword elements of source register
 * in parallel, in order to reduce the number of instructions needed(that's why
 * arrays are used):
 * First, both doubleword elements of source register vB are placed in
 * appropriate element of array avr. Bits are gathered in 2x8 iterations(2 for
 * loops). In first iteration bit 1 of byte 1, bit 2 of byte 2,...
 bit 8 of
 * byte 8 are in their final spots so avr[i], i={0,1} can be and-ed with
 * tcg_mask. For every following iteration, both avr[i] and tcg_mask variables
 * have to be shifted right for 7 and 8 places, respectively, in order to get
 * bit 1 of byte 2, bit 2 of byte 3.. bit 7 of byte 8 in their final spots so
 * shifted avr values(saved in tmp) can be and-ed with new value of tcg_mask...
 * After the first 8 iterations (first loop), all the first bits are in their
 * final places, all second bits but the second bit from the eighth byte are in
 * their places, ... and only one eighth bit (from the eighth byte) is in its
 * place. In the second loop we do all operations symmetrically, in order to
 * get the other half of the bits into their final spots. Results for first and
 * second doubleword elements are saved in result[0] and result[1]
 * respectively. In the end those results are saved in the appropriate
 * doubleword element of destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    /* Diagonal mask: bit i of byte i, for i = 0..7. */
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    /* Pass 0: the diagonal bits are already in their final positions. */
    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    /* Passes 1..7: gather bits from one side of the diagonal. */
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    /* Passes 1..7 mirrored: gather bits from the other side. */
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp,
tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    /* Commit both gathered doublewords to vD. */
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tcg_mask);
    tcg_temp_free_i64(result[0]);
    tcg_temp_free_i64(result[1]);
    tcg_temp_free_i64(avr[0]);
    tcg_temp_free_i64(avr[1]);
}

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Counting the number of leading zero bits of each word element in source
 * register and placing result in appropriate word element of destination
 * register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /*
     * Perform count for every word element using tcg_gen_clzi_i32; the
     * third argument (32) is the result for a zero input.
     * NOTE(review): the i * 4 byte offsets assume the four words of the
     * vector lie contiguously starting at vsr[...].u64[0] with host
     * endianness handled by the field layout — verify against cpu.h.
     */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }

    tcg_temp_free_i32(tmp);
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Counting the number of leading zero bits of each doubleword element in source
 * register and placing result in appropriate doubleword element of destination
 * register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword; clzi's third argument (64) is the result for 0 */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
}

/* Integer multiply even/odd and modulo variants — all via helpers. */
GEN_VXFORM(vmuloub, 4, 0);
GEN_VXFORM(vmulouh, 4, 1);
GEN_VXFORM(vmulouw, 4, 2);
GEN_VXFORM(vmuluwm, 4, 2);
GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
                vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vmulosb, 4, 4);
GEN_VXFORM(vmulosh, 4, 5);
GEN_VXFORM(vmulosw, 4, 6);
GEN_VXFORM(vmuleub, 4, 8);
GEN_VXFORM(vmuleuh, 4, 9);
GEN_VXFORM(vmuleuw, 4, 10);
GEN_VXFORM(vmulesb, 4, 12);
GEN_VXFORM(vmulesh, 4, 13);
GEN_VXFORM(vmulesw, 4, 14);
/* Per-element shifts mapped directly onto gvec shift-by-vector ops. */
GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
GEN_VXFORM(vrlwnm, 2, 6);
GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
                vrlwnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23);
GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8);
GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9);
GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10);
GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27);
GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12);
GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13);
GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14);
GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);
GEN_VXFORM(vaddcuw, 0, 6);
GEN_VXFORM(vsubcuw, 0, 22);

/*
 * Saturating add/subtract: NORM computes the wrapping result, SAT the
 * saturated one; any lane where the two differ ORs its bits into the
 * vscr_sat accumulator (passed via the gvec 'write_aofs' slot).
 */
#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME),
_vec)(unsigned vece, TCGv_vec t, \ 840 TCGv_vec sat, TCGv_vec a, \ 841 TCGv_vec b) \ 842{ \ 843 TCGv_vec x = tcg_temp_new_vec_matching(t); \ 844 glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b); \ 845 glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \ 846 tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \ 847 tcg_gen_or_vec(VECE, sat, sat, x); \ 848 tcg_temp_free_vec(x); \ 849} \ 850static void glue(gen_, NAME)(DisasContext *ctx) \ 851{ \ 852 static const TCGOpcode vecop_list[] = { \ 853 glue(glue(INDEX_op_, NORM), _vec), \ 854 glue(glue(INDEX_op_, SAT), _vec), \ 855 INDEX_op_cmp_vec, 0 \ 856 }; \ 857 static const GVecGen4 g = { \ 858 .fniv = glue(glue(gen_, NAME), _vec), \ 859 .fno = glue(gen_helper_, NAME), \ 860 .opt_opc = vecop_list, \ 861 .write_aofs = true, \ 862 .vece = VECE, \ 863 }; \ 864 if (unlikely(!ctx->altivec_enabled)) { \ 865 gen_exception(ctx, POWERPC_EXCP_VPU); \ 866 return; \ 867 } \ 868 tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)), \ 869 offsetof(CPUPPCState, vscr_sat), \ 870 avr_full_offset(rA(ctx->opcode)), \ 871 avr_full_offset(rB(ctx->opcode)), \ 872 16, 16, &g); \ 873} 874 875GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8); 876GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \ 877 vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800) 878GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9); 879GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \ 880 vmul10euq, PPC_NONE, PPC2_ISA300) 881GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10); 882GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12); 883GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13); 884GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14); 885GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24); 886GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25); 887GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26); 888GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28); 889GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29); 890GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30); 891GEN_VXFORM(vadduqm, 0, 4); 
892GEN_VXFORM(vaddcuq, 0, 5); 893GEN_VXFORM3(vaddeuqm, 30, 0); 894GEN_VXFORM3(vaddecuq, 30, 0); 895GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ 896 vaddecuq, PPC_NONE, PPC2_ALTIVEC_207) 897GEN_VXFORM(vsubuqm, 0, 20); 898GEN_VXFORM(vsubcuq, 0, 21); 899GEN_VXFORM3(vsubeuqm, 31, 0); 900GEN_VXFORM3(vsubecuq, 31, 0); 901GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ 902 vsubecuq, PPC_NONE, PPC2_ALTIVEC_207) 903GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0); 904GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1); 905GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2); 906GEN_VXFORM(vrlwmi, 2, 2); 907GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \ 908 vrlwmi, PPC_NONE, PPC2_ISA300) 909GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3); 910GEN_VXFORM(vrldmi, 2, 3); 911GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \ 912 vrldmi, PPC_NONE, PPC2_ISA300) 913GEN_VXFORM_TRANS(vsl, 2, 7); 914GEN_VXFORM(vrldnm, 2, 7); 915GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \ 916 vrldnm, PPC_NONE, PPC2_ISA300) 917GEN_VXFORM_TRANS(vsr, 2, 11); 918GEN_VXFORM_ENV(vpkuhum, 7, 0); 919GEN_VXFORM_ENV(vpkuwum, 7, 1); 920GEN_VXFORM_ENV(vpkudum, 7, 17); 921GEN_VXFORM_ENV(vpkuhus, 7, 2); 922GEN_VXFORM_ENV(vpkuwus, 7, 3); 923GEN_VXFORM_ENV(vpkudus, 7, 19); 924GEN_VXFORM_ENV(vpkshus, 7, 4); 925GEN_VXFORM_ENV(vpkswus, 7, 5); 926GEN_VXFORM_ENV(vpksdus, 7, 21); 927GEN_VXFORM_ENV(vpkshss, 7, 6); 928GEN_VXFORM_ENV(vpkswss, 7, 7); 929GEN_VXFORM_ENV(vpksdss, 7, 23); 930GEN_VXFORM(vpkpx, 7, 12); 931GEN_VXFORM_ENV(vsum4ubs, 4, 24); 932GEN_VXFORM_ENV(vsum4sbs, 4, 28); 933GEN_VXFORM_ENV(vsum4shs, 4, 25); 934GEN_VXFORM_ENV(vsum2sws, 4, 26); 935GEN_VXFORM_ENV(vsumsws, 4, 30); 936GEN_VXFORM_ENV(vaddfp, 5, 0); 937GEN_VXFORM_ENV(vsubfp, 5, 1); 938GEN_VXFORM_ENV(vmaxfp, 5, 16); 939GEN_VXFORM_ENV(vminfp, 5, 17); 940GEN_VXFORM_HETRO(vextublx, 6, 24) 941GEN_VXFORM_HETRO(vextuhlx, 6, 25) 942GEN_VXFORM_HETRO(vextuwlx, 6, 26) 943GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207, 944 vextuwlx, 
PPC_NONE, PPC2_ISA300) 945GEN_VXFORM_HETRO(vextubrx, 6, 28) 946GEN_VXFORM_HETRO(vextuhrx, 6, 29) 947GEN_VXFORM_HETRO(vextuwrx, 6, 30) 948GEN_VXFORM_TRANS(lvsl, 6, 31) 949GEN_VXFORM_TRANS(lvsr, 6, 32) 950GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, 951 vextuwrx, PPC_NONE, PPC2_ISA300) 952 953#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ 954static void glue(gen_, name)(DisasContext *ctx) \ 955 { \ 956 TCGv_ptr ra, rb, rd; \ 957 if (unlikely(!ctx->altivec_enabled)) { \ 958 gen_exception(ctx, POWERPC_EXCP_VPU); \ 959 return; \ 960 } \ 961 ra = gen_avr_ptr(rA(ctx->opcode)); \ 962 rb = gen_avr_ptr(rB(ctx->opcode)); \ 963 rd = gen_avr_ptr(rD(ctx->opcode)); \ 964 gen_helper_##opname(cpu_env, rd, ra, rb); \ 965 tcg_temp_free_ptr(ra); \ 966 tcg_temp_free_ptr(rb); \ 967 tcg_temp_free_ptr(rd); \ 968 } 969 970#define GEN_VXRFORM(name, opc2, opc3) \ 971 GEN_VXRFORM1(name, name, #name, opc2, opc3) \ 972 GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) 973 974/* 975 * Support for Altivec instructions that use bit 31 (Rc) as an opcode 976 * bit but also use bit 21 as an actual Rc bit. In general, thse pairs 977 * come from different versions of the ISA, so we must also support a 978 * pair of flags for each instruction. 
979 */ 980#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \ 981static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ 982{ \ 983 if ((Rc(ctx->opcode) == 0) && \ 984 ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ 985 if (Rc21(ctx->opcode) == 0) { \ 986 gen_##name0(ctx); \ 987 } else { \ 988 gen_##name0##_(ctx); \ 989 } \ 990 } else if ((Rc(ctx->opcode) == 1) && \ 991 ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ 992 if (Rc21(ctx->opcode) == 0) { \ 993 gen_##name1(ctx); \ 994 } else { \ 995 gen_##name1##_(ctx); \ 996 } \ 997 } else { \ 998 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ 999 } \ 1000} 1001 1002GEN_VXRFORM(vcmpequb, 3, 0) 1003GEN_VXRFORM(vcmpequh, 3, 1) 1004GEN_VXRFORM(vcmpequw, 3, 2) 1005GEN_VXRFORM(vcmpequd, 3, 3) 1006GEN_VXRFORM(vcmpnezb, 3, 4) 1007GEN_VXRFORM(vcmpnezh, 3, 5) 1008GEN_VXRFORM(vcmpnezw, 3, 6) 1009GEN_VXRFORM(vcmpgtsb, 3, 12) 1010GEN_VXRFORM(vcmpgtsh, 3, 13) 1011GEN_VXRFORM(vcmpgtsw, 3, 14) 1012GEN_VXRFORM(vcmpgtsd, 3, 15) 1013GEN_VXRFORM(vcmpgtub, 3, 8) 1014GEN_VXRFORM(vcmpgtuh, 3, 9) 1015GEN_VXRFORM(vcmpgtuw, 3, 10) 1016GEN_VXRFORM(vcmpgtud, 3, 11) 1017GEN_VXRFORM(vcmpeqfp, 3, 3) 1018GEN_VXRFORM(vcmpgefp, 3, 7) 1019GEN_VXRFORM(vcmpgtfp, 3, 11) 1020GEN_VXRFORM(vcmpbfp, 3, 15) 1021GEN_VXRFORM(vcmpneb, 3, 0) 1022GEN_VXRFORM(vcmpneh, 3, 1) 1023GEN_VXRFORM(vcmpnew, 3, 2) 1024 1025GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \ 1026 vcmpneb, PPC_NONE, PPC2_ISA300) 1027GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \ 1028 vcmpneh, PPC_NONE, PPC2_ISA300) 1029GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \ 1030 vcmpnew, PPC_NONE, PPC2_ISA300) 1031GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \ 1032 vcmpequd, PPC_NONE, PPC2_ALTIVEC_207) 1033GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \ 1034 vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207) 1035GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \ 1036 vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207) 1037 1038static void 
gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    /* Splat the sign-extended 5-bit immediate across all lanes of vD. */
    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

/* VX-form with no rA operand: gen_helper_name(rd, rb). */
#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

/* As above, but the helper also receives cpu_env. */
#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(cpu_env, rd, rb);                             \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

/*
 * NOTE(review): expands identically to GEN_VXFORM_NOA; the extra opc4
 * parameter is unused in the expansion — presumably it only matters to
 * the opcode tables defined elsewhere. Verify before consolidating.
 */
#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

/* VX-form whose destination is a GPR: gen_helper_name(gpr[rD], rb). */
#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
static
void glue(gen_, name)(DisasContext *ctx) \ 1106 { \ 1107 TCGv_ptr rb; \ 1108 if (unlikely(!ctx->altivec_enabled)) { \ 1109 gen_exception(ctx, POWERPC_EXCP_VPU); \ 1110 return; \ 1111 } \ 1112 rb = gen_avr_ptr(rB(ctx->opcode)); \ 1113 gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \ 1114 tcg_temp_free_ptr(rb); \ 1115 } 1116GEN_VXFORM_NOA(vupkhsb, 7, 8); 1117GEN_VXFORM_NOA(vupkhsh, 7, 9); 1118GEN_VXFORM_NOA(vupkhsw, 7, 25); 1119GEN_VXFORM_NOA(vupklsb, 7, 10); 1120GEN_VXFORM_NOA(vupklsh, 7, 11); 1121GEN_VXFORM_NOA(vupklsw, 7, 27); 1122GEN_VXFORM_NOA(vupkhpx, 7, 13); 1123GEN_VXFORM_NOA(vupklpx, 7, 15); 1124GEN_VXFORM_NOA_ENV(vrefp, 5, 4); 1125GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5); 1126GEN_VXFORM_NOA_ENV(vexptefp, 5, 6); 1127GEN_VXFORM_NOA_ENV(vlogefp, 5, 7); 1128GEN_VXFORM_NOA_ENV(vrfim, 5, 11); 1129GEN_VXFORM_NOA_ENV(vrfin, 5, 8); 1130GEN_VXFORM_NOA_ENV(vrfip, 5, 10); 1131GEN_VXFORM_NOA_ENV(vrfiz, 5, 9); 1132GEN_VXFORM_NOA(vprtybw, 1, 24); 1133GEN_VXFORM_NOA(vprtybd, 1, 24); 1134GEN_VXFORM_NOA(vprtybq, 1, 24); 1135 1136static void gen_vsplt(DisasContext *ctx, int vece) 1137{ 1138 int uimm, dofs, bofs; 1139 1140 if (unlikely(!ctx->altivec_enabled)) { 1141 gen_exception(ctx, POWERPC_EXCP_VPU); 1142 return; 1143 } 1144 1145 uimm = UIMM5(ctx->opcode); 1146 bofs = avr_full_offset(rB(ctx->opcode)); 1147 dofs = avr_full_offset(rD(ctx->opcode)); 1148 1149 /* Experimental testing shows that hardware masks the immediate. 
*/ 1150 bofs += (uimm << vece) & 15; 1151#ifndef HOST_WORDS_BIGENDIAN 1152 bofs ^= 15; 1153 bofs &= ~((1 << vece) - 1); 1154#endif 1155 1156 tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16); 1157} 1158 1159#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \ 1160static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); } 1161 1162#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \ 1163static void glue(gen_, name)(DisasContext *ctx) \ 1164 { \ 1165 TCGv_ptr rb, rd; \ 1166 TCGv_i32 uimm; \ 1167 \ 1168 if (unlikely(!ctx->altivec_enabled)) { \ 1169 gen_exception(ctx, POWERPC_EXCP_VPU); \ 1170 return; \ 1171 } \ 1172 uimm = tcg_const_i32(UIMM5(ctx->opcode)); \ 1173 rb = gen_avr_ptr(rB(ctx->opcode)); \ 1174 rd = gen_avr_ptr(rD(ctx->opcode)); \ 1175 gen_helper_##name(cpu_env, rd, rb, uimm); \ 1176 tcg_temp_free_i32(uimm); \ 1177 tcg_temp_free_ptr(rb); \ 1178 tcg_temp_free_ptr(rd); \ 1179 } 1180 1181#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \ 1182static void glue(gen_, name)(DisasContext *ctx) \ 1183 { \ 1184 TCGv_ptr rb, rd; \ 1185 uint8_t uimm = UIMM4(ctx->opcode); \ 1186 TCGv_i32 t0; \ 1187 if (unlikely(!ctx->altivec_enabled)) { \ 1188 gen_exception(ctx, POWERPC_EXCP_VPU); \ 1189 return; \ 1190 } \ 1191 if (uimm > splat_max) { \ 1192 uimm = 0; \ 1193 } \ 1194 t0 = tcg_temp_new_i32(); \ 1195 tcg_gen_movi_i32(t0, uimm); \ 1196 rb = gen_avr_ptr(rB(ctx->opcode)); \ 1197 rd = gen_avr_ptr(rD(ctx->opcode)); \ 1198 gen_helper_##name(rd, rb, t0); \ 1199 tcg_temp_free_i32(t0); \ 1200 tcg_temp_free_ptr(rb); \ 1201 tcg_temp_free_ptr(rd); \ 1202 } 1203 1204GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8); 1205GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9); 1206GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10); 1207GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); 1208GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); 1209GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); 1210GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); 1211GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); 1212GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 
14); 1213GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); 1214GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); 1215GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); 1216GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); 1217GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); 1218GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15); 1219GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE, 1220 vextractub, PPC_NONE, PPC2_ISA300); 1221GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, 1222 vextractuh, PPC_NONE, PPC2_ISA300); 1223GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, 1224 vextractuw, PPC_NONE, PPC2_ISA300); 1225GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, 1226 vinsertb, PPC_NONE, PPC2_ISA300); 1227GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, 1228 vinserth, PPC_NONE, PPC2_ISA300); 1229GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, 1230 vinsertw, PPC_NONE, PPC2_ISA300); 1231 1232static void gen_vsldoi(DisasContext *ctx) 1233{ 1234 TCGv_ptr ra, rb, rd; 1235 TCGv_i32 sh; 1236 if (unlikely(!ctx->altivec_enabled)) { 1237 gen_exception(ctx, POWERPC_EXCP_VPU); 1238 return; 1239 } 1240 ra = gen_avr_ptr(rA(ctx->opcode)); 1241 rb = gen_avr_ptr(rB(ctx->opcode)); 1242 rd = gen_avr_ptr(rD(ctx->opcode)); 1243 sh = tcg_const_i32(VSH(ctx->opcode)); 1244 gen_helper_vsldoi(rd, ra, rb, sh); 1245 tcg_temp_free_ptr(ra); 1246 tcg_temp_free_ptr(rb); 1247 tcg_temp_free_ptr(rd); 1248 tcg_temp_free_i32(sh); 1249} 1250 1251#define GEN_VAFORM_PAIRED(name0, name1, opc2) \ 1252static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ 1253 { \ 1254 TCGv_ptr ra, rb, rc, rd; \ 1255 if (unlikely(!ctx->altivec_enabled)) { \ 1256 gen_exception(ctx, POWERPC_EXCP_VPU); \ 1257 return; \ 1258 } \ 1259 ra = gen_avr_ptr(rA(ctx->opcode)); \ 1260 rb = gen_avr_ptr(rB(ctx->opcode)); \ 1261 rc = gen_avr_ptr(rC(ctx->opcode)); \ 1262 rd = gen_avr_ptr(rD(ctx->opcode)); \ 1263 if (Rc(ctx->opcode)) { \ 1264 gen_helper_##name1(cpu_env, rd, ra, rb, rc); \ 1265 } else { \ 1266 gen_helper_##name0(cpu_env, rd, ra, rb, rc); \ 1267 } \ 1268 tcg_temp_free_ptr(ra); \ 1269 
tcg_temp_free_ptr(rb); \ 1270 tcg_temp_free_ptr(rc); \ 1271 tcg_temp_free_ptr(rd); \ 1272 } 1273 1274GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16) 1275 1276static void gen_vmladduhm(DisasContext *ctx) 1277{ 1278 TCGv_ptr ra, rb, rc, rd; 1279 if (unlikely(!ctx->altivec_enabled)) { 1280 gen_exception(ctx, POWERPC_EXCP_VPU); 1281 return; 1282 } 1283 ra = gen_avr_ptr(rA(ctx->opcode)); 1284 rb = gen_avr_ptr(rB(ctx->opcode)); 1285 rc = gen_avr_ptr(rC(ctx->opcode)); 1286 rd = gen_avr_ptr(rD(ctx->opcode)); 1287 gen_helper_vmladduhm(rd, ra, rb, rc); 1288 tcg_temp_free_ptr(ra); 1289 tcg_temp_free_ptr(rb); 1290 tcg_temp_free_ptr(rc); 1291 tcg_temp_free_ptr(rd); 1292} 1293 1294static void gen_vpermr(DisasContext *ctx) 1295{ 1296 TCGv_ptr ra, rb, rc, rd; 1297 if (unlikely(!ctx->altivec_enabled)) { 1298 gen_exception(ctx, POWERPC_EXCP_VPU); 1299 return; 1300 } 1301 ra = gen_avr_ptr(rA(ctx->opcode)); 1302 rb = gen_avr_ptr(rB(ctx->opcode)); 1303 rc = gen_avr_ptr(rC(ctx->opcode)); 1304 rd = gen_avr_ptr(rD(ctx->opcode)); 1305 gen_helper_vpermr(cpu_env, rd, ra, rb, rc); 1306 tcg_temp_free_ptr(ra); 1307 tcg_temp_free_ptr(rb); 1308 tcg_temp_free_ptr(rc); 1309 tcg_temp_free_ptr(rd); 1310} 1311 1312GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) 1313GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) 1314GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) 1315GEN_VAFORM_PAIRED(vsel, vperm, 21) 1316GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) 1317 1318GEN_VXFORM_NOA(vclzb, 1, 28) 1319GEN_VXFORM_NOA(vclzh, 1, 29) 1320GEN_VXFORM_TRANS(vclzw, 1, 30) 1321GEN_VXFORM_TRANS(vclzd, 1, 31) 1322GEN_VXFORM_NOA_2(vnegw, 1, 24, 6) 1323GEN_VXFORM_NOA_2(vnegd, 1, 24, 7) 1324GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16) 1325GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17) 1326GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24) 1327GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25) 1328GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26) 1329GEN_VXFORM_NOA_2(vctzb, 1, 24, 28) 1330GEN_VXFORM_NOA_2(vctzh, 1, 24, 29) 1331GEN_VXFORM_NOA_2(vctzw, 1, 24, 30) 1332GEN_VXFORM_NOA_2(vctzd, 1, 24, 31) 
GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
GEN_VXFORM_NOA(vpopcntd, 1, 31)
GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)
GEN_VXFORM(vpmsumd, 4, 19)

/*
 * Two-source BCD operation: CR6 <- helper(vrD, vrA, vrB, PS).
 * PS is the "preferred sign" bit, extracted from opcode bit 0x200.
 */
#define GEN_BCD(op)                                 \
static void gen_##op(DisasContext *ctx)             \
{                                                   \
    TCGv_ptr ra, rb, rd;                            \
    TCGv_i32 ps;                                    \
                                                    \
    if (unlikely(!ctx->altivec_enabled)) {          \
        gen_exception(ctx, POWERPC_EXCP_VPU);       \
        return;                                     \
    }                                               \
                                                    \
    ra = gen_avr_ptr(rA(ctx->opcode));              \
    rb = gen_avr_ptr(rB(ctx->opcode));              \
    rd = gen_avr_ptr(rD(ctx->opcode));              \
                                                    \
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
                                                    \
    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
                                                    \
    tcg_temp_free_ptr(ra);                          \
    tcg_temp_free_ptr(rb);                          \
    tcg_temp_free_ptr(rd);                          \
    tcg_temp_free_i32(ps);                          \
}

/*
 * One-source BCD conversion: CR6 <- helper(vrD, vrB, PS).
 * Same PS extraction as GEN_BCD; no rA operand.
 */
#define GEN_BCD2(op)                                \
static void gen_##op(DisasContext *ctx)             \
{                                                   \
    TCGv_ptr rd, rb;                                \
    TCGv_i32 ps;                                    \
                                                    \
    if (unlikely(!ctx->altivec_enabled)) {          \
        gen_exception(ctx, POWERPC_EXCP_VPU);       \
        return;                                     \
    }                                               \
                                                    \
    rb = gen_avr_ptr(rB(ctx->opcode));              \
    rd = gen_avr_ptr(rD(ctx->opcode));              \
                                                    \
    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
                                                    \
    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
                                                    \
    tcg_temp_free_ptr(rb);                          \
    tcg_temp_free_ptr(rd);                          \
    tcg_temp_free_i32(ps);                          \
}

GEN_BCD(bcdadd)
GEN_BCD(bcdsub)
GEN_BCD2(bcdcfn)
GEN_BCD2(bcdctn)
GEN_BCD2(bcdcfz)
GEN_BCD2(bcdctz)
GEN_BCD2(bcdcfsq)
GEN_BCD2(bcdctsq)
GEN_BCD2(bcdsetsgn)
GEN_BCD(bcdcpsgn);
GEN_BCD(bcds);
GEN_BCD(bcdus);
GEN_BCD(bcdsr);
GEN_BCD(bcdtrunc);
GEN_BCD(bcdutrunc);

/*
 * Dispatch the BCD conversions that share one opcode slot with
 * vsubcuw, selected by the opc4 field.
 */
static void gen_xpnd04_1(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 5:
        gen_bcdctn(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

/*
 * Dispatch the BCD conversions that share one opcode slot with
 * vsubsws, selected by opc4.  Note: unlike gen_xpnd04_1, bcdctn
 * (opc4 == 5) is not valid in this slot.
 */
static void gen_xpnd04_2(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}


GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_1, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
                bcdutrunc, PPC_NONE, PPC2_ISA300)


/* vsbox: AES SubBytes on vrA; result to vrD (crypto extension). */
static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rd);
}

GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

/* AES round instructions share opcode slots; Rc selects the variant. */
GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)

/*
 * SHA sigma instructions: the helper receives the ST and SIX fields
 * packed in the rB position of the opcode.
 */
#define VSHASIGMA(op)                                 \
static void gen_##op(DisasContext *ctx)               \
{                                                     \
    TCGv_ptr ra, rd;                                  \
    TCGv_i32 st_six;                                  \
    if (unlikely(!ctx->altivec_enabled)) {            \
        gen_exception(ctx, POWERPC_EXCP_VPU);         \
        return;                                       \
    }                                                 \
    ra = gen_avr_ptr(rA(ctx->opcode));                \
    rd = gen_avr_ptr(rD(ctx->opcode));                \
    st_six = tcg_const_i32(rB(ctx->opcode));          \
    gen_helper_##op(rd, ra, st_six);                  \
    tcg_temp_free_ptr(ra);                            \
    tcg_temp_free_ptr(rd);                            \
    tcg_temp_free_i32(st_six);                        \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

/* Undefine the generator macros; they are local to this file. */
#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
#undef GEN_VR_STVE

#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2