1/*** VSX extension ***/ 2 3static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high) 4{ 5 tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high)); 6} 7 8static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high) 9{ 10 tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high)); 11} 12 13static inline TCGv_ptr gen_vsr_ptr(int reg) 14{ 15 TCGv_ptr r = tcg_temp_new_ptr(); 16 tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg)); 17 return r; 18} 19 20static inline TCGv_ptr gen_acc_ptr(int reg) 21{ 22 TCGv_ptr r = tcg_temp_new_ptr(); 23 tcg_gen_addi_ptr(r, cpu_env, acc_full_offset(reg)); 24 return r; 25} 26 27#define VSX_LOAD_SCALAR(name, operation) \ 28static void gen_##name(DisasContext *ctx) \ 29{ \ 30 TCGv EA; \ 31 TCGv_i64 t0; \ 32 if (unlikely(!ctx->vsx_enabled)) { \ 33 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 34 return; \ 35 } \ 36 t0 = tcg_temp_new_i64(); \ 37 gen_set_access_type(ctx, ACCESS_INT); \ 38 EA = tcg_temp_new(); \ 39 gen_addr_reg_index(ctx, EA); \ 40 gen_qemu_##operation(ctx, t0, EA); \ 41 set_cpu_vsr(xT(ctx->opcode), t0, true); \ 42 /* NOTE: cpu_vsrl is undefined */ \ 43 tcg_temp_free(EA); \ 44 tcg_temp_free_i64(t0); \ 45} 46 47VSX_LOAD_SCALAR(lxsdx, ld64_i64) 48VSX_LOAD_SCALAR(lxsiwax, ld32s_i64) 49VSX_LOAD_SCALAR(lxsibzx, ld8u_i64) 50VSX_LOAD_SCALAR(lxsihzx, ld16u_i64) 51VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64) 52VSX_LOAD_SCALAR(lxsspx, ld32fs) 53 54static void gen_lxvd2x(DisasContext *ctx) 55{ 56 TCGv EA; 57 TCGv_i64 t0; 58 if (unlikely(!ctx->vsx_enabled)) { 59 gen_exception(ctx, POWERPC_EXCP_VSXU); 60 return; 61 } 62 t0 = tcg_temp_new_i64(); 63 gen_set_access_type(ctx, ACCESS_INT); 64 EA = tcg_temp_new(); 65 gen_addr_reg_index(ctx, EA); 66 gen_qemu_ld64_i64(ctx, t0, EA); 67 set_cpu_vsr(xT(ctx->opcode), t0, true); 68 tcg_gen_addi_tl(EA, EA, 8); 69 gen_qemu_ld64_i64(ctx, t0, EA); 70 set_cpu_vsr(xT(ctx->opcode), t0, false); 71 tcg_temp_free(EA); 72 tcg_temp_free_i64(t0); 73} 74 75static void gen_lxvw4x(DisasContext *ctx) 76{ 77 TCGv EA; 78 TCGv_i64 xth; 79 TCGv_i64 xtl; 80 if (unlikely(!ctx->vsx_enabled)) { 81 gen_exception(ctx, POWERPC_EXCP_VSXU); 82 return; 83 } 84 xth = tcg_temp_new_i64(); 85 xtl = tcg_temp_new_i64(); 86 87 gen_set_access_type(ctx, ACCESS_INT); 88 EA = tcg_temp_new(); 89 90 gen_addr_reg_index(ctx, EA); 91 if (ctx->le_mode) { 92 TCGv_i64 t0 = tcg_temp_new_i64(); 93 TCGv_i64 t1 = tcg_temp_new_i64(); 94 95 tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); 96 tcg_gen_shri_i64(t1, t0, 32); 97 tcg_gen_deposit_i64(xth, t1, t0, 32, 32); 98 tcg_gen_addi_tl(EA, EA, 8); 99 tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); 100 tcg_gen_shri_i64(t1, t0, 32); 101 tcg_gen_deposit_i64(xtl, t1, t0, 32, 32); 102 tcg_temp_free_i64(t0); 103 tcg_temp_free_i64(t1); 104 } else { 105 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); 106 tcg_gen_addi_tl(EA, EA, 8); 107 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); 108 } 109 set_cpu_vsr(xT(ctx->opcode), xth, true); 110 set_cpu_vsr(xT(ctx->opcode), xtl, false); 111 tcg_temp_free(EA); 112 tcg_temp_free_i64(xth); 113 tcg_temp_free_i64(xtl); 114} 115 116static void gen_lxvwsx(DisasContext *ctx) 117{ 118 TCGv EA; 119 TCGv_i32 data; 120 121 if (xT(ctx->opcode) < 32) { 122 if (unlikely(!ctx->vsx_enabled)) { 123 gen_exception(ctx, POWERPC_EXCP_VSXU); 124 return; 125 } 126 } else { 127 if (unlikely(!ctx->altivec_enabled)) { 128 gen_exception(ctx, POWERPC_EXCP_VPU); 129 return; 130 } 131 } 132 133 gen_set_access_type(ctx, ACCESS_INT); 134 EA = tcg_temp_new(); 135 136 gen_addr_reg_index(ctx, EA); 137 138 data = tcg_temp_new_i32(); 139 tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL)); 140 tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); 141 142 tcg_temp_free(EA); 143 tcg_temp_free_i32(data); 144} 145 146static void gen_lxvdsx(DisasContext *ctx) 147{ 148 TCGv EA; 149 TCGv_i64 data; 150 151 if (unlikely(!ctx->vsx_enabled)) { 152 gen_exception(ctx, POWERPC_EXCP_VSXU); 153 return; 154 } 155 156 gen_set_access_type(ctx, ACCESS_INT); 157 EA = tcg_temp_new(); 158 159 gen_addr_reg_index(ctx, EA); 160 161 data = tcg_temp_new_i64(); 162 tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ)); 163 tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); 164 165 tcg_temp_free(EA); 166 tcg_temp_free_i64(data); 167} 168 169static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, 170 TCGv_i64 inh, TCGv_i64 inl) 171{ 172 TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF); 173 TCGv_i64 t0 = tcg_temp_new_i64(); 174 TCGv_i64 t1 = tcg_temp_new_i64(); 175 176 /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */ 177 tcg_gen_and_i64(t0, inh, mask); 178 tcg_gen_shli_i64(t0, t0, 8); 179 tcg_gen_shri_i64(t1, inh, 8); 180 tcg_gen_and_i64(t1, t1, mask); 181 tcg_gen_or_i64(outh, t0, t1); 182 183 /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */ 184 tcg_gen_and_i64(t0, inl, mask); 185 tcg_gen_shli_i64(t0, t0, 8); 186 tcg_gen_shri_i64(t1, inl, 8); 187 tcg_gen_and_i64(t1, t1, mask); 188 tcg_gen_or_i64(outl, t0, t1); 189 190 tcg_temp_free_i64(t0); 191 tcg_temp_free_i64(t1); 192 tcg_temp_free_i64(mask); 193} 194 195static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl, 196 TCGv_i64 inh, TCGv_i64 inl) 197{ 198 TCGv_i64 hi = tcg_temp_new_i64(); 199 TCGv_i64 lo = tcg_temp_new_i64(); 200 201 tcg_gen_bswap64_i64(hi, inh); 202 tcg_gen_bswap64_i64(lo, inl); 203 tcg_gen_shri_i64(outh, hi, 32); 204 tcg_gen_deposit_i64(outh, outh, hi, 32, 32); 205 tcg_gen_shri_i64(outl, lo, 32); 206 tcg_gen_deposit_i64(outl, outl, lo, 32, 32); 207 208 tcg_temp_free_i64(hi); 209 tcg_temp_free_i64(lo); 210} 211static void gen_lxvh8x(DisasContext *ctx) 212{ 213 TCGv EA; 214 TCGv_i64 xth; 215 TCGv_i64 xtl; 216 217 if (unlikely(!ctx->vsx_enabled)) { 218 gen_exception(ctx, POWERPC_EXCP_VSXU); 219 return; 220 } 221 xth = tcg_temp_new_i64(); 222 xtl = tcg_temp_new_i64(); 223 gen_set_access_type(ctx, ACCESS_INT); 224 225 EA = tcg_temp_new(); 226 gen_addr_reg_index(ctx, EA); 227 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); 228 tcg_gen_addi_tl(EA, EA, 8); 229 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); 230 if (ctx->le_mode) { 231 gen_bswap16x8(xth, xtl, xth, xtl); 232 } 233 set_cpu_vsr(xT(ctx->opcode), xth, true); 234 set_cpu_vsr(xT(ctx->opcode), xtl, false); 235 tcg_temp_free(EA); 236 tcg_temp_free_i64(xth); 237 tcg_temp_free_i64(xtl); 238} 239 240static void gen_lxvb16x(DisasContext *ctx) 241{ 242 TCGv EA; 243 TCGv_i64 xth; 244 TCGv_i64 xtl; 245 246 if (unlikely(!ctx->vsx_enabled)) { 247 gen_exception(ctx, POWERPC_EXCP_VSXU); 248 return; 249 } 250 xth = tcg_temp_new_i64(); 251 xtl = tcg_temp_new_i64(); 252 gen_set_access_type(ctx, ACCESS_INT); 253 EA = tcg_temp_new(); 254 gen_addr_reg_index(ctx, EA); 255 tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); 256 tcg_gen_addi_tl(EA, EA, 8); 257 tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); 258 set_cpu_vsr(xT(ctx->opcode), xth, true); 259 set_cpu_vsr(xT(ctx->opcode), xtl, false); 260 tcg_temp_free(EA); 261 tcg_temp_free_i64(xth); 262 tcg_temp_free_i64(xtl); 263} 264 265#ifdef TARGET_PPC64 266#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ 267static void gen_##name(DisasContext *ctx) \ 268{ \ 269 TCGv EA; \ 270 TCGv_ptr xt; \ 271 \ 272 if (xT(ctx->opcode) < 32) { \ 273 if (unlikely(!ctx->vsx_enabled)) { \ 274 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 275 return; \ 276 } \ 277 } else { \ 278 if (unlikely(!ctx->altivec_enabled)) { \ 279 gen_exception(ctx, POWERPC_EXCP_VPU); \ 280 return; \ 281 } \ 282 } \ 283 EA = tcg_temp_new(); \ 284 xt = gen_vsr_ptr(xT(ctx->opcode)); \ 285 gen_set_access_type(ctx, ACCESS_INT); \ 286 gen_addr_register(ctx, EA); \ 287 gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \ 288 tcg_temp_free(EA); \ 289 tcg_temp_free_ptr(xt); \ 290} 291 292VSX_VECTOR_LOAD_STORE_LENGTH(lxvl) 293VSX_VECTOR_LOAD_STORE_LENGTH(lxvll) 294VSX_VECTOR_LOAD_STORE_LENGTH(stxvl) 295VSX_VECTOR_LOAD_STORE_LENGTH(stxvll) 296#endif 297 298#define VSX_STORE_SCALAR(name, operation) \ 299static void gen_##name(DisasContext *ctx) \ 300{ \ 301 TCGv EA; \ 302 TCGv_i64 t0; \ 303 if (unlikely(!ctx->vsx_enabled)) { \ 304 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 305 return; \ 306 } \ 307 t0 = tcg_temp_new_i64(); \ 308 gen_set_access_type(ctx, ACCESS_INT); \ 309 EA = tcg_temp_new(); \ 310 gen_addr_reg_index(ctx, EA); \ 311 get_cpu_vsr(t0, xS(ctx->opcode), true); \ 312 gen_qemu_##operation(ctx, t0, EA); \ 313 tcg_temp_free(EA); \ 314 tcg_temp_free_i64(t0); \ 315} 316 317VSX_STORE_SCALAR(stxsdx, st64_i64) 318 319VSX_STORE_SCALAR(stxsibx, st8_i64) 320VSX_STORE_SCALAR(stxsihx, st16_i64) 321VSX_STORE_SCALAR(stxsiwx, st32_i64) 322VSX_STORE_SCALAR(stxsspx, st32fs) 323 324static void gen_stxvd2x(DisasContext *ctx) 325{ 326 TCGv EA; 327 TCGv_i64 t0; 328 if (unlikely(!ctx->vsx_enabled)) { 329 gen_exception(ctx, POWERPC_EXCP_VSXU); 330 return; 331 } 332 t0 = tcg_temp_new_i64(); 333 gen_set_access_type(ctx, ACCESS_INT); 334 EA = tcg_temp_new(); 335 gen_addr_reg_index(ctx, EA); 336 get_cpu_vsr(t0, xS(ctx->opcode), true); 337 gen_qemu_st64_i64(ctx, t0, EA); 338 tcg_gen_addi_tl(EA, EA, 8); 339 get_cpu_vsr(t0, xS(ctx->opcode), false); 340 gen_qemu_st64_i64(ctx, t0, EA); 341 tcg_temp_free(EA); 342 tcg_temp_free_i64(t0); 343} 344 345static void gen_stxvw4x(DisasContext *ctx) 346{ 347 TCGv EA; 348 TCGv_i64 xsh; 349 TCGv_i64 xsl; 350 351 if (unlikely(!ctx->vsx_enabled)) { 352 gen_exception(ctx, POWERPC_EXCP_VSXU); 353 return; 354 } 355 xsh = tcg_temp_new_i64(); 356 xsl = tcg_temp_new_i64(); 357 get_cpu_vsr(xsh, xS(ctx->opcode), true); 358 get_cpu_vsr(xsl, xS(ctx->opcode), false); 359 gen_set_access_type(ctx, ACCESS_INT); 360 EA = tcg_temp_new(); 361 gen_addr_reg_index(ctx, EA); 362 if (ctx->le_mode) { 363 TCGv_i64 t0 = tcg_temp_new_i64(); 364 TCGv_i64 t1 = tcg_temp_new_i64(); 365 366 tcg_gen_shri_i64(t0, xsh, 32); 367 tcg_gen_deposit_i64(t1, t0, xsh, 32, 32); 368 tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); 369 tcg_gen_addi_tl(EA, EA, 8); 370 tcg_gen_shri_i64(t0, xsl, 32); 371 tcg_gen_deposit_i64(t1, t0, xsl, 32, 32); 372 tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); 373 tcg_temp_free_i64(t0); 374 tcg_temp_free_i64(t1); 375 } else { 376 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); 377 tcg_gen_addi_tl(EA, EA, 8); 378 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); 379 } 380 tcg_temp_free(EA); 381 tcg_temp_free_i64(xsh); 382 tcg_temp_free_i64(xsl); 383} 384 385static void gen_stxvh8x(DisasContext *ctx) 386{ 387 TCGv EA; 388 TCGv_i64 xsh; 389 TCGv_i64 xsl; 390 391 if (unlikely(!ctx->vsx_enabled)) { 392 gen_exception(ctx, POWERPC_EXCP_VSXU); 393 return; 394 } 395 xsh = tcg_temp_new_i64(); 396 xsl = tcg_temp_new_i64(); 397 get_cpu_vsr(xsh, xS(ctx->opcode), true); 398 get_cpu_vsr(xsl, xS(ctx->opcode), false); 399 gen_set_access_type(ctx, ACCESS_INT); 400 EA = tcg_temp_new(); 401 gen_addr_reg_index(ctx, EA); 402 if (ctx->le_mode) { 403 TCGv_i64 outh = tcg_temp_new_i64(); 404 TCGv_i64 outl = tcg_temp_new_i64(); 405 406 gen_bswap16x8(outh, outl, xsh, xsl); 407 tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ); 408 tcg_gen_addi_tl(EA, EA, 8); 409 tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ); 410 tcg_temp_free_i64(outh); 411 tcg_temp_free_i64(outl); 412 } else { 413 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); 414 tcg_gen_addi_tl(EA, EA, 8); 415 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); 416 } 417 tcg_temp_free(EA); 418 tcg_temp_free_i64(xsh); 419 tcg_temp_free_i64(xsl); 420} 421 422static void gen_stxvb16x(DisasContext *ctx) 423{ 424 TCGv EA; 425 TCGv_i64 xsh; 426 TCGv_i64 xsl; 427 428 if (unlikely(!ctx->vsx_enabled)) { 429 gen_exception(ctx, POWERPC_EXCP_VSXU); 430 return; 431 } 432 xsh = tcg_temp_new_i64(); 433 xsl = tcg_temp_new_i64(); 434 get_cpu_vsr(xsh, xS(ctx->opcode), true); 435 get_cpu_vsr(xsl, xS(ctx->opcode), false); 436 gen_set_access_type(ctx, ACCESS_INT); 437 EA = tcg_temp_new(); 438 gen_addr_reg_index(ctx, EA); 439 tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); 440 tcg_gen_addi_tl(EA, EA, 8); 441 tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); 442 tcg_temp_free(EA); 443 tcg_temp_free_i64(xsh); 444 tcg_temp_free_i64(xsl); 445} 446 447static void gen_mfvsrwz(DisasContext *ctx) 448{ 449 if (xS(ctx->opcode) < 32) { 450 if (unlikely(!ctx->fpu_enabled)) { 451 gen_exception(ctx, POWERPC_EXCP_FPU); 452 return; 453 } 454 } else { 455 if (unlikely(!ctx->altivec_enabled)) { 456 gen_exception(ctx, POWERPC_EXCP_VPU); 457 return; 458 } 459 } 460 TCGv_i64 tmp = tcg_temp_new_i64(); 461 TCGv_i64 xsh = tcg_temp_new_i64(); 462 get_cpu_vsr(xsh, xS(ctx->opcode), true); 463 tcg_gen_ext32u_i64(tmp, xsh); 464 tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); 465 tcg_temp_free_i64(tmp); 466 tcg_temp_free_i64(xsh); 467} 468 469static void gen_mtvsrwa(DisasContext *ctx) 470{ 471 if (xS(ctx->opcode) < 32) { 472 if (unlikely(!ctx->fpu_enabled)) { 473 gen_exception(ctx, POWERPC_EXCP_FPU); 474 return; 475 } 476 } else { 477 if (unlikely(!ctx->altivec_enabled)) { 478 gen_exception(ctx, POWERPC_EXCP_VPU); 479 return; 480 } 481 } 482 TCGv_i64 tmp = tcg_temp_new_i64(); 483 TCGv_i64 xsh = tcg_temp_new_i64(); 484 tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); 485 tcg_gen_ext32s_i64(xsh, tmp); 486 set_cpu_vsr(xT(ctx->opcode), xsh, true); 487 tcg_temp_free_i64(tmp); 488 tcg_temp_free_i64(xsh); 489} 490 491static void gen_mtvsrwz(DisasContext *ctx) 492{ 493 if (xS(ctx->opcode) < 32) { 494 if (unlikely(!ctx->fpu_enabled)) { 495 gen_exception(ctx, POWERPC_EXCP_FPU); 496 return; 497 } 498 } else { 499 if (unlikely(!ctx->altivec_enabled)) { 500 gen_exception(ctx, POWERPC_EXCP_VPU); 501 return; 502 } 503 } 504 TCGv_i64 tmp = tcg_temp_new_i64(); 505 TCGv_i64 xsh = tcg_temp_new_i64(); 506 tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); 507 tcg_gen_ext32u_i64(xsh, tmp); 508 set_cpu_vsr(xT(ctx->opcode), xsh, true); 509 tcg_temp_free_i64(tmp); 510 tcg_temp_free_i64(xsh); 511} 512 513#if defined(TARGET_PPC64) 514static void gen_mfvsrd(DisasContext *ctx) 515{ 516 TCGv_i64 t0; 517 if (xS(ctx->opcode) < 32) { 518 if (unlikely(!ctx->fpu_enabled)) { 519 gen_exception(ctx, POWERPC_EXCP_FPU); 520 return; 521 } 522 } else { 523 if (unlikely(!ctx->altivec_enabled)) { 524 gen_exception(ctx, POWERPC_EXCP_VPU); 525 return; 526 } 527 } 528 t0 = tcg_temp_new_i64(); 529 get_cpu_vsr(t0, xS(ctx->opcode), true); 530 tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); 531 tcg_temp_free_i64(t0); 532} 533 534static void gen_mtvsrd(DisasContext *ctx) 535{ 536 TCGv_i64 t0; 537 if (xS(ctx->opcode) < 32) { 538 if (unlikely(!ctx->fpu_enabled)) { 539 gen_exception(ctx, POWERPC_EXCP_FPU); 540 return; 541 } 542 } else { 543 if (unlikely(!ctx->altivec_enabled)) { 544 gen_exception(ctx, POWERPC_EXCP_VPU); 545 return; 546 } 547 } 548 t0 = tcg_temp_new_i64(); 549 tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); 550 set_cpu_vsr(xT(ctx->opcode), t0, true); 551 tcg_temp_free_i64(t0); 552} 553 554static void gen_mfvsrld(DisasContext *ctx) 555{ 556 TCGv_i64 t0; 557 if (xS(ctx->opcode) < 32) { 558 if (unlikely(!ctx->vsx_enabled)) { 559 gen_exception(ctx, POWERPC_EXCP_VSXU); 560 return; 561 } 562 } else { 563 if (unlikely(!ctx->altivec_enabled)) { 564 gen_exception(ctx, POWERPC_EXCP_VPU); 565 return; 566 } 567 } 568 t0 = tcg_temp_new_i64(); 569 get_cpu_vsr(t0, xS(ctx->opcode), false); 570 tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); 571 tcg_temp_free_i64(t0); 572} 573 574static void gen_mtvsrdd(DisasContext *ctx) 575{ 576 TCGv_i64 t0; 577 if (xT(ctx->opcode) < 32) { 578 if (unlikely(!ctx->vsx_enabled)) { 579 gen_exception(ctx, POWERPC_EXCP_VSXU); 580 return; 581 } 582 } else { 583 if (unlikely(!ctx->altivec_enabled)) { 584 gen_exception(ctx, POWERPC_EXCP_VPU); 585 return; 586 } 587 } 588 589 t0 = tcg_temp_new_i64(); 590 if (!rA(ctx->opcode)) { 591 tcg_gen_movi_i64(t0, 0); 592 } else { 593 tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); 594 } 595 set_cpu_vsr(xT(ctx->opcode), t0, true); 596 597 tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); 598 set_cpu_vsr(xT(ctx->opcode), t0, false); 599 tcg_temp_free_i64(t0); 600} 601 602static void gen_mtvsrws(DisasContext *ctx) 603{ 604 TCGv_i64 t0; 605 if (xT(ctx->opcode) < 32) { 606 if (unlikely(!ctx->vsx_enabled)) { 607 gen_exception(ctx, POWERPC_EXCP_VSXU); 608 return; 609 } 610 } else { 611 if (unlikely(!ctx->altivec_enabled)) { 612 gen_exception(ctx, POWERPC_EXCP_VPU); 613 return; 614 } 615 } 616 617 t0 = tcg_temp_new_i64(); 618 tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)], 619 cpu_gpr[rA(ctx->opcode)], 32, 32); 620 set_cpu_vsr(xT(ctx->opcode), t0, false); 621 set_cpu_vsr(xT(ctx->opcode), t0, true); 622 tcg_temp_free_i64(t0); 623} 624 625#endif 626 627#define OP_ABS 1 628#define OP_NABS 2 629#define OP_NEG 3 630#define OP_CPSGN 4 631#define SGN_MASK_DP 0x8000000000000000ull 632#define SGN_MASK_SP 0x8000000080000000ull 633 634#define VSX_SCALAR_MOVE(name, op, sgn_mask) \ 635static void glue(gen_, name)(DisasContext *ctx) \ 636 { \ 637 TCGv_i64 xb, sgm; \ 638 if (unlikely(!ctx->vsx_enabled)) { \ 639 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 640 return; \ 641 } \ 642 xb = tcg_temp_new_i64(); \ 643 sgm = tcg_temp_new_i64(); \ 644 get_cpu_vsr(xb, xB(ctx->opcode), true); \ 645 tcg_gen_movi_i64(sgm, sgn_mask); \ 646 switch (op) { \ 647 case OP_ABS: { \ 648 tcg_gen_andc_i64(xb, xb, sgm); \ 649 break; \ 650 } \ 651 case OP_NABS: { \ 652 tcg_gen_or_i64(xb, xb, sgm); \ 653 break; \ 654 } \ 655 case OP_NEG: { \ 656 tcg_gen_xor_i64(xb, xb, sgm); \ 657 break; \ 658 } \ 659 case OP_CPSGN: { \ 660 TCGv_i64 xa = tcg_temp_new_i64(); \ 661 get_cpu_vsr(xa, xA(ctx->opcode), true); \ 662 tcg_gen_and_i64(xa, xa, sgm); \ 663 tcg_gen_andc_i64(xb, xb, sgm); \ 664 tcg_gen_or_i64(xb, xb, xa); \ 665 tcg_temp_free_i64(xa); \ 666 break; \ 667 } \ 668 } \ 669 set_cpu_vsr(xT(ctx->opcode), xb, true); \ 670 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ 671 tcg_temp_free_i64(xb); \ 672 tcg_temp_free_i64(sgm); \ 673 } 674 675VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP) 676VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP) 677VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP) 678VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP) 679 680#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask) \ 681static void glue(gen_, name)(DisasContext *ctx) \ 682{ \ 683 int xa; \ 684 int xt = rD(ctx->opcode) + 32; \ 685 int xb = rB(ctx->opcode) + 32; \ 686 TCGv_i64 xah, xbh, xbl, sgm, tmp; \ 687 \ 688 if (unlikely(!ctx->vsx_enabled)) { \ 689 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 690 return; \ 691 } \ 692 xbh = tcg_temp_new_i64(); \ 693 xbl = tcg_temp_new_i64(); \ 694 sgm = tcg_temp_new_i64(); \ 695 tmp = tcg_temp_new_i64(); \ 696 get_cpu_vsr(xbh, xb, true); \ 697 get_cpu_vsr(xbl, xb, false); \ 698 tcg_gen_movi_i64(sgm, sgn_mask); \ 699 switch (op) { \ 700 case OP_ABS: \ 701 tcg_gen_andc_i64(xbh, xbh, sgm); \ 702 break; \ 703 case OP_NABS: \ 704 tcg_gen_or_i64(xbh, xbh, sgm); \ 705 break; \ 706 case OP_NEG: \ 707 tcg_gen_xor_i64(xbh, xbh, sgm); \ 708 break; \ 709 case OP_CPSGN: \ 710 xah = tcg_temp_new_i64(); \ 711 xa = rA(ctx->opcode) + 32; \ 712 get_cpu_vsr(tmp, xa, true); \ 713 tcg_gen_and_i64(xah, tmp, sgm); \ 714 tcg_gen_andc_i64(xbh, xbh, sgm); \ 715 tcg_gen_or_i64(xbh, xbh, xah); \ 716 tcg_temp_free_i64(xah); \ 717 break; \ 718 } \ 719 set_cpu_vsr(xt, xbh, true); \ 720 set_cpu_vsr(xt, xbl, false); \ 721 tcg_temp_free_i64(xbl); \ 722 tcg_temp_free_i64(xbh); \ 723 tcg_temp_free_i64(sgm); \ 724 tcg_temp_free_i64(tmp); \ 725} 726 727VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP) 728VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP) 729VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP) 730VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP) 731 732#define VSX_VECTOR_MOVE(name, op, sgn_mask) \ 733static void glue(gen_, name)(DisasContext *ctx) \ 734 { \ 735 TCGv_i64 xbh, xbl, sgm; \ 736 if (unlikely(!ctx->vsx_enabled)) { \ 737 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 738 return; \ 739 } \ 740 xbh = tcg_temp_new_i64(); \ 741 xbl = tcg_temp_new_i64(); \ 742 sgm = tcg_temp_new_i64(); \ 743 get_cpu_vsr(xbh, xB(ctx->opcode), true); \ 744 get_cpu_vsr(xbl, xB(ctx->opcode), false); \ 745 tcg_gen_movi_i64(sgm, sgn_mask); \ 746 switch (op) { \ 747 case OP_ABS: { \ 748 tcg_gen_andc_i64(xbh, xbh, sgm); \ 749 tcg_gen_andc_i64(xbl, xbl, sgm); \ 750 break; \ 751 } \ 752 case OP_NABS: { \ 753 tcg_gen_or_i64(xbh, xbh, sgm); \ 754 tcg_gen_or_i64(xbl, xbl, sgm); \ 755 break; \ 756 } \ 757 case OP_NEG: { \ 758 tcg_gen_xor_i64(xbh, xbh, sgm); \ 759 tcg_gen_xor_i64(xbl, xbl, sgm); \ 760 break; \ 761 } \ 762 case OP_CPSGN: { \ 763 TCGv_i64 xah = tcg_temp_new_i64(); \ 764 TCGv_i64 xal = tcg_temp_new_i64(); \ 765 get_cpu_vsr(xah, xA(ctx->opcode), true); \ 766 get_cpu_vsr(xal, xA(ctx->opcode), false); \ 767 tcg_gen_and_i64(xah, xah, sgm); \ 768 tcg_gen_and_i64(xal, xal, sgm); \ 769 tcg_gen_andc_i64(xbh, xbh, sgm); \ 770 tcg_gen_andc_i64(xbl, xbl, sgm); \ 771 tcg_gen_or_i64(xbh, xbh, xah); \ 772 tcg_gen_or_i64(xbl, xbl, xal); \ 773 tcg_temp_free_i64(xah); \ 774 tcg_temp_free_i64(xal); \ 775 break; \ 776 } \ 777 } \ 778 set_cpu_vsr(xT(ctx->opcode), xbh, true); \ 779 set_cpu_vsr(xT(ctx->opcode), xbl, false); \ 780 tcg_temp_free_i64(xbh); \ 781 tcg_temp_free_i64(xbl); \ 782 tcg_temp_free_i64(sgm); \ 783 } 784 785VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP) 786VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP) 787VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP) 788VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP) 789VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP) 790VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP) 791VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP) 792VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) 793 794#define VSX_CMP(name, op1, op2, inval, type) \ 795static void gen_##name(DisasContext *ctx) \ 796{ \ 797 TCGv_i32 ignored; \ 798 TCGv_ptr xt, xa, xb; \ 799 if (unlikely(!ctx->vsx_enabled)) { \ 800 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 801 return; \ 802 } \ 803 xt = gen_vsr_ptr(xT(ctx->opcode)); \ 804 xa = gen_vsr_ptr(xA(ctx->opcode)); \ 805 xb = gen_vsr_ptr(xB(ctx->opcode)); \ 806 if ((ctx->opcode >> (31 - 21)) & 1) { \ 807 gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \ 808 } else { \ 809 ignored = tcg_temp_new_i32(); \ 810 gen_helper_##name(ignored, cpu_env, xt, xa, xb); \ 811 tcg_temp_free_i32(ignored); \ 812 } \ 813 tcg_temp_free_ptr(xt); \ 814 tcg_temp_free_ptr(xa); \ 815 tcg_temp_free_ptr(xb); \ 816} 817 818VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX) 819VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX) 820VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX) 821VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300) 822VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX) 823VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX) 824VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX) 825VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX) 826 827static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a) 828{ 829 TCGv_i32 ro; 830 TCGv_ptr xt, xb; 831 832 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 833 REQUIRE_VSX(ctx); 834 835 ro = tcg_const_i32(a->rc); 836 837 xt = gen_avr_ptr(a->rt); 838 xb = gen_avr_ptr(a->rb); 839 gen_helper_XSCVQPDP(cpu_env, ro, xt, xb); 840 tcg_temp_free_i32(ro); 841 tcg_temp_free_ptr(xt); 842 tcg_temp_free_ptr(xb); 843 844 return true; 845} 846 847static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a, 848 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr)) 849{ 850 TCGv_ptr xt, xb; 851 852 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 853 REQUIRE_VSX(ctx); 854 855 xt = gen_avr_ptr(a->rt); 856 xb = gen_avr_ptr(a->rb); 857 gen_helper(cpu_env, xt, xb); 858 tcg_temp_free_ptr(xt); 859 tcg_temp_free_ptr(xb); 860 861 return true; 862} 863 864TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP) 865TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP) 866TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ) 867TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ) 868 869#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \ 870static void gen_##name(DisasContext *ctx) \ 871{ \ 872 TCGv_i32 opc; \ 873 if (unlikely(!ctx->vsx_enabled)) { \ 874 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 875 return; \ 876 } \ 877 opc = tcg_const_i32(ctx->opcode); \ 878 gen_helper_##name(cpu_env, opc); \ 879 tcg_temp_free_i32(opc); \ 880} 881 882#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \ 883static void gen_##name(DisasContext *ctx) \ 884{ \ 885 TCGv_ptr xt, xa, xb; \ 886 if (unlikely(!ctx->vsx_enabled)) { \ 887 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 888 return; \ 889 } \ 890 xt = gen_vsr_ptr(xT(ctx->opcode)); \ 891 xa = gen_vsr_ptr(xA(ctx->opcode)); \ 892 xb = gen_vsr_ptr(xB(ctx->opcode)); \ 893 gen_helper_##name(cpu_env, xt, xa, xb); \ 894 tcg_temp_free_ptr(xt); \ 895 tcg_temp_free_ptr(xa); \ 896 tcg_temp_free_ptr(xb); \ 897} 898 899#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \ 900static void gen_##name(DisasContext *ctx) \ 901{ \ 902 TCGv_ptr xt, xb; \ 903 if (unlikely(!ctx->vsx_enabled)) { \ 904 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 905 return; \ 906 } \ 907 xt = gen_vsr_ptr(xT(ctx->opcode)); \ 908 xb = gen_vsr_ptr(xB(ctx->opcode)); \ 909 gen_helper_##name(cpu_env, xt, xb); \ 910 tcg_temp_free_ptr(xt); \ 911 tcg_temp_free_ptr(xb); \ 912} 913 914#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \ 915static void gen_##name(DisasContext *ctx) \ 916{ \ 917 TCGv_i32 opc; \ 918 TCGv_ptr xa, xb; \ 919 if (unlikely(!ctx->vsx_enabled)) { \ 920 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 921 return; \ 922 } \ 923 opc = tcg_const_i32(ctx->opcode); \ 924 xa = gen_vsr_ptr(xA(ctx->opcode)); \ 925 xb = gen_vsr_ptr(xB(ctx->opcode)); \ 926 gen_helper_##name(cpu_env, opc, xa, xb); \ 927 tcg_temp_free_i32(opc); \ 928 tcg_temp_free_ptr(xa); \ 929 tcg_temp_free_ptr(xb); \ 930} 931 932#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \ 933static void gen_##name(DisasContext *ctx) \ 934{ \ 935 TCGv_i32 opc; \ 936 TCGv_ptr xb; \ 937 if (unlikely(!ctx->vsx_enabled)) { \ 938 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 939 return; \ 940 } \ 941 opc = tcg_const_i32(ctx->opcode); \ 942 xb = gen_vsr_ptr(xB(ctx->opcode)); \ 943 gen_helper_##name(cpu_env, opc, xb); \ 944 tcg_temp_free_i32(opc); \ 945 tcg_temp_free_ptr(xb); \ 946} 947 948#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \ 949static void gen_##name(DisasContext *ctx) \ 950{ \ 951 TCGv_i32 opc; \ 952 TCGv_ptr xt, xa, xb; \ 953 if (unlikely(!ctx->vsx_enabled)) { \ 954 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 955 return; \ 956 } \ 957 opc = tcg_const_i32(ctx->opcode); \ 958 xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ 959 xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ 960 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ 961 gen_helper_##name(cpu_env, opc, xt, xa, xb); \ 962 tcg_temp_free_i32(opc); \ 963 tcg_temp_free_ptr(xt); \ 964 tcg_temp_free_ptr(xa); \ 965 tcg_temp_free_ptr(xb); \ 966} 967 968#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \ 969static void gen_##name(DisasContext *ctx) \ 970{ \ 971 TCGv_i32 opc; \ 972 TCGv_ptr xt, xb; \ 973 if (unlikely(!ctx->vsx_enabled)) { \ 974 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 975 return; \ 976 } \ 977 opc = tcg_const_i32(ctx->opcode); \ 978 xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ 979 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ 980 gen_helper_##name(cpu_env, opc, xt, xb); \ 981 tcg_temp_free_i32(opc); \ 982 tcg_temp_free_ptr(xt); \ 983 tcg_temp_free_ptr(xb); \ 984} 985 986#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \ 987static void gen_##name(DisasContext *ctx) \ 988{ \ 989 TCGv_i32 opc; \ 990 TCGv_ptr xa, xb; \ 991 if (unlikely(!ctx->vsx_enabled)) { \ 992 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 993 return; \ 994 } \ 995 opc = tcg_const_i32(ctx->opcode); \ 996 xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ 997 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ 998 gen_helper_##name(cpu_env, opc, xa, xb); \ 999 tcg_temp_free_i32(opc); \ 1000 tcg_temp_free_ptr(xa); \ 1001 tcg_temp_free_ptr(xb); \ 1002} 1003 1004#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ 1005static void gen_##name(DisasContext *ctx) \ 1006{ \ 1007 TCGv_i64 t0; \ 1008 TCGv_i64 t1; \ 1009 if (unlikely(!ctx->vsx_enabled)) { \ 1010 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 1011 return; \ 1012 } \ 1013 t0 = tcg_temp_new_i64(); \ 1014 t1 = tcg_temp_new_i64(); \ 1015 get_cpu_vsr(t0, xB(ctx->opcode), true); \ 1016 gen_helper_##name(t1, cpu_env, t0); \ 1017 set_cpu_vsr(xT(ctx->opcode), t1, true); \ 1018 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ 1019 tcg_temp_free_i64(t0); \ 1020 tcg_temp_free_i64(t1); \ 1021} 1022 1023GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX) 1024GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300) 1025GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX) 1026GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX) 1027GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300) 1028GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX) 1029GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300) 1030GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX) 1031GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX) 1032GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX) 1033GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX) 1034GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX) 1035GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300) 1036GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300) 1037GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX) 1038GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX) 1039GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX) 1040GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX) 1041GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX) 1042GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX) 1043GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300) 1044GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX) 1045GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300) 1046GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207) 1047GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300) 1048GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300) 1049GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300) 1050GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300) 1051GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300) 1052GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300) 1053GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX) 1054 1055bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a) 1056{ 1057 TCGv_i64 tmp; 1058 1059 REQUIRE_INSNS_FLAGS2(ctx, VSX207); 1060 REQUIRE_VSX(ctx); 1061 1062 tmp = tcg_temp_new_i64(); 1063 get_cpu_vsr(tmp, a->xb, true); 1064 1065 gen_helper_XSCVSPDPN(tmp, tmp); 1066 1067 set_cpu_vsr(a->xt, tmp, true); 1068 set_cpu_vsr(a->xt, tcg_constant_i64(0), false); 1069 1070 tcg_temp_free_i64(tmp); 1071 1072 return true; 1073} 1074 1075GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX) 1076GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX) 1077GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX) 1078GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX) 1079GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX) 1080GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300) 1081GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX) 1082GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX) 1083GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX) 1084GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX) 1085GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX) 1086GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX) 1087GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207) 1088GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300) 1089GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300) 1090GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300) 1091GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300) 1092GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207) 1093GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207) 1094GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207) 1095GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207) 1096GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207) 1097GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207) 1098GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207) 1099GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207) 1100GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207) 1101GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300) 1102GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300) 1103GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300) 1104 1105GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX) 1106GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX) 1107GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX) 1108GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX) 1109GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX) 1110GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX) 1111GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX) 1112GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX) 1113GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX) 1114GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX) 1115GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX) 1116GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX) 1117GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX) 1118GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX) 1119GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX) 1120GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX) 1121GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX) 1122GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX) 1123GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX) 1124GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX) 1125GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX) 1126GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX) 1127GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX) 1128GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX) 1129GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX) 1130 1131GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX) 1132GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX) 1133GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX) 1134GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX) 1135GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX) 1136GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX) 1137GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX) 1138GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX) 1139GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX) 1140GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX) 1141GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX) 1142GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX) 1143GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300) 1144GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300) 1145GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX) 1146GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX) 1147GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX) 1148GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX) 1149GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX) 1150GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX) 1151GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX) 1152GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX) 1153GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX) 1154GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX) 1155GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX) 1156GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX) 1157GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX) 1158GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX) 1159GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX) 1160 1161static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a) 1162{ 1163 TCGv_ptr xt, xa, xb; 1164 1165 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 1166 REQUIRE_VSX(ctx); 1167 1168 xt = gen_vsr_ptr(a->xt); 1169 xa = gen_vsr_ptr(a->xa); 1170 xb = gen_vsr_ptr(a->xb); 1171 1172 gen_helper_VPERM(xt, xa, xt, xb); 1173 1174 tcg_temp_free_ptr(xt); 1175 tcg_temp_free_ptr(xa); 1176 tcg_temp_free_ptr(xb); 1177 1178 return true; 1179} 1180 1181static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a) 1182{ 1183 TCGv_ptr xt, xa, xb; 1184 1185 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 1186 REQUIRE_VSX(ctx); 1187 1188 xt = gen_vsr_ptr(a->xt); 1189 xa = gen_vsr_ptr(a->xa); 1190 xb = gen_vsr_ptr(a->xb); 1191 1192 gen_helper_VPERMR(xt, xa, xt, xb); 1193 1194 tcg_temp_free_ptr(xt); 1195 tcg_temp_free_ptr(xa); 1196 tcg_temp_free_ptr(xb); 1197 1198 return true; 1199} 1200 1201static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) 1202{ 1203 TCGv_i64 t0, t1; 1204 1205 REQUIRE_INSNS_FLAGS2(ctx, VSX); 1206 REQUIRE_VSX(ctx); 1207 1208 t0 = tcg_temp_new_i64(); 1209 1210 if (unlikely(a->xt == a->xa || a->xt == a->xb)) { 1211 t1 = tcg_temp_new_i64(); 1212 1213 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); 1214 get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0); 1215 1216 set_cpu_vsr(a->xt, t0, true); 1217 set_cpu_vsr(a->xt, t1, false); 1218 1219 tcg_temp_free_i64(t1); 1220 } else { 1221 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); 1222 set_cpu_vsr(a->xt, t0, true); 1223 1224 get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0); 1225 set_cpu_vsr(a->xt, t0, false); 1226 } 1227 1228 tcg_temp_free_i64(t0); 1229 1230 return true; 1231} 1232 1233static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a) 1234{ 1235 TCGv_ptr xt, xa, xb, xc; 1236 1237 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1238 REQUIRE_VSX(ctx); 1239 1240 xt = gen_vsr_ptr(a->xt); 1241 xa = gen_vsr_ptr(a->xa); 1242 xb = gen_vsr_ptr(a->xb); 1243 xc = gen_vsr_ptr(a->xc); 1244 1245 gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3)); 1246 1247 tcg_temp_free_ptr(xt); 1248 tcg_temp_free_ptr(xa); 1249 tcg_temp_free_ptr(xb); 1250 tcg_temp_free_ptr(xc); 1251 1252 return true; 1253} 1254 1255typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr); 1256 1257static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a, 1258 const xxgenpcv_genfn fn[4]) 1259{ 1260 TCGv_ptr xt, vrb; 1261 1262 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1263 REQUIRE_VSX(ctx); 1264 1265 if (a->imm & ~0x3) { 1266 gen_invalid(ctx); 1267 return true; 1268 } 1269 1270 xt = gen_vsr_ptr(a->xt); 1271 vrb = gen_avr_ptr(a->vrb); 1272 1273 fn[a->imm](xt, vrb); 1274 1275 tcg_temp_free_ptr(xt); 1276 tcg_temp_free_ptr(vrb); 1277 1278 return true; 1279} 1280 1281#define XXGENPCV(NAME) \ 1282 static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a) \ 1283 { \ 1284 static const xxgenpcv_genfn fn[4] = { \ 1285 gen_helper_##NAME##_be_exp, \ 1286 gen_helper_##NAME##_be_comp, \ 1287 gen_helper_##NAME##_le_exp, \ 1288 gen_helper_##NAME##_le_comp, \ 1289 }; \ 1290 return do_xxgenpcv(ctx, a, fn); \ 1291 } 1292 1293XXGENPCV(XXGENPCVBM) 1294XXGENPCV(XXGENPCVHM) 1295XXGENPCV(XXGENPCVWM) 1296XXGENPCV(XXGENPCVDM) 1297#undef XXGENPCV 1298 1299static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3, 1300 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 1301{ 1302 TCGv_ptr t, s1, s2, s3; 1303 1304 t = gen_vsr_ptr(tgt); 1305 s1 = gen_vsr_ptr(src1); 1306 s2 = gen_vsr_ptr(src2); 1307 s3 = gen_vsr_ptr(src3); 1308 1309 gen_helper(cpu_env, t, s1, s2, s3); 1310 1311 tcg_temp_free_ptr(t); 1312 tcg_temp_free_ptr(s1); 1313 tcg_temp_free_ptr(s2); 1314 tcg_temp_free_ptr(s3); 1315 1316 return true; 1317} 1318 1319static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a, 1320 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 1321{ 1322 REQUIRE_VSX(ctx); 1323 1324 if (type_a) { 1325 return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper); 1326 } 1327 return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper); 1328} 1329 1330TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP) 1331TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP) 1332TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP) 1333TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP) 1334TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP) 1335TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP) 1336TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP) 1337TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP) 1338TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP) 1339TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP) 1340TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP) 1341TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP) 1342TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP) 1343TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP) 1344TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP) 1345TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP) 1346 1347static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a, 1348 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr), 1349 void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 1350{ 1351 int vrt, vra, vrb; 1352 1353 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 1354 REQUIRE_VSX(ctx); 1355 1356 vrt = a->rt + 32; 1357 vra = a->ra + 32; 1358 vrb = a->rb + 32; 1359 1360 if (a->rc) { 1361 return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro); 1362 } 1363 1364 return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper); 1365} 1366 1367TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO) 1368TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO) 1369TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO) 1370TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO) 1371 1372#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \ 1373static void gen_##name(DisasContext *ctx) \ 1374{ \ 1375 TCGv_ptr xt, s1, s2, s3; \ 1376 if (unlikely(!ctx->vsx_enabled)) { \ 1377 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 1378 return; \ 1379 } \ 1380 xt = gen_vsr_ptr(xT(ctx->opcode)); \ 1381 s1 = gen_vsr_ptr(xA(ctx->opcode)); \ 1382 if (ctx->opcode & PPC_BIT32(25)) { \ 1383 /* \ 1384 * AxT + B \ 1385 */ \ 1386 s2 = gen_vsr_ptr(xB(ctx->opcode)); \ 1387 s3 = gen_vsr_ptr(xT(ctx->opcode)); \ 1388 } else { \ 1389 /* \ 1390 * AxB + T \ 1391 */ \ 1392 s2 = gen_vsr_ptr(xT(ctx->opcode)); \ 1393 s3 = gen_vsr_ptr(xB(ctx->opcode)); \ 1394 } \ 1395 gen_helper_##name(cpu_env, xt, s1, s2, s3); \ 1396 tcg_temp_free_ptr(xt); \ 1397 tcg_temp_free_ptr(s1); \ 1398 tcg_temp_free_ptr(s2); \ 1399 tcg_temp_free_ptr(s3); \ 1400} 1401 1402GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) 1403GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX) 1404GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX) 1405GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX) 1406GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX) 1407GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX) 1408GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX) 1409GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX) 1410 1411static void gen_xxbrd(DisasContext *ctx) 1412{ 1413 TCGv_i64 xth; 1414 TCGv_i64 xtl; 1415 TCGv_i64 xbh; 1416 TCGv_i64 xbl; 1417 1418 if (unlikely(!ctx->vsx_enabled)) { 1419 gen_exception(ctx, POWERPC_EXCP_VSXU); 1420 return; 1421 } 1422 xth = tcg_temp_new_i64(); 1423 xtl = tcg_temp_new_i64(); 1424 xbh = tcg_temp_new_i64(); 1425 xbl = tcg_temp_new_i64(); 1426 get_cpu_vsr(xbh, xB(ctx->opcode), true); 1427 get_cpu_vsr(xbl, xB(ctx->opcode), false); 1428 1429 tcg_gen_bswap64_i64(xth, xbh); 1430 tcg_gen_bswap64_i64(xtl, xbl); 1431 set_cpu_vsr(xT(ctx->opcode), xth, true); 1432 set_cpu_vsr(xT(ctx->opcode), xtl, false); 1433 1434 tcg_temp_free_i64(xth); 1435 tcg_temp_free_i64(xtl); 1436 tcg_temp_free_i64(xbh); 1437 tcg_temp_free_i64(xbl); 1438} 1439 1440static void gen_xxbrh(DisasContext *ctx) 1441{ 1442 TCGv_i64 xth; 1443 TCGv_i64 xtl; 1444 TCGv_i64 xbh; 1445 TCGv_i64 xbl; 1446 1447 if (unlikely(!ctx->vsx_enabled)) { 1448 gen_exception(ctx, POWERPC_EXCP_VSXU); 1449 return; 1450 } 1451 xth = tcg_temp_new_i64(); 1452 xtl = tcg_temp_new_i64(); 1453 xbh = tcg_temp_new_i64(); 1454 xbl = tcg_temp_new_i64(); 1455 get_cpu_vsr(xbh, xB(ctx->opcode), true); 1456 get_cpu_vsr(xbl, xB(ctx->opcode), false); 1457 1458 gen_bswap16x8(xth, xtl, xbh, xbl); 1459 set_cpu_vsr(xT(ctx->opcode), xth, true); 1460 set_cpu_vsr(xT(ctx->opcode), xtl, false); 1461 1462 tcg_temp_free_i64(xth); 1463 tcg_temp_free_i64(xtl); 1464 tcg_temp_free_i64(xbh); 1465 tcg_temp_free_i64(xbl); 1466} 1467 1468static void gen_xxbrq(DisasContext *ctx) 1469{ 1470 TCGv_i64 xth; 1471 TCGv_i64 xtl; 1472 TCGv_i64 xbh; 1473 TCGv_i64 xbl; 1474 TCGv_i64 t0; 1475 1476 if (unlikely(!ctx->vsx_enabled)) { 1477 gen_exception(ctx, POWERPC_EXCP_VSXU); 1478 return; 1479 } 1480 xth = tcg_temp_new_i64(); 1481 xtl = tcg_temp_new_i64(); 1482 xbh = tcg_temp_new_i64(); 1483 xbl = tcg_temp_new_i64(); 1484 get_cpu_vsr(xbh, xB(ctx->opcode), true); 1485 get_cpu_vsr(xbl, xB(ctx->opcode), false); 1486 t0 = tcg_temp_new_i64(); 1487 1488 tcg_gen_bswap64_i64(t0, xbl); 1489 tcg_gen_bswap64_i64(xtl, xbh); 1490 set_cpu_vsr(xT(ctx->opcode), xtl, false); 1491 tcg_gen_mov_i64(xth, t0); 1492 set_cpu_vsr(xT(ctx->opcode), xth, true); 1493 1494 tcg_temp_free_i64(t0); 1495 tcg_temp_free_i64(xth); 1496 tcg_temp_free_i64(xtl); 1497 tcg_temp_free_i64(xbh); 1498 tcg_temp_free_i64(xbl); 1499} 1500 1501static void gen_xxbrw(DisasContext *ctx) 1502{ 1503 TCGv_i64 xth; 1504 TCGv_i64 xtl; 1505 TCGv_i64 xbh; 1506 TCGv_i64 xbl; 1507 1508 if (unlikely(!ctx->vsx_enabled)) { 1509 gen_exception(ctx, POWERPC_EXCP_VSXU); 1510 return; 1511 } 1512 xth = tcg_temp_new_i64(); 1513 xtl = tcg_temp_new_i64(); 1514 xbh = tcg_temp_new_i64(); 1515 xbl = tcg_temp_new_i64(); 1516 get_cpu_vsr(xbh, xB(ctx->opcode), true); 1517 get_cpu_vsr(xbl, xB(ctx->opcode), false); 1518 1519 gen_bswap32x4(xth, xtl, xbh, xbl); 1520 set_cpu_vsr(xT(ctx->opcode), xth, true); 1521 set_cpu_vsr(xT(ctx->opcode), xtl, false); 1522 1523 tcg_temp_free_i64(xth); 1524 tcg_temp_free_i64(xtl); 1525 tcg_temp_free_i64(xbh); 1526 tcg_temp_free_i64(xbl); 1527} 1528 1529#define VSX_LOGICAL(name, vece, tcg_op) \ 1530static void glue(gen_, name)(DisasContext *ctx) \ 1531 { \ 1532 if (unlikely(!ctx->vsx_enabled)) { \ 1533 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 1534 return; \ 1535 } \ 1536 tcg_op(vece, vsr_full_offset(xT(ctx->opcode)), \ 1537 vsr_full_offset(xA(ctx->opcode)), \ 1538 vsr_full_offset(xB(ctx->opcode)), 16, 16); \ 1539 } 1540 1541VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and) 1542VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc) 1543VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or) 1544VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor) 1545VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor) 1546VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv) 1547VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand) 1548VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc) 1549 1550#define VSX_XXMRG(name, high) \ 1551static void glue(gen_, name)(DisasContext *ctx) \ 1552 { \ 1553 TCGv_i64 a0, a1, b0, b1, tmp; \ 1554 if (unlikely(!ctx->vsx_enabled)) { \ 1555 gen_exception(ctx, POWERPC_EXCP_VSXU); \ 1556 return; \ 1557 } \ 1558 a0 = tcg_temp_new_i64(); \ 1559 a1 = tcg_temp_new_i64(); \ 1560 b0 = tcg_temp_new_i64(); \ 1561 b1 = tcg_temp_new_i64(); \ 1562 tmp = tcg_temp_new_i64(); \ 1563 get_cpu_vsr(a0, xA(ctx->opcode), high); \ 1564 get_cpu_vsr(a1, xA(ctx->opcode), high); \ 1565 get_cpu_vsr(b0, xB(ctx->opcode), high); \ 1566 get_cpu_vsr(b1, xB(ctx->opcode), high); \ 1567 tcg_gen_shri_i64(a0, a0, 32); \ 1568 tcg_gen_shri_i64(b0, b0, 32); \ 1569 tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \ 1570 set_cpu_vsr(xT(ctx->opcode), tmp, true); \ 1571 tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ 1572 set_cpu_vsr(xT(ctx->opcode), tmp, false); \ 1573 tcg_temp_free_i64(a0); \ 1574 tcg_temp_free_i64(a1); \ 1575 tcg_temp_free_i64(b0); \ 1576 tcg_temp_free_i64(b1); \ 1577 tcg_temp_free_i64(tmp); \ 1578 } 1579 1580VSX_XXMRG(xxmrghw, 1) 1581VSX_XXMRG(xxmrglw, 0) 1582 1583static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a) 1584{ 1585 REQUIRE_INSNS_FLAGS2(ctx, VSX); 1586 REQUIRE_VSX(ctx); 1587 1588 tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc), 1589 vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16); 1590 1591 return true; 1592} 1593 1594static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a) 1595{ 1596 int tofs, bofs; 1597 1598 REQUIRE_VSX(ctx); 1599 1600 tofs = vsr_full_offset(a->xt); 1601 bofs = vsr_full_offset(a->xb); 1602 bofs += a->uim << MO_32; 1603#if !HOST_BIG_ENDIAN 1604 bofs ^= 8 | 4; 1605#endif 1606 1607 tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16); 1608 return true; 1609} 1610 1611#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) 1612 1613static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a) 1614{ 1615 if (a->xt < 32) { 1616 REQUIRE_VSX(ctx); 1617 } else { 1618 REQUIRE_VECTOR(ctx); 1619 } 1620 tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm); 1621 return true; 1622} 1623 1624static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a) 1625{ 1626 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1627 REQUIRE_VSX(ctx); 1628 1629 tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si); 1630 1631 return true; 1632} 1633 1634static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a) 1635{ 1636 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1637 REQUIRE_VSX(ctx); 1638 1639 tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16, 1640 helper_todouble(a->si)); 1641 return true; 1642} 1643 1644static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a) 1645{ 1646 TCGv_i32 imm; 1647 1648 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1649 REQUIRE_VSX(ctx); 1650 1651 imm = tcg_constant_i32(a->si); 1652 1653 tcg_gen_st_i32(imm, cpu_env, 1654 offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix))); 1655 tcg_gen_st_i32(imm, cpu_env, 1656 offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix))); 1657 1658 return true; 1659} 1660 1661static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a) 1662{ 1663 static const uint64_t values[32] = { 1664 0, /* Unspecified */ 1665 0x3FFF000000000000llu, /* QP +1.0 */ 1666 0x4000000000000000llu, /* QP +2.0 */ 1667 0x4000800000000000llu, /* QP +3.0 */ 1668 0x4001000000000000llu, /* QP +4.0 */ 1669 0x4001400000000000llu, /* QP +5.0 */ 1670 0x4001800000000000llu, /* QP +6.0 */ 1671 0x4001C00000000000llu, /* QP +7.0 */ 1672 0x7FFF000000000000llu, /* QP +Inf */ 1673 0x7FFF800000000000llu, /* QP dQNaN */ 1674 0, /* Unspecified */ 1675 0, /* Unspecified */ 1676 0, /* Unspecified */ 1677 0, /* Unspecified */ 1678 0, /* Unspecified */ 1679 0, /* Unspecified */ 1680 0x8000000000000000llu, /* QP -0.0 */ 1681 0xBFFF000000000000llu, /* QP -1.0 */ 1682 0xC000000000000000llu, /* QP -2.0 */ 1683 0xC000800000000000llu, /* QP -3.0 */ 1684 0xC001000000000000llu, /* QP -4.0 */ 1685 0xC001400000000000llu, /* QP -5.0 */ 1686 0xC001800000000000llu, /* QP -6.0 */ 1687 0xC001C00000000000llu, /* QP -7.0 */ 1688 0xFFFF000000000000llu, /* QP -Inf */ 1689 }; 1690 1691 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1692 REQUIRE_VSX(ctx); 1693 1694 if (values[a->uim]) { 1695 set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false); 1696 set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true); 1697 } else { 1698 gen_invalid(ctx); 1699 } 1700 1701 return true; 1702} 1703 1704static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a) 1705{ 1706 TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero; 1707 1708 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 1709 REQUIRE_VSX(ctx); 1710 1711 xb = tcg_temp_new_i64(); 1712 t0 = tcg_temp_new_i64(); 1713 t1 = tcg_temp_new_i64(); 1714 all_true = tcg_temp_new_i64(); 1715 all_false = tcg_temp_new_i64(); 1716 mask = tcg_constant_i64(dup_const(MO_8, 1)); 1717 zero = tcg_constant_i64(0); 1718 1719 get_cpu_vsr(xb, a->xb, true); 1720 tcg_gen_and_i64(t0, mask, xb); 1721 get_cpu_vsr(xb, a->xb, false); 1722 tcg_gen_and_i64(t1, mask, xb); 1723 1724 tcg_gen_or_i64(all_false, t0, t1); 1725 tcg_gen_and_i64(all_true, t0, t1); 1726 1727 tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero); 1728 tcg_gen_shli_i64(all_false, all_false, 1); 1729 tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask); 1730 tcg_gen_shli_i64(all_true, all_true, 3); 1731 1732 tcg_gen_or_i64(t0, all_false, all_true); 1733 tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0); 1734 1735 tcg_temp_free_i64(xb); 1736 tcg_temp_free_i64(t0); 1737 tcg_temp_free_i64(t1); 1738 tcg_temp_free_i64(all_true); 1739 tcg_temp_free_i64(all_false); 1740 1741 return true; 1742} 1743 1744static void gen_xxsldwi(DisasContext *ctx) 1745{ 1746 TCGv_i64 xth, xtl; 1747 if (unlikely(!ctx->vsx_enabled)) { 1748 gen_exception(ctx, POWERPC_EXCP_VSXU); 1749 return; 1750 } 1751 xth = tcg_temp_new_i64(); 1752 xtl = tcg_temp_new_i64(); 1753 1754 switch (SHW(ctx->opcode)) { 1755 case 0: { 1756 get_cpu_vsr(xth, xA(ctx->opcode), true); 1757 get_cpu_vsr(xtl, xA(ctx->opcode), false); 1758 break; 1759 } 1760 case 1: { 1761 TCGv_i64 t0 = tcg_temp_new_i64(); 1762 get_cpu_vsr(xth, xA(ctx->opcode), true); 1763 tcg_gen_shli_i64(xth, xth, 32); 1764 get_cpu_vsr(t0, xA(ctx->opcode), false); 1765 tcg_gen_shri_i64(t0, t0, 32); 1766 tcg_gen_or_i64(xth, xth, t0); 1767 get_cpu_vsr(xtl, xA(ctx->opcode), false); 1768 tcg_gen_shli_i64(xtl, xtl, 32); 1769 get_cpu_vsr(t0, xB(ctx->opcode), true); 1770 tcg_gen_shri_i64(t0, t0, 32); 1771 tcg_gen_or_i64(xtl, xtl, t0); 1772 tcg_temp_free_i64(t0); 1773 break; 1774 } 1775 case 2: { 1776 get_cpu_vsr(xth, xA(ctx->opcode), false); 1777 get_cpu_vsr(xtl, xB(ctx->opcode), true); 1778 break; 1779 } 1780 case 3: { 1781 TCGv_i64 t0 = tcg_temp_new_i64(); 1782 get_cpu_vsr(xth, xA(ctx->opcode), false); 1783 tcg_gen_shli_i64(xth, xth, 32); 1784 get_cpu_vsr(t0, xB(ctx->opcode), true); 1785 tcg_gen_shri_i64(t0, t0, 32); 1786 tcg_gen_or_i64(xth, xth, t0); 1787 get_cpu_vsr(xtl, xB(ctx->opcode), true); 1788 tcg_gen_shli_i64(xtl, xtl, 32); 1789 get_cpu_vsr(t0, xB(ctx->opcode), false); 1790 tcg_gen_shri_i64(t0, t0, 32); 1791 tcg_gen_or_i64(xtl, xtl, t0); 1792 tcg_temp_free_i64(t0); 1793 break; 1794 } 1795 } 1796 1797 set_cpu_vsr(xT(ctx->opcode), xth, true); 1798 set_cpu_vsr(xT(ctx->opcode), xtl, false); 1799 1800 tcg_temp_free_i64(xth); 1801 tcg_temp_free_i64(xtl); 1802} 1803 1804static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, 1805 void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32)) 1806{ 1807 TCGv_i64 zero = tcg_constant_i64(0); 1808 TCGv_ptr xt, xb; 1809 1810 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 1811 REQUIRE_VSX(ctx); 1812 1813 /* 1814 * uim > 15 out of bound and for 1815 * uim > 12 handle as per hardware in helper 1816 */ 1817 if (a->uim > 15) { 1818 set_cpu_vsr(a->xt, zero, true); 1819 set_cpu_vsr(a->xt, zero, false); 1820 } else { 1821 xt = gen_vsr_ptr(a->xt); 1822 xb = gen_vsr_ptr(a->xb); 1823 gen_helper(xt, xb, tcg_constant_i32(a->uim)); 1824 tcg_temp_free_ptr(xb); 1825 tcg_temp_free_ptr(xt); 1826 } 1827 1828 return true; 1829} 1830 1831TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW) 1832TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW) 1833 1834#ifdef TARGET_PPC64 1835static void gen_xsxexpdp(DisasContext *ctx) 1836{ 1837 TCGv rt = cpu_gpr[rD(ctx->opcode)]; 1838 TCGv_i64 t0; 1839 if (unlikely(!ctx->vsx_enabled)) { 1840 gen_exception(ctx, POWERPC_EXCP_VSXU); 1841 return; 1842 } 1843 t0 = tcg_temp_new_i64(); 1844 get_cpu_vsr(t0, xB(ctx->opcode), true); 1845 tcg_gen_extract_i64(rt, t0, 52, 11); 1846 tcg_temp_free_i64(t0); 1847} 1848 1849static void gen_xsxexpqp(DisasContext *ctx) 1850{ 1851 TCGv_i64 xth; 1852 TCGv_i64 xtl; 1853 TCGv_i64 xbh; 1854 1855 if (unlikely(!ctx->vsx_enabled)) { 1856 gen_exception(ctx, POWERPC_EXCP_VSXU); 1857 return; 1858 } 1859 xth = tcg_temp_new_i64(); 1860 xtl = tcg_temp_new_i64(); 1861 xbh = tcg_temp_new_i64(); 1862 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); 1863 1864 tcg_gen_extract_i64(xth, xbh, 48, 15); 1865 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); 1866 tcg_gen_movi_i64(xtl, 0); 1867 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); 1868 1869 tcg_temp_free_i64(xbh); 1870 tcg_temp_free_i64(xth); 1871 tcg_temp_free_i64(xtl); 1872} 1873 1874static void gen_xsiexpdp(DisasContext *ctx) 1875{ 1876 TCGv_i64 xth; 1877 TCGv ra = cpu_gpr[rA(ctx->opcode)]; 1878 TCGv rb = cpu_gpr[rB(ctx->opcode)]; 1879 TCGv_i64 t0; 1880 1881 if (unlikely(!ctx->vsx_enabled)) { 1882 gen_exception(ctx, POWERPC_EXCP_VSXU); 1883 return; 1884 } 1885 t0 = tcg_temp_new_i64(); 1886 xth = tcg_temp_new_i64(); 1887 tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF); 1888 tcg_gen_andi_i64(t0, rb, 0x7FF); 1889 tcg_gen_shli_i64(t0, t0, 52); 1890 tcg_gen_or_i64(xth, xth, t0); 1891 set_cpu_vsr(xT(ctx->opcode), xth, true); 1892 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); 1893 tcg_temp_free_i64(t0); 1894 tcg_temp_free_i64(xth); 1895} 1896 1897static void gen_xsiexpqp(DisasContext *ctx) 1898{ 1899 TCGv_i64 xth; 1900 TCGv_i64 xtl; 1901 TCGv_i64 xah; 1902 TCGv_i64 xal; 1903 TCGv_i64 xbh; 1904 TCGv_i64 t0; 1905 1906 if (unlikely(!ctx->vsx_enabled)) { 1907 gen_exception(ctx, POWERPC_EXCP_VSXU); 1908 return; 1909 } 1910 xth = tcg_temp_new_i64(); 1911 xtl = tcg_temp_new_i64(); 1912 xah = tcg_temp_new_i64(); 1913 xal = tcg_temp_new_i64(); 1914 get_cpu_vsr(xah, rA(ctx->opcode) + 32, true); 1915 get_cpu_vsr(xal, rA(ctx->opcode) + 32, false); 1916 xbh = tcg_temp_new_i64(); 1917 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); 1918 t0 = tcg_temp_new_i64(); 1919 1920 tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF); 1921 tcg_gen_andi_i64(t0, xbh, 0x7FFF); 1922 tcg_gen_shli_i64(t0, t0, 48); 1923 tcg_gen_or_i64(xth, xth, t0); 1924 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); 1925 tcg_gen_mov_i64(xtl, xal); 1926 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); 1927 1928 tcg_temp_free_i64(t0); 1929 tcg_temp_free_i64(xth); 1930 tcg_temp_free_i64(xtl); 1931 tcg_temp_free_i64(xah); 1932 tcg_temp_free_i64(xal); 1933 tcg_temp_free_i64(xbh); 1934} 1935 1936static void gen_xsxsigdp(DisasContext *ctx) 1937{ 1938 TCGv rt = cpu_gpr[rD(ctx->opcode)]; 1939 TCGv_i64 t0, t1, zr, nan, exp; 1940 1941 if (unlikely(!ctx->vsx_enabled)) { 1942 gen_exception(ctx, POWERPC_EXCP_VSXU); 1943 return; 1944 } 1945 exp = tcg_temp_new_i64(); 1946 t0 = tcg_temp_new_i64(); 1947 t1 = tcg_temp_new_i64(); 1948 zr = tcg_const_i64(0); 1949 nan = tcg_const_i64(2047); 1950 1951 get_cpu_vsr(t1, xB(ctx->opcode), true); 1952 tcg_gen_extract_i64(exp, t1, 52, 11); 1953 tcg_gen_movi_i64(t0, 0x0010000000000000); 1954 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); 1955 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); 1956 get_cpu_vsr(t1, xB(ctx->opcode), true); 1957 tcg_gen_deposit_i64(rt, t0, t1, 0, 52); 1958 1959 tcg_temp_free_i64(t0); 1960 tcg_temp_free_i64(t1); 1961 tcg_temp_free_i64(exp); 1962 tcg_temp_free_i64(zr); 1963 tcg_temp_free_i64(nan); 1964} 1965 1966static void gen_xsxsigqp(DisasContext *ctx) 1967{ 1968 TCGv_i64 t0, zr, nan, exp; 1969 TCGv_i64 xth; 1970 TCGv_i64 xtl; 1971 TCGv_i64 xbh; 1972 TCGv_i64 xbl; 1973 1974 if (unlikely(!ctx->vsx_enabled)) { 1975 gen_exception(ctx, POWERPC_EXCP_VSXU); 1976 return; 1977 } 1978 xth = tcg_temp_new_i64(); 1979 xtl = tcg_temp_new_i64(); 1980 xbh = tcg_temp_new_i64(); 1981 xbl = tcg_temp_new_i64(); 1982 get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); 1983 get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); 1984 exp = tcg_temp_new_i64(); 1985 t0 = tcg_temp_new_i64(); 1986 zr = tcg_const_i64(0); 1987 nan = tcg_const_i64(32767); 1988 1989 tcg_gen_extract_i64(exp, xbh, 48, 15); 1990 tcg_gen_movi_i64(t0, 0x0001000000000000); 1991 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); 1992 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); 1993 tcg_gen_deposit_i64(xth, t0, xbh, 0, 48); 1994 set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); 1995 tcg_gen_mov_i64(xtl, xbl); 1996 set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); 1997 1998 tcg_temp_free_i64(t0); 1999 tcg_temp_free_i64(exp); 2000 tcg_temp_free_i64(zr); 2001 tcg_temp_free_i64(nan); 2002 tcg_temp_free_i64(xth); 2003 tcg_temp_free_i64(xtl); 2004 tcg_temp_free_i64(xbh); 2005 tcg_temp_free_i64(xbl); 2006} 2007#endif 2008 2009static void gen_xviexpsp(DisasContext *ctx) 2010{ 2011 TCGv_i64 xth; 2012 TCGv_i64 xtl; 2013 TCGv_i64 xah; 2014 TCGv_i64 xal; 2015 TCGv_i64 xbh; 2016 TCGv_i64 xbl; 2017 TCGv_i64 t0; 2018 2019 if (unlikely(!ctx->vsx_enabled)) { 2020 gen_exception(ctx, POWERPC_EXCP_VSXU); 2021 return; 2022 } 2023 xth = tcg_temp_new_i64(); 2024 xtl = tcg_temp_new_i64(); 2025 xah = tcg_temp_new_i64(); 2026 xal = tcg_temp_new_i64(); 2027 xbh = tcg_temp_new_i64(); 2028 xbl = tcg_temp_new_i64(); 2029 get_cpu_vsr(xah, xA(ctx->opcode), true); 2030 get_cpu_vsr(xal, xA(ctx->opcode), false); 2031 get_cpu_vsr(xbh, xB(ctx->opcode), true); 2032 get_cpu_vsr(xbl, xB(ctx->opcode), false); 2033 t0 = tcg_temp_new_i64(); 2034 2035 tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF); 2036 tcg_gen_andi_i64(t0, xbh, 0xFF000000FF); 2037 tcg_gen_shli_i64(t0, t0, 23); 2038 tcg_gen_or_i64(xth, xth, t0); 2039 set_cpu_vsr(xT(ctx->opcode), xth, true); 2040 tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF); 2041 tcg_gen_andi_i64(t0, xbl, 0xFF000000FF); 2042 tcg_gen_shli_i64(t0, t0, 23); 2043 tcg_gen_or_i64(xtl, xtl, t0); 2044 set_cpu_vsr(xT(ctx->opcode), xtl, false); 2045 2046 tcg_temp_free_i64(t0); 2047 tcg_temp_free_i64(xth); 2048 tcg_temp_free_i64(xtl); 2049 tcg_temp_free_i64(xah); 2050 tcg_temp_free_i64(xal); 2051 tcg_temp_free_i64(xbh); 2052 tcg_temp_free_i64(xbl); 2053} 2054 2055static void gen_xviexpdp(DisasContext *ctx) 2056{ 2057 TCGv_i64 xth; 2058 TCGv_i64 xtl; 2059 TCGv_i64 xah; 2060 TCGv_i64 xal; 2061 TCGv_i64 xbh; 2062 TCGv_i64 xbl; 2063 2064 if (unlikely(!ctx->vsx_enabled)) { 2065 gen_exception(ctx, POWERPC_EXCP_VSXU); 2066 return; 2067 } 2068 xth = tcg_temp_new_i64(); 2069 xtl = tcg_temp_new_i64(); 2070 xah = tcg_temp_new_i64(); 2071 xal = tcg_temp_new_i64(); 2072 xbh = tcg_temp_new_i64(); 2073 xbl = tcg_temp_new_i64(); 2074 get_cpu_vsr(xah, xA(ctx->opcode), true); 2075 get_cpu_vsr(xal, xA(ctx->opcode), false); 2076 get_cpu_vsr(xbh, xB(ctx->opcode), true); 2077 get_cpu_vsr(xbl, xB(ctx->opcode), false); 2078 2079 tcg_gen_deposit_i64(xth, xah, xbh, 52, 11); 2080 set_cpu_vsr(xT(ctx->opcode), xth, true); 2081 2082 tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); 2083 set_cpu_vsr(xT(ctx->opcode), xtl, false); 2084 2085 tcg_temp_free_i64(xth); 2086 tcg_temp_free_i64(xtl); 2087 tcg_temp_free_i64(xah); 2088 tcg_temp_free_i64(xal); 2089 tcg_temp_free_i64(xbh); 2090 tcg_temp_free_i64(xbl); 2091} 2092 2093static void gen_xvxexpsp(DisasContext *ctx) 2094{ 2095 TCGv_i64 xth; 2096 TCGv_i64 xtl; 2097 TCGv_i64 xbh; 2098 TCGv_i64 xbl; 2099 2100 if (unlikely(!ctx->vsx_enabled)) { 2101 gen_exception(ctx, POWERPC_EXCP_VSXU); 2102 return; 2103 } 2104 xth = tcg_temp_new_i64(); 2105 xtl = tcg_temp_new_i64(); 2106 xbh = tcg_temp_new_i64(); 2107 xbl = tcg_temp_new_i64(); 2108 get_cpu_vsr(xbh, xB(ctx->opcode), true); 2109 get_cpu_vsr(xbl, xB(ctx->opcode), false); 2110 2111 tcg_gen_shri_i64(xth, xbh, 23); 2112 tcg_gen_andi_i64(xth, xth, 0xFF000000FF); 2113 set_cpu_vsr(xT(ctx->opcode), xth, true); 2114 tcg_gen_shri_i64(xtl, xbl, 23); 2115 tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); 2116 set_cpu_vsr(xT(ctx->opcode), xtl, false); 2117 2118 tcg_temp_free_i64(xth); 2119 tcg_temp_free_i64(xtl); 2120 tcg_temp_free_i64(xbh); 2121 tcg_temp_free_i64(xbl); 2122} 2123 2124static void gen_xvxexpdp(DisasContext *ctx) 2125{ 2126 TCGv_i64 xth; 2127 TCGv_i64 xtl; 2128 TCGv_i64 xbh; 2129 TCGv_i64 xbl; 2130 2131 if (unlikely(!ctx->vsx_enabled)) { 2132 gen_exception(ctx, POWERPC_EXCP_VSXU); 2133 return; 2134 } 2135 xth = tcg_temp_new_i64(); 2136 xtl = tcg_temp_new_i64(); 2137 xbh = tcg_temp_new_i64(); 2138 xbl = tcg_temp_new_i64(); 2139 get_cpu_vsr(xbh, xB(ctx->opcode), true); 2140 get_cpu_vsr(xbl, xB(ctx->opcode), false); 2141 2142 tcg_gen_extract_i64(xth, xbh, 52, 11); 2143 set_cpu_vsr(xT(ctx->opcode), xth, true); 2144 tcg_gen_extract_i64(xtl, xbl, 52, 11); 2145 set_cpu_vsr(xT(ctx->opcode), xtl, false); 2146 2147 tcg_temp_free_i64(xth); 2148 tcg_temp_free_i64(xtl); 2149 tcg_temp_free_i64(xbh); 2150 tcg_temp_free_i64(xbl); 2151} 2152 2153static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) 2154{ 2155 TCGv_ptr t, b; 2156 2157 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 2158 REQUIRE_VSX(ctx); 2159 2160 t = gen_vsr_ptr(a->xt); 2161 b = gen_vsr_ptr(a->xb); 2162 2163 gen_helper_XVXSIGSP(t, b); 2164 2165 tcg_temp_free_ptr(t); 2166 tcg_temp_free_ptr(b); 2167 2168 return true; 2169} 2170 2171static void gen_xvxsigdp(DisasContext *ctx) 2172{ 2173 TCGv_i64 xth; 2174 TCGv_i64 xtl; 2175 TCGv_i64 xbh; 2176 TCGv_i64 xbl; 2177 TCGv_i64 t0, zr, nan, exp; 2178 2179 if (unlikely(!ctx->vsx_enabled)) { 2180 gen_exception(ctx, POWERPC_EXCP_VSXU); 2181 return; 2182 } 2183 xth = tcg_temp_new_i64(); 2184 xtl = tcg_temp_new_i64(); 2185 xbh = tcg_temp_new_i64(); 2186 xbl = tcg_temp_new_i64(); 2187 get_cpu_vsr(xbh, xB(ctx->opcode), true); 2188 get_cpu_vsr(xbl, xB(ctx->opcode), false); 2189 exp = tcg_temp_new_i64(); 2190 t0 = tcg_temp_new_i64(); 2191 zr = tcg_const_i64(0); 2192 nan = tcg_const_i64(2047); 2193 2194 tcg_gen_extract_i64(exp, xbh, 52, 11); 2195 tcg_gen_movi_i64(t0, 0x0010000000000000); 2196 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); 2197 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); 2198 tcg_gen_deposit_i64(xth, t0, xbh, 0, 52); 2199 set_cpu_vsr(xT(ctx->opcode), xth, true); 2200 2201 tcg_gen_extract_i64(exp, xbl, 52, 11); 2202 tcg_gen_movi_i64(t0, 0x0010000000000000); 2203 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); 2204 tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); 2205 tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); 2206 set_cpu_vsr(xT(ctx->opcode), xtl, false); 2207 2208 tcg_temp_free_i64(t0); 2209 tcg_temp_free_i64(exp); 2210 tcg_temp_free_i64(zr); 2211 tcg_temp_free_i64(nan); 2212 tcg_temp_free_i64(xth); 2213 tcg_temp_free_i64(xtl); 2214 tcg_temp_free_i64(xbh); 2215 tcg_temp_free_i64(xbl); 2216} 2217 2218static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, 2219 int rt, bool store, bool paired) 2220{ 2221 TCGv ea; 2222 TCGv_i64 xt; 2223 MemOp mop; 2224 int rt1, rt2; 2225 2226 xt = tcg_temp_new_i64(); 2227 2228 mop = DEF_MEMOP(MO_UQ); 2229 2230 gen_set_access_type(ctx, ACCESS_INT); 2231 ea = do_ea_calc(ctx, ra, displ); 2232 2233 if (paired && ctx->le_mode) { 2234 rt1 = rt + 1; 2235 rt2 = rt; 2236 } else { 2237 rt1 = rt; 2238 rt2 = rt + 1; 2239 } 2240 2241 if (store) { 2242 get_cpu_vsr(xt, rt1, !ctx->le_mode); 2243 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2244 gen_addr_add(ctx, ea, ea, 8); 2245 get_cpu_vsr(xt, rt1, ctx->le_mode); 2246 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2247 if (paired) { 2248 gen_addr_add(ctx, ea, ea, 8); 2249 get_cpu_vsr(xt, rt2, !ctx->le_mode); 2250 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2251 gen_addr_add(ctx, ea, ea, 8); 2252 get_cpu_vsr(xt, rt2, ctx->le_mode); 2253 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2254 } 2255 } else { 2256 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2257 set_cpu_vsr(rt1, xt, !ctx->le_mode); 2258 gen_addr_add(ctx, ea, ea, 8); 2259 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2260 set_cpu_vsr(rt1, xt, ctx->le_mode); 2261 if (paired) { 2262 gen_addr_add(ctx, ea, ea, 8); 2263 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2264 set_cpu_vsr(rt2, xt, !ctx->le_mode); 2265 gen_addr_add(ctx, ea, ea, 8); 2266 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2267 set_cpu_vsr(rt2, xt, ctx->le_mode); 2268 } 2269 } 2270 2271 tcg_temp_free(ea); 2272 tcg_temp_free_i64(xt); 2273 return true; 2274} 2275 2276static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired) 2277{ 2278 if (paired || a->rt >= 32) { 2279 REQUIRE_VSX(ctx); 2280 } else { 2281 REQUIRE_VECTOR(ctx); 2282 } 2283 2284 return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired); 2285} 2286 2287static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a, 2288 bool store, bool paired) 2289{ 2290 arg_D d; 2291 REQUIRE_VSX(ctx); 2292 2293 if (!resolve_PLS_D(ctx, &d, a)) { 2294 return true; 2295 } 2296 2297 return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired); 2298} 2299 2300static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired) 2301{ 2302 if (paired || a->rt >= 32) { 2303 REQUIRE_VSX(ctx); 2304 } else { 2305 REQUIRE_VECTOR(ctx); 2306 } 2307 2308 return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired); 2309} 2310 2311static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) 2312{ 2313 TCGv ea; 2314 TCGv_i64 xt; 2315 MemOp mop; 2316 2317 if (store) { 2318 REQUIRE_VECTOR(ctx); 2319 } else { 2320 REQUIRE_VSX(ctx); 2321 } 2322 2323 xt = tcg_temp_new_i64(); 2324 mop = DEF_MEMOP(MO_UQ); 2325 2326 gen_set_access_type(ctx, ACCESS_INT); 2327 ea = do_ea_calc(ctx, ra, displ); 2328 2329 if (store) { 2330 get_cpu_vsr(xt, rt + 32, true); 2331 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2332 } else { 2333 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2334 set_cpu_vsr(rt + 32, xt, true); 2335 set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); 2336 } 2337 2338 tcg_temp_free(ea); 2339 tcg_temp_free_i64(xt); 2340 2341 return true; 2342} 2343 2344static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store) 2345{ 2346 return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store); 2347} 2348 2349static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) 2350{ 2351 arg_D d; 2352 2353 if (!resolve_PLS_D(ctx, &d, a)) { 2354 return true; 2355 } 2356 2357 return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store); 2358} 2359 2360static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) 2361{ 2362 TCGv ea; 2363 TCGv_i64 xt; 2364 2365 REQUIRE_VECTOR(ctx); 2366 2367 xt = tcg_temp_new_i64(); 2368 2369 gen_set_access_type(ctx, ACCESS_INT); 2370 ea = do_ea_calc(ctx, ra, displ); 2371 2372 if (store) { 2373 get_cpu_vsr(xt, rt + 32, true); 2374 gen_qemu_st32fs(ctx, xt, ea); 2375 } else { 2376 gen_qemu_ld32fs(ctx, xt, ea); 2377 set_cpu_vsr(rt + 32, xt, true); 2378 set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); 2379 } 2380 2381 tcg_temp_free(ea); 2382 tcg_temp_free_i64(xt); 2383 2384 return true; 2385} 2386 2387static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store) 2388{ 2389 return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store); 2390} 2391 2392static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) 2393{ 2394 arg_D d; 2395 2396 if (!resolve_PLS_D(ctx, &d, a)) { 2397 return true; 2398 } 2399 2400 return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store); 2401} 2402 2403TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false) 2404TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true) 2405TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false) 2406TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true) 2407TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false) 2408TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false) 2409TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true) 2410TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true) 2411TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false) 2412TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false) 2413TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true) 2414TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true) 2415TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false) 2416TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true) 2417TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false) 2418TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true) 2419TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false) 2420TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false) 2421TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true) 2422TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true) 2423 2424static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store) 2425{ 2426 TCGv ea; 2427 TCGv_i64 xt; 2428 2429 REQUIRE_VSX(ctx); 2430 2431 xt = tcg_temp_new_i64(); 2432 2433 gen_set_access_type(ctx, ACCESS_INT); 2434 ea = do_ea_calc(ctx, a->ra , cpu_gpr[a->rb]); 2435 2436 if (store) { 2437 get_cpu_vsr(xt, a->rt, false); 2438 tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); 2439 } else { 2440 tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); 2441 set_cpu_vsr(a->rt, xt, false); 2442 set_cpu_vsr(a->rt, tcg_constant_i64(0), true); 2443 } 2444 2445 tcg_temp_free(ea); 2446 tcg_temp_free_i64(xt); 2447 return true; 2448} 2449 2450TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false) 2451TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false) 2452TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false) 2453TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false) 2454TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true) 2455TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true) 2456TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true) 2457TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true) 2458 2459static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, 2460 int64_t imm) 2461{ 2462 /* 2463 * Instead of processing imm bit-by-bit, we'll skip the computation of 2464 * conjunctions whose corresponding bit is unset. 2465 */ 2466 int bit; 2467 TCGv_i64 conj, disj; 2468 2469 conj = tcg_temp_new_i64(); 2470 disj = tcg_const_i64(0); 2471 2472 /* Iterate over set bits from the least to the most significant bit */ 2473 while (imm) { 2474 /* 2475 * Get the next bit to be processed with ctz64. Invert the result of 2476 * ctz64 to match the indexing used by PowerISA. 2477 */ 2478 bit = 7 - ctz64(imm); 2479 if (bit & 0x4) { 2480 tcg_gen_mov_i64(conj, a); 2481 } else { 2482 tcg_gen_not_i64(conj, a); 2483 } 2484 if (bit & 0x2) { 2485 tcg_gen_and_i64(conj, conj, b); 2486 } else { 2487 tcg_gen_andc_i64(conj, conj, b); 2488 } 2489 if (bit & 0x1) { 2490 tcg_gen_and_i64(conj, conj, c); 2491 } else { 2492 tcg_gen_andc_i64(conj, conj, c); 2493 } 2494 tcg_gen_or_i64(disj, disj, conj); 2495 2496 /* Unset the least significant bit that is set */ 2497 imm &= imm - 1; 2498 } 2499 2500 tcg_gen_mov_i64(t, disj); 2501 2502 tcg_temp_free_i64(conj); 2503 tcg_temp_free_i64(disj); 2504} 2505 2506static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, 2507 TCGv_vec c, int64_t imm) 2508{ 2509 /* 2510 * Instead of processing imm bit-by-bit, we'll skip the computation of 2511 * conjunctions whose corresponding bit is unset. 2512 */ 2513 int bit; 2514 TCGv_vec disj, conj; 2515 2516 disj = tcg_const_zeros_vec_matching(t); 2517 conj = tcg_temp_new_vec_matching(t); 2518 2519 /* Iterate over set bits from the least to the most significant bit */ 2520 while (imm) { 2521 /* 2522 * Get the next bit to be processed with ctz64. Invert the result of 2523 * ctz64 to match the indexing used by PowerISA. 2524 */ 2525 bit = 7 - ctz64(imm); 2526 if (bit & 0x4) { 2527 tcg_gen_mov_vec(conj, a); 2528 } else { 2529 tcg_gen_not_vec(vece, conj, a); 2530 } 2531 if (bit & 0x2) { 2532 tcg_gen_and_vec(vece, conj, conj, b); 2533 } else { 2534 tcg_gen_andc_vec(vece, conj, conj, b); 2535 } 2536 if (bit & 0x1) { 2537 tcg_gen_and_vec(vece, conj, conj, c); 2538 } else { 2539 tcg_gen_andc_vec(vece, conj, conj, c); 2540 } 2541 tcg_gen_or_vec(vece, disj, disj, conj); 2542 2543 /* Unset the least significant bit that is set */ 2544 imm &= imm - 1; 2545 } 2546 2547 tcg_gen_mov_vec(t, disj); 2548 2549 tcg_temp_free_vec(disj); 2550 tcg_temp_free_vec(conj); 2551} 2552 2553static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) 2554{ 2555 static const TCGOpcode vecop_list[] = { 2556 INDEX_op_andc_vec, 0 2557 }; 2558 static const GVecGen4i op = { 2559 .fniv = gen_xxeval_vec, 2560 .fno = gen_helper_XXEVAL, 2561 .fni8 = gen_xxeval_i64, 2562 .opt_opc = vecop_list, 2563 .vece = MO_64 2564 }; 2565 int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa), 2566 xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc); 2567 2568 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2569 REQUIRE_VSX(ctx); 2570 2571 /* Equivalent functions that can be implemented with a single gen_gvec */ 2572 switch (a->imm) { 2573 case 0b00000000: /* true */ 2574 set_cpu_vsr(a->xt, tcg_constant_i64(0), true); 2575 set_cpu_vsr(a->xt, tcg_constant_i64(0), false); 2576 break; 2577 case 0b00000011: /* and(B,A) */ 2578 tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16); 2579 break; 2580 case 0b00000101: /* and(C,A) */ 2581 tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16); 2582 break; 2583 case 0b00001111: /* A */ 2584 tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16); 2585 break; 2586 case 0b00010001: /* and(C,B) */ 2587 tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16); 2588 break; 2589 case 0b00011011: /* C?B:A */ 2590 tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16); 2591 break; 2592 case 0b00011101: /* B?C:A */ 2593 tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16); 2594 break; 2595 case 0b00100111: /* C?A:B */ 2596 tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16); 2597 break; 2598 case 0b00110011: /* B */ 2599 tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16); 2600 break; 2601 case 0b00110101: /* A?C:B */ 2602 tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16); 2603 break; 2604 case 0b00111100: /* xor(B,A) */ 2605 tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16); 2606 break; 2607 case 0b00111111: /* or(B,A) */ 2608 tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16); 2609 break; 2610 case 0b01000111: /* B?A:C */ 2611 tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16); 2612 break; 2613 case 0b01010011: /* A?B:C */ 2614 tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16); 2615 break; 2616 case 0b01010101: /* C */ 2617 tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16); 2618 break; 2619 case 0b01011010: /* xor(C,A) */ 2620 tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16); 2621 break; 2622 case 0b01011111: /* or(C,A) */ 2623 tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16); 2624 break; 2625 case 0b01100110: /* xor(C,B) */ 2626 tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16); 2627 break; 2628 case 0b01110111: /* or(C,B) */ 2629 tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16); 2630 break; 2631 case 0b10001000: /* nor(C,B) */ 2632 tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16); 2633 break; 2634 case 0b10011001: /* eqv(C,B) */ 2635 tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16); 2636 break; 2637 case 0b10100000: /* nor(C,A) */ 2638 tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16); 2639 break; 2640 case 0b10100101: /* eqv(C,A) */ 2641 tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16); 2642 break; 2643 case 0b10101010: /* not(C) */ 2644 tcg_gen_gvec_not(MO_64, xt, xc, 16, 16); 2645 break; 2646 case 0b11000000: /* nor(B,A) */ 2647 tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16); 2648 break; 2649 case 0b11000011: /* eqv(B,A) */ 2650 tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16); 2651 break; 2652 case 0b11001100: /* not(B) */ 2653 tcg_gen_gvec_not(MO_64, xt, xb, 16, 16); 2654 break; 2655 case 0b11101110: /* nand(C,B) */ 2656 tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16); 2657 break; 2658 case 0b11110000: /* not(A) */ 2659 tcg_gen_gvec_not(MO_64, xt, xa, 16, 16); 2660 break; 2661 case 0b11111010: /* nand(C,A) */ 2662 tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16); 2663 break; 2664 case 0b11111100: /* nand(B,A) */ 2665 tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16); 2666 break; 2667 case 0b11111111: /* true */ 2668 set_cpu_vsr(a->xt, tcg_constant_i64(-1), true); 2669 set_cpu_vsr(a->xt, tcg_constant_i64(-1), false); 2670 break; 2671 default: 2672 /* Fallback to compute all conjunctions/disjunctions */ 2673 tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op); 2674 } 2675 2676 return true; 2677} 2678 2679static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, 2680 TCGv_vec c) 2681{ 2682 TCGv_vec tmp = tcg_temp_new_vec_matching(c); 2683 tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); 2684 tcg_gen_bitsel_vec(vece, t, tmp, b, a); 2685 tcg_temp_free_vec(tmp); 2686} 2687 2688static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece) 2689{ 2690 static const TCGOpcode vecop_list[] = { 2691 INDEX_op_sari_vec, 0 2692 }; 2693 static const GVecGen4 ops[4] = { 2694 { 2695 .fniv = gen_xxblendv_vec, 2696 .fno = gen_helper_XXBLENDVB, 2697 .opt_opc = vecop_list, 2698 .vece = MO_8 2699 }, 2700 { 2701 .fniv = gen_xxblendv_vec, 2702 .fno = gen_helper_XXBLENDVH, 2703 .opt_opc = vecop_list, 2704 .vece = MO_16 2705 }, 2706 { 2707 .fniv = gen_xxblendv_vec, 2708 .fno = gen_helper_XXBLENDVW, 2709 .opt_opc = vecop_list, 2710 .vece = MO_32 2711 }, 2712 { 2713 .fniv = gen_xxblendv_vec, 2714 .fno = gen_helper_XXBLENDVD, 2715 .opt_opc = vecop_list, 2716 .vece = MO_64 2717 } 2718 }; 2719 2720 REQUIRE_VSX(ctx); 2721 2722 tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa), 2723 vsr_full_offset(a->xb), vsr_full_offset(a->xc), 2724 16, 16, &ops[vece]); 2725 2726 return true; 2727} 2728 2729TRANS(XXBLENDVB, do_xxblendv, MO_8) 2730TRANS(XXBLENDVH, do_xxblendv, MO_16) 2731TRANS(XXBLENDVW, do_xxblendv, MO_32) 2732TRANS(XXBLENDVD, do_xxblendv, MO_64) 2733 2734static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a, 2735 void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 2736{ 2737 TCGv_ptr xt, xa, xb; 2738 2739 REQUIRE_INSNS_FLAGS2(ctx, ISA300); 2740 REQUIRE_VSX(ctx); 2741 2742 xt = gen_vsr_ptr(a->xt); 2743 xa = gen_vsr_ptr(a->xa); 2744 xb = gen_vsr_ptr(a->xb); 2745 2746 helper(cpu_env, xt, xa, xb); 2747 2748 tcg_temp_free_ptr(xt); 2749 tcg_temp_free_ptr(xa); 2750 tcg_temp_free_ptr(xb); 2751 2752 return true; 2753} 2754 2755TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP) 2756TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP) 2757TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP) 2758TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP) 2759TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP) 2760TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP) 2761TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP) 2762 2763static bool do_helper_X(arg_X *a, 2764 void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 2765{ 2766 TCGv_ptr rt, ra, rb; 2767 2768 rt = gen_avr_ptr(a->rt); 2769 ra = gen_avr_ptr(a->ra); 2770 rb = gen_avr_ptr(a->rb); 2771 2772 helper(cpu_env, rt, ra, rb); 2773 2774 tcg_temp_free_ptr(rt); 2775 tcg_temp_free_ptr(ra); 2776 tcg_temp_free_ptr(rb); 2777 2778 return true; 2779} 2780 2781static bool do_xscmpqp(DisasContext *ctx, arg_X *a, 2782 void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr)) 2783{ 2784 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2785 REQUIRE_VSX(ctx); 2786 2787 return do_helper_X(a, helper); 2788} 2789 2790TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP) 2791TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP) 2792TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP) 2793TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP) 2794TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP) 2795 2796static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a) 2797{ 2798 TCGv_ptr xt, xb; 2799 2800 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2801 REQUIRE_VSX(ctx); 2802 2803 xt = gen_vsr_ptr(a->xt); 2804 xb = gen_vsr_ptr(a->xb); 2805 2806 gen_helper_XVCVSPBF16(cpu_env, xt, xb); 2807 2808 tcg_temp_free_ptr(xt); 2809 tcg_temp_free_ptr(xb); 2810 2811 return true; 2812} 2813 2814static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a) 2815{ 2816 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2817 REQUIRE_VSX(ctx); 2818 2819 tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb), 2820 16, 16, 16); 2821 2822 return true; 2823} 2824 2825 /* 2826 * The PowerISA 3.1 mentions that for the current version of the 2827 * architecture, "the hardware implementation provides the effect of 2828 * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data" 2829 * and "The Accumulators introduce no new logical state at this time" 2830 * (page 501). For now it seems unnecessary to create new structures, 2831 * so ACC[i] is the same as VSRs 4*i to 4*i+3 and therefore 2832 * move to and from accumulators are no-ops. 2833 */ 2834static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a) 2835{ 2836 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2837 REQUIRE_VSX(ctx); 2838 return true; 2839} 2840 2841static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a) 2842{ 2843 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2844 REQUIRE_VSX(ctx); 2845 return true; 2846} 2847 2848static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a) 2849{ 2850 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2851 REQUIRE_VSX(ctx); 2852 tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0); 2853 return true; 2854} 2855 2856static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a, 2857 void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32)) 2858{ 2859 uint32_t mask; 2860 TCGv_ptr xt, xa, xb; 2861 REQUIRE_INSNS_FLAGS2(ctx, ISA310); 2862 REQUIRE_VSX(ctx); 2863 if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) { 2864 gen_invalid(ctx); 2865 return true; 2866 } 2867 2868 xt = gen_acc_ptr(a->xt); 2869 xa = gen_vsr_ptr(a->xa); 2870 xb = gen_vsr_ptr(a->xb); 2871 2872 mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk); 2873 helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask)); 2874 tcg_temp_free_ptr(xt); 2875 tcg_temp_free_ptr(xa); 2876 tcg_temp_free_ptr(xb); 2877 return true; 2878} 2879 2880TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8) 2881TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP) 2882TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4) 2883TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP) 2884TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP) 2885TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2) 2886TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP) 2887TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S) 2888TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP) 2889 2890TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8) 2891TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP) 2892TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4) 2893TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP) 2894TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP) 2895TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2) 2896TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP) 2897TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S) 2898TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP) 2899 2900TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2) 2901TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP) 2902TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN) 2903TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP) 2904TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN) 2905 2906TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2) 2907TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP) 2908TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN) 2909TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP) 2910TRANS(XVF16GER2NN, do_ger, gen_helper_XVF16GER2NN) 2911 2912TRANS(XVF32GER, do_ger, gen_helper_XVF32GER) 2913TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP) 2914TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN) 2915TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP) 2916TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN) 2917 2918TRANS(XVF64GER, do_ger, gen_helper_XVF64GER) 2919TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP) 2920TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN) 2921TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP) 2922TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN) 2923 2924TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2) 2925TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP) 2926TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN) 2927TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP) 2928TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN) 2929 2930TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2) 2931TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP) 2932TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN) 2933TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP) 2934TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN) 2935 2936TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER) 2937TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP) 2938TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN) 2939TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP) 2940TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN) 2941 2942TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER) 2943TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP) 2944TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN) 2945TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP) 2946TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN) 2947 2948#undef GEN_XX2FORM 2949#undef GEN_XX3FORM 2950#undef GEN_XX2IFORM 2951#undef GEN_XX3_RC_FORM 2952#undef GEN_XX3FORM_DM 2953#undef VSX_LOGICAL 2954