/*
 * translate-fp.c
 *
 * Standard FPU translation
 */

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
    gen_helper_compute_fprf_float64(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif

/*** Floating-Point arithmetic ***/
#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    TCGv_i64 t3; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    t3 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    get_fpr(t2, rB(ctx->opcode)); \
    gen_helper_f##name(t3, cpu_env, t0, t1, t2); \
    set_fpr(rD(ctx->opcode), t3); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t3); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \
_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type);

#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rB(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type);

#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type);

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_helper_compute_fprf_float64(cpu_env, t1); \
    } \
    gen_helper_float_check_status(cpu_env); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t1); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
}

/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);

/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_frsqrtes(t1, cpu_env, t0);
    set_fpr(rD(ctx->opcode), t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

static bool trans_FSEL(DisasContext *ctx, arg_A *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_fpr(t0, a->fra);
    get_fpr(t1, a->frb);
    get_fpr(t2, a->frc);

    gen_helper_FSEL(t0, t0, t1, t2);
    set_fpr(a->frt, t0);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */

static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a,
                            void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 t0, t1;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    gen_reset_fpstatus();
    get_fpr(t0, a->frb);
    helper(t1, cpu_env, t0);
    set_fpr(a->frt, t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(a->rc != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT);
TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS);

/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);

static void gen_ftdiv(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
}

static void gen_ftsqrt(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
}

/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_constant_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpo(cpu_env, t0, t1, crf);
    gen_helper_float_check_status(cpu_env);
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_constant_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpu(cpu_env, t0, t1, crf);
    gen_helper_float_check_status(cpu_env);
}

/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    set_fpr(rD(ctx->opcode), t0);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_ori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_xori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
    set_fpr(rD(ctx->opcode), t2);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
}

/* fmrgew */
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_shri_i64(b0, t0, 32);
    get_fpr(t0, rA(ctx->opcode));
    tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
    set_fpr(rD(ctx->opcode), t1);
}

/* fmrgow */
static void gen_fmrgow(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    get_fpr(t1, rA(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
    set_fpr(rD(ctx->opcode), t2);
}

/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
                     0xf);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_constant_i32(1 << nibble);
    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
}

/* Copy (FPSCR & mask) into FPR rt and return the full FPSCR value. */
static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
{
    TCGv_i64 fpscr = tcg_temp_new_i64();
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(fpscr, cpu_fpscr);
    tcg_gen_andi_i64(fpscr_masked, fpscr, mask);
    set_fpr(rt, fpscr_masked);

    return fpscr;
}

/*
 * Write ((fpscr & ~clear_mask) | set_mask) back, updating only the
 * FPSCR fields selected by store_mask.
 */
static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask,
                               TCGv_i64 set_mask, uint32_t store_mask)
{
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();
    TCGv_i32 st_mask = tcg_constant_i32(store_mask);

    tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
    tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
    gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask);
}

static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a)
{
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, UINT64_MAX);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }
    return true;
}

static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a)
{
    TCGv_i64 fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, UINT64_MAX);
    store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003);
    return true;
}

static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_RN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_DRN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}

static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, a->imm);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
    return true;
}

static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
    return true;
}

static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN);
    return true;
}

/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}

/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    TCGv_i64 t1;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (!l) {
        t0 = tcg_constant_i32(flm << (w * 8));
    } else if (ctx->insns_flags2 & PPC2_ISA205) {
        t0 = tcg_constant_i32(0xffff);
    } else {
        t0 = tcg_constant_i32(0xff);
    }
    t1 = tcg_temp_new_i64();
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_store_fpscr(cpu_env, t1, t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_constant_i32(1 << sh);
    gen_helper_store_fpscr(cpu_env, t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
    gen_helper_todouble(dest, tmp);
}

/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
    set_fpr(rD(ctx->opcode), t0);
}

/* lfdp */
static void gen_lfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}

/* lfdpx */
static void gen_lfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
}

/* lfiwax */
static void gen_lfiwax(DisasContext *ctx)
{
    TCGv EA;
    TCGv t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new();
    t1 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32s(ctx, t0, EA);
    tcg_gen_ext_tl_i64(t1, t0);
    set_fpr(rD(ctx->opcode), t1);
}

/* lfiwzx */
static void gen_lfiwzx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32u_i64(ctx, t0, EA);
    set_fpr(rD(ctx->opcode), t0);
}

#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
}

static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_tosingle(tmp, src);
    tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
}

/* stfdepx (external PID stfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV(ctx);
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    get_fpr(t0, rD(ctx->opcode));
    tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
}

/* stfdp */
static void gen_stfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_imm_index(ctx, EA, 0);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* stfdpx */
static void gen_stfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does the necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
}

/* Optional: */
static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_trunc_i64_tl(t0, arg1);
    gen_qemu_st32(ctx, t0, arg2);
}
/* stfiwx */
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);

/* Floating-point Load/Store Instructions */
static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
                      bool update, bool store, bool single)
{
    TCGv ea;
    TCGv_i64 t0;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    if (update && ra == 0) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    t0 = tcg_temp_new_i64();
    ea = do_ea_calc(ctx, ra, displ);
    if (store) {
        get_fpr(t0, rt);
        if (single) {
            gen_qemu_st32fs(ctx, t0, ea);
        } else {
            gen_qemu_st64_i64(ctx, t0, ea);
        }
    } else {
        if (single) {
            gen_qemu_ld32fs(ctx, t0, ea);
        } else {
            gen_qemu_ld64_i64(ctx, t0, ea);
        }
        set_fpr(rt, t0);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    return true;
}

static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
                     single);
}

static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, bool single)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_lsfp_D(ctx, &d, update, store, single);
}

static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}

TRANS(LFS, do_lsfp_D, false, false, true)
TRANS(LFSU, do_lsfp_D, true, false, true)
TRANS(LFSX, do_lsfp_X, false, false, true)
TRANS(LFSUX, do_lsfp_X, true, false, true)
TRANS(PLFS, do_lsfp_PLS_D, false, false, true)

TRANS(LFD, do_lsfp_D, false, false, false)
TRANS(LFDU, do_lsfp_D, true, false, false)
TRANS(LFDX, do_lsfp_X, false, false, false)
TRANS(LFDUX, do_lsfp_X, true, false, false)
TRANS(PLFD, do_lsfp_PLS_D, false, false, false)

TRANS(STFS, do_lsfp_D, false, true, true)
TRANS(STFSU, do_lsfp_D, true, true, true)
TRANS(STFSX, do_lsfp_X, false, true, true)
TRANS(STFSUX, do_lsfp_X, true, true, true)
TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)

TRANS(STFD, do_lsfp_D, false, true, false)
TRANS(STFDU, do_lsfp_D, true, true, false)
TRANS(STFDX, do_lsfp_X, false, true, false)
TRANS(STFDUX, do_lsfp_X, true, true, false)
TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)

#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
#undef _GEN_FLOAT_AB
#undef GEN_FLOAT_AB
#undef _GEN_FLOAT_AC
#undef GEN_FLOAT_AC
#undef GEN_FLOAT_B
#undef GEN_FLOAT_BS

#undef GEN_LDF
#undef GEN_LDUF
#undef GEN_LDUXF
#undef GEN_LDXF
#undef GEN_LDFS

#undef GEN_STF
#undef GEN_STUF
#undef GEN_STUXF
#undef GEN_STXF
#undef GEN_STFS