/*
 * translate-fp.c
 *
 * Standard FPU translation
 */

static inline void gen_reset_fpstatus(void)
{
    gen_helper_reset_fpstatus(cpu_env);
}

static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
    gen_helper_compute_fprf_float64(cpu_env, arg);
    gen_helper_float_check_status(cpu_env);
}

#if defined(TARGET_PPC64)
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
    tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
    tcg_temp_free_i32(tmp);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
{
    tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
}
#endif

/*** Floating-Point arithmetic ***/
#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    TCGv_i64 t3; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    t3 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    get_fpr(t2, rB(ctx->opcode)); \
    gen_helper_f##name(t3, cpu_env, t0, t1, t2); \
    set_fpr(rD(ctx->opcode), t3); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t3); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
    tcg_temp_free_i64(t3); \
}

#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \
_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type);

#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rB(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type);

#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    gen_helper_f##name(t2, cpu_env, t0, t1); \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \
_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type);

#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_helper_compute_fprf_float64(cpu_env, t1); \
    } \
    gen_helper_float_check_status(cpu_env); \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
}

#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t1); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
}
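
/*
 * Each GEN_FLOAT_* wrapper above instantiates both the double-precision
 * (primary opcode 0x3F) and the single-precision (0x3B, name##s) handler
 * from one template; e.g. GEN_FLOAT_AB(add, ...) below expands to gen_fadd()
 * and gen_fadds(), which call gen_helper_fadd()/gen_helper_fadds().  Note
 * that the op1/op2/inval/type parameters are not referenced in the generated
 * bodies; they are presumably only consumed by the opcode tables that
 * register these handlers (not shown in this file).
 */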

/* fadd - fadds */
GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
/* fdiv - fdivs */
GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
/* fmul - fmuls */
GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);

/* fre */
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);

/* fres */
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);

/* frsqrte */
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);

/* frsqrtes */
static void gen_frsqrtes(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_frsqrtes(t1, cpu_env, t0);
    set_fpr(rD(ctx->opcode), t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static bool trans_FSEL(DisasContext *ctx, arg_A *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL);
    REQUIRE_FPU(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_fpr(t0, a->fra);
    get_fpr(t1, a->frb);
    get_fpr(t2, a->frc);

    gen_helper_FSEL(t0, t0, t1, t2);
    set_fpr(a->frt, t0);
    if (a->rc) {
        gen_set_cr1_from_fpscr(ctx);
    }

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    return true;
}

/* fsub - fsubs */
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */

/* fsqrt */
static void gen_fsqrt(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_fsqrt(t1, cpu_env, t0);
    set_fpr(rD(ctx->opcode), t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fsqrts */
static void gen_fsqrts(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_fsqrts(t1, cpu_env, t0);
    set_fpr(rD(ctx->opcode), t1);
    gen_compute_fprf_float64(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
/* fmsub - fmsubs */
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
/* fnmadd - fnmadds */
GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
/* fnmsub - fnmsubs */
GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);

/*** Floating-Point round & convert ***/
/* fctiw */
GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
/* fctiwu */
GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
/* fctiwz */
GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
/* fctiwuz */
GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
/* frsp */
GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
/* fcfid */
GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
/* fcfids */
GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
/* fcfidu */
GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fcfidus */
GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
/* fctid */
GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
/* fctidu */
GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
/* fctidz */
GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
/* fctiduz */
GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);

/* frin */
GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
/* friz */
GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
/* frip */
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
/* frim */
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);

/* ftdiv */
static void gen_ftdiv(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* ftsqrt */
static void gen_ftsqrt(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
    tcg_temp_free_i64(t0);
}

/*** Floating-Point compare ***/

/* fcmpo */
static void gen_fcmpo(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpo(cpu_env, t0, t1, crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fcmpu */
static void gen_fcmpu(DisasContext *ctx)
{
    TCGv_i32 crf;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    crf = tcg_const_i32(crfD(ctx->opcode));
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_fcmpu(cpu_env, t0, t1, crf);
    tcg_temp_free_i32(crf);
    gen_helper_float_check_status(cpu_env);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
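
/*
 * Both compares defer the comparison result and the FPSCR/CR updates to the
 * fcmpo/fcmpu helpers called above.  Per the ISA, the ordered compare (fcmpo)
 * additionally raises VXVC when an operand is a NaN, whereas the unordered
 * compare (fcmpu) only raises VXSNAN for signalling NaNs; that distinction is
 * expected to live in the helpers rather than in this translation code.
 */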

/*** Floating-point move ***/
/* fabs */
/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
static void gen_fabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fmr - fmr. */
/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
static void gen_fmr(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    set_fpr(rD(ctx->opcode), t0);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
}

/* fnabs */
/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
static void gen_fnabs(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_ori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fneg */
/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
static void gen_fneg(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_xori_i64(t1, t0, 1ULL << 63);
    set_fpr(rD(ctx->opcode), t1);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fcpsgn: PowerPC 2.05 specification */
/* XXX: beware that fcpsgn never checks for NaNs nor updates the FPSCR */
static void gen_fcpsgn(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rA(ctx->opcode));
    get_fpr(t1, rB(ctx->opcode));
    /* Keep the sign bit of FRA, take exponent and fraction from FRB. */
    tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
    set_fpr(rD(ctx->opcode), t2);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* fmrgew: FRT = high word of FRA : high word of FRB */
static void gen_fmrgew(DisasContext *ctx)
{
    TCGv_i64 b0;
    TCGv_i64 t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    b0 = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    tcg_gen_shri_i64(b0, t0, 32);
    get_fpr(t0, rA(ctx->opcode));
    tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
    set_fpr(rD(ctx->opcode), t1);
    tcg_temp_free_i64(b0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* fmrgow: FRT = low word of FRA : low word of FRB */
static void gen_fmrgow(DisasContext *ctx)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    get_fpr(t0, rB(ctx->opcode));
    get_fpr(t1, rA(ctx->opcode));
    tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
    set_fpr(rD(ctx->opcode), t2);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/*** Floating-Point status & ctrl register ***/

/* mcrfs */
static void gen_mcrfs(DisasContext *ctx)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmask;
    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
    int bfa;
    int nibble;
    int shift;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    bfa = crfS(ctx->opcode);
    nibble = 7 - bfa;
    shift = 4 * nibble;
    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
    tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
    tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
                     0xf);
    tcg_temp_free(tmp);
    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
    /* Only the exception bits (including FX) should be cleared if read */
    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
                     ~((0xF << shift) & FP_EX_CLEAR_BITS));
    /* FEX and VX need to be updated, so don't set fpscr directly */
    tmask = tcg_const_i32(1 << nibble);
    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
    tcg_temp_free_i32(tmask);
    tcg_temp_free_i64(tnew_fpscr);
}

/* mffs */
static void gen_mffs(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(t0, cpu_fpscr);
    set_fpr(rD(ctx->opcode), t0);
    if (unlikely(Rc(ctx->opcode))) {
        gen_set_cr1_from_fpscr(ctx);
    }
    tcg_temp_free_i64(t0);
}

/* mffsl */
static void gen_mffsl(DisasContext *ctx)
{
    TCGv_i64 t0;

    if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
        return gen_mffs(ctx);
    }

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_reset_fpstatus();
    tcg_gen_extu_tl_i64(t0, cpu_fpscr);
    /* Mask everything except mode, status, and enables. */
    tcg_gen_andi_i64(t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN);
    set_fpr(rD(ctx->opcode), t0);
    tcg_temp_free_i64(t0);
}

static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
{
    TCGv_i64 fpscr = tcg_temp_new_i64();
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(fpscr, cpu_fpscr);
    tcg_gen_andi_i64(fpscr_masked, fpscr, mask);
    set_fpr(rt, fpscr_masked);

    tcg_temp_free_i64(fpscr_masked);

    return fpscr;
}

static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask,
                               TCGv_i64 set_mask, uint32_t store_mask)
{
    TCGv_i64 fpscr_masked = tcg_temp_new_i64();
    TCGv_i32 st_mask = tcg_constant_i32(store_mask);

    tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
    tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
    gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask);

    tcg_temp_free_i64(fpscr_masked);
}
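
/*
 * The two helpers above implement the common "mffs-and-update" pattern used
 * by the MFFSCE/MFFSCRN/MFFSCRNI handlers below: place_from_fpscr() copies a
 * masked view of the FPSCR into FRT and returns the unmasked value, and
 * store_fpscr_masked() writes that value back with some bits cleared and/or
 * set, restricted by store_mask, which selects the FPSCR fields the
 * store_fpscr helper is allowed to modify (analogous to the FM field of
 * mtfsf).
 */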

static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a)
{
    TCGv_i64 fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, UINT64_MAX);
    store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003);

    tcg_temp_free_i64(fpscr);

    return true;
}

static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    get_fpr(t1, a->rb);
    tcg_gen_andi_i64(t1, t1, FP_RN);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(fpscr);

    return true;
}

static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a)
{
    TCGv_i64 t1, fpscr;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_FPU(ctx);

    t1 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t1, a->imm);

    gen_reset_fpstatus();
    fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
    store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(fpscr);

    return true;
}

/* mtfsb0 */
static void gen_mtfsb0(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    gen_reset_fpstatus();
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
        TCGv_i32 t0;
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_clrbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
}

/* mtfsb1 */
static void gen_mtfsb1(DisasContext *ctx)
{
    uint8_t crb;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    crb = 31 - crbD(ctx->opcode);
    /* XXX: we pretend we can only do IEEE floating-point computations */
    if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
        TCGv_i32 t0;
        t0 = tcg_const_i32(crb);
        gen_helper_fpscr_setbit(cpu_env, t0);
        tcg_temp_free_i32(t0);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

/* mtfsf */
static void gen_mtfsf(DisasContext *ctx)
{
    TCGv_i32 t0;
    TCGv_i64 t1;
    int flm, l, w;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    flm = FPFLM(ctx->opcode);
    l = FPL(ctx->opcode);
    w = FPW(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    if (l) {
        t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
    } else {
        t0 = tcg_const_i32(flm << (w * 8));
    }
    t1 = tcg_temp_new_i64();
    get_fpr(t1, rB(ctx->opcode));
    gen_helper_store_fpscr(cpu_env, t1, t0);
    tcg_temp_free_i32(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
    tcg_temp_free_i64(t1);
}

/* mtfsfi */
static void gen_mtfsfi(DisasContext *ctx)
{
    int bf, sh, w;
    TCGv_i64 t0;
    TCGv_i32 t1;

    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    w = FPW(ctx->opcode);
    bf = FPBF(ctx->opcode);
    if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    sh = (8 * w) + 7 - bf;
    t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
    t1 = tcg_const_i32(1 << sh);
    gen_helper_store_fpscr(cpu_env, t0, t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
        tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
    }
    /* We can raise a deferred exception */
    gen_helper_fpscr_check_status(cpu_env);
}

static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
    gen_helper_todouble(dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
    set_fpr(rD(ctx->opcode), t0);
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* lfdp */
static void gen_lfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* lfdpx */
static void gen_lfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    t0 = tcg_temp_new_i64();
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
    } else {
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode), t0);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64_i64(ctx, t0, EA);
        set_fpr(rD(ctx->opcode) + 1, t0);
    }
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* lfiwax */
static void gen_lfiwax(DisasContext *ctx)
{
    TCGv EA;
    TCGv t0;
    TCGv_i64 t1;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new();
    t1 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32s(ctx, t0, EA);
    tcg_gen_ext_tl_i64(t1, t0);
    set_fpr(rD(ctx->opcode), t1);
    tcg_temp_free(EA);
    tcg_temp_free(t0);
    tcg_temp_free_i64(t1);
}

/* lfiwzx */
static void gen_lfiwzx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32u_i64(ctx, t0, EA);
    set_fpr(rD(ctx->opcode), t0);
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}
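
/*
 * GEN_STXF below generates an X-form floating-point store handler named
 * gen_<name>x: it computes the effective address from rA/rB, fetches FRS and
 * emits the given gen_qemu_<stop> store primitive.  In this file it is only
 * instantiated for stfiwx (via gen_qemu_st32fiw, which stores the low 32 bits
 * of the FPR without any FP conversion).
 */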

#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}

static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_tosingle(tmp, src);
    tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
    tcg_temp_free_i32(tmp);
}

/* stfdepx (external PID stfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    CHK_SV;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    get_fpr(t0, rD(ctx->opcode));
    tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* stfdp */
static void gen_stfdp(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_imm_index(ctx, EA, 0);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* stfdpx */
static void gen_stfdpx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->fpu_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_FPU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    EA = tcg_temp_new();
    t0 = tcg_temp_new_i64();
    gen_addr_reg_index(ctx, EA);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * does necessary 64-bit byteswap already.
     */
    if (unlikely(ctx->le_mode)) {
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
    } else {
        get_fpr(t0, rD(ctx->opcode));
        gen_qemu_st64_i64(ctx, t0, EA);
        tcg_gen_addi_tl(EA, EA, 8);
        get_fpr(t0, rD(ctx->opcode) + 1);
        gen_qemu_st64_i64(ctx, t0, EA);
    }
    tcg_temp_free(EA);
    tcg_temp_free_i64(t0);
}

/* Optional: */
static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_trunc_i64_tl(t0, arg1);
    gen_qemu_st32(ctx, t0, arg2);
    tcg_temp_free(t0);
}
/* stfiwx */
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);

/* Floating-point Load/Store Instructions */
static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
                      bool update, bool store, bool single)
{
    TCGv ea;
    TCGv_i64 t0;
    REQUIRE_INSNS_FLAGS(ctx, FLOAT);
    REQUIRE_FPU(ctx);
    if (update && ra == 0) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_FLOAT);
    t0 = tcg_temp_new_i64();
    ea = do_ea_calc(ctx, ra, displ);
    if (store) {
        get_fpr(t0, rt);
        if (single) {
            gen_qemu_st32fs(ctx, t0, ea);
        } else {
            gen_qemu_st64_i64(ctx, t0, ea);
        }
    } else {
        if (single) {
            gen_qemu_ld32fs(ctx, t0, ea);
        } else {
            gen_qemu_ld64_i64(ctx, t0, ea);
        }
        set_fpr(rt, t0);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free(ea);
    return true;
}
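
/*
 * The D-form/X-form/prefixed wrappers and the TRANS() table below all funnel
 * into do_lsfpsd() above: 'update' writes the computed effective address back
 * to rA (invalid when rA is 0), 'store' selects store vs. load, and 'single'
 * selects the 32-bit form with the tosingle/todouble conversion applied.
 */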

static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
                     single);
}

static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, bool single)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_lsfp_D(ctx, &d, update, store, single);
}

static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, bool single)
{
    return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}

TRANS(LFS, do_lsfp_D, false, false, true)
TRANS(LFSU, do_lsfp_D, true, false, true)
TRANS(LFSX, do_lsfp_X, false, false, true)
TRANS(LFSUX, do_lsfp_X, true, false, true)
TRANS(PLFS, do_lsfp_PLS_D, false, false, true)

TRANS(LFD, do_lsfp_D, false, false, false)
TRANS(LFDU, do_lsfp_D, true, false, false)
TRANS(LFDX, do_lsfp_X, false, false, false)
TRANS(LFDUX, do_lsfp_X, true, false, false)
TRANS(PLFD, do_lsfp_PLS_D, false, false, false)

TRANS(STFS, do_lsfp_D, false, true, true)
TRANS(STFSU, do_lsfp_D, true, true, true)
TRANS(STFSX, do_lsfp_X, false, true, true)
TRANS(STFSUX, do_lsfp_X, true, true, true)
TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)

TRANS(STFD, do_lsfp_D, false, true, false)
TRANS(STFDU, do_lsfp_D, true, true, false)
TRANS(STFDX, do_lsfp_X, false, true, false)
TRANS(STFDUX, do_lsfp_X, true, true, false)
TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)

#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
#undef _GEN_FLOAT_AB
#undef GEN_FLOAT_AB
#undef _GEN_FLOAT_AC
#undef GEN_FLOAT_AC
#undef GEN_FLOAT_B
#undef GEN_FLOAT_BS

#undef GEN_LDF
#undef GEN_LDUF
#undef GEN_LDUXF
#undef GEN_LDXF
#undef GEN_LDFS

#undef GEN_STF
#undef GEN_STUF
#undef GEN_STUXF
#undef GEN_STXF
#undef GEN_STFS