/*
 * RISC-V translation routines for the RV64F Standard Extension.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Reject the instruction when the FP unit is disabled (mstatus.FS == 0) --
 * unless Zfinx is enabled, since Zfinx operates on the integer register
 * file and is therefore not gated by mstatus.FS.
 */
#define REQUIRE_FPU do {\
    if (ctx->mstatus_fs == 0) \
        if (!ctx->cfg_ptr->ext_zfinx) \
            return false; \
} while (0)

/*
 * Require single-precision FP support: either the Zfinx extension
 * (FP ops on integer registers) or the full 'F' extension.
 */
#define REQUIRE_ZFINX_OR_F(ctx) do {\
    if (!ctx->cfg_ptr->ext_zfinx) { \
        REQUIRE_EXT(ctx, RVF); \
    } \
} while (0)

/*
 * FLW: load a 32-bit FP value from memory into an FP register.
 * Requires full 'F' (not Zfinx, which has no FP load/store instructions).
 */
static bool trans_flw(DisasContext *ctx, arg_flw *a)
{
    TCGv_i64 dest;
    TCGv addr;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    decode_save_opc(ctx);
    addr = get_address(ctx, a->rs1, a->imm);
    dest = cpu_fpr[a->rd];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
    /* NaN-box the 32-bit value into the 64-bit FP register. */
    gen_nanbox_s(dest, dest);

    mark_fs_dirty(ctx);
    return true;
}

/*
 * FSW: store the low 32 bits of an FP register to memory.
 * Requires full 'F' (not Zfinx, which has no FP load/store instructions).
 */
static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
{
    TCGv addr;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    decode_save_opc(ctx);
    addr = get_address(ctx, a->rs1, a->imm);
    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
    /* No mark_fs_dirty: stores do not modify FP state. */
    return true;
}

/* FMADD.S: dest = (rs1 * rs2) + rs3, with dynamic/static rounding mode. */
static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FMSUB.S: dest = (rs1 * rs2) - rs3. */
static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FNMSUB.S: dest = -(rs1 * rs2) + rs3. */
static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);

    gen_set_rm(ctx, a->rm);
    gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FNMADD.S: dest = -(rs1 * rs2) - rs3. */
static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);

    gen_set_rm(ctx, a->rm);
    gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FADD.S: single-precision addition. */
static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_set_rm(ctx, a->rm);
    gen_helper_fadd_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FSUB.S: single-precision subtraction. */
static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsub_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FMUL.S: single-precision multiplication. */
static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmul_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FDIV.S: single-precision division. */
static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_set_rm(ctx, a->rm);
    gen_helper_fdiv_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FSQRT.S: single-precision square root. */
static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsqrt_s(dest, cpu_env, src1);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/*
 * FSGNJ.S: copy rs1's value with rs2's sign bit.
 * rs1 == rs2 encodes the FMV.S pseudo-instruction (plain register move).
 */
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    if (a->rs1 == a->rs2) { /* FMOV */
        if (!ctx->cfg_ptr->ext_zfinx) {
            /* 'F': canonicalize an improperly nanboxed source. */
            gen_check_nanbox_s(dest, src1);
        } else {
            /* Zfinx: results are sign-extended in the integer register. */
            tcg_gen_ext32s_i64(dest, src1);
        }
    } else { /* FSGNJ */
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

        if (!ctx->cfg_ptr->ext_zfinx) {
            TCGv_i64 rs1 = tcg_temp_new_i64();
            TCGv_i64 rs2 = tcg_temp_new_i64();
            gen_check_nanbox_s(rs1, src1);
            gen_check_nanbox_s(rs2, src2);

            /* This formulation retains the nanboxing of rs2 in normal 'F'. */
            tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);

            tcg_temp_free_i64(rs1);
            tcg_temp_free_i64(rs2);
        } else {
            tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
            tcg_gen_ext32s_i64(dest, dest);
        }
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/*
 * FSGNJN.S: copy rs1's value with the inverse of rs2's sign bit.
 * rs1 == rs2 encodes the FNEG.S pseudo-instruction.
 */
static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
{
    TCGv_i64 rs1, rs2, mask;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    rs1 = tcg_temp_new_i64();
    if (!ctx->cfg_ptr->ext_zfinx) {
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }
    if (a->rs1 == a->rs2) { /* FNEG */
        tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();
        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Replace bit 31 in rs1 with inverse in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
        tcg_gen_nor_i64(rs2, rs2, mask);
        tcg_gen_and_i64(dest, mask, rs1);
        tcg_gen_or_i64(dest, dest, rs2);

        tcg_temp_free_i64(rs2);
    }
    /* Sign-extend instead of nanboxing the result when Zfinx is enabled. */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    tcg_temp_free_i64(rs1);
    mark_fs_dirty(ctx);
    return true;
}

/*
 * FSGNJX.S: copy rs1's value with its sign bit XORed with rs2's.
 * rs1 == rs2 encodes the FABS.S pseudo-instruction.
 */
static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
{
    TCGv_i64 rs1, rs2;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    rs1 = tcg_temp_new_i64();

    if (!ctx->cfg_ptr->ext_zfinx) {
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }

    if (a->rs1 == a->rs2) { /* FABS */
        tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();

        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Xor bit 31 in rs1 with that in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
        tcg_gen_xor_i64(dest, rs1, dest);

        tcg_temp_free_i64(rs2);
    }
    /* Sign-extend instead of nanboxing the result when Zfinx is enabled. */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    tcg_temp_free_i64(rs1);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FMIN.S: single-precision minimum (no rounding mode involved). */
static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_helper_fmin_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FMAX.S: single-precision maximum (no rounding mode involved). */
static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_helper_fmax_s(dest, cpu_env, src1, src2);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FCVT.W.S: convert single-precision float to signed 32-bit integer. */
static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_w_s(dest, cpu_env, src1);
    gen_set_gpr(ctx, a->rd, dest);
    /* No mark_fs_dirty: the destination is an integer register. */
    return true;
}

/* FCVT.WU.S: convert single-precision float to unsigned 32-bit integer. */
static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_wu_s(dest, cpu_env, src1);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/*
 * FMV.X.W: move the raw low 32 FP-register bits to an integer register,
 * sign-extended on RV64.
 */
static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
{
    /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
#if defined(TARGET_RISCV64)
    tcg_gen_ext32s_tl(dest, src1);
#else
    /* RV32: TCGv is 32-bit, so just truncate the i64 source. */
    tcg_gen_extrl_i64_i32(dest, src1);
#endif

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FEQ.S: quiet floating-point equality; writes 0/1 to a GPR. */
static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_helper_feq_s(dest, cpu_env, src1, src2);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FLT.S: floating-point less-than; writes 0/1 to a GPR. */
static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_helper_flt_s(dest, cpu_env, src1, src2);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FLE.S: floating-point less-than-or-equal; writes 0/1 to a GPR. */
static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

    gen_helper_fle_s(dest, cpu_env, src1, src2);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FCLASS.S: classify the FP value; writes a one-hot class mask to a GPR. */
static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_helper_fclass_s(dest, cpu_env, src1);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FCVT.S.W: convert signed 32-bit integer to single-precision float. */
static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_w(dest, cpu_env, src);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FCVT.S.WU: convert unsigned 32-bit integer to single-precision float. */
static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_wu(dest, cpu_env, src);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FMV.W.X: move raw GPR bits into an FP register, NaN-boxed. */
static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
{
    /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    tcg_gen_extu_tl_i64(dest, src);
    gen_nanbox_s(dest, dest);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FCVT.L.S (RV64 only): convert float to signed 64-bit integer. */
static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_l_s(dest, cpu_env, src1);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FCVT.LU.S (RV64 only): convert float to unsigned 64-bit integer. */
static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_lu_s(dest, cpu_env, src1);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* FCVT.S.L (RV64 only): convert signed 64-bit integer to float. */
static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_l(dest, cpu_env, src);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}

/* FCVT.S.LU (RV64 only): convert unsigned 64-bit integer to float. */
static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_lu(dest, cpu_env, src);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}