/*
 * RISC-V translation routines for the RV64F Standard Extension.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define REQUIRE_FPU do {\
    if (ctx->mstatus_fs == 0) \
        return false; \
} while (0)

static bool trans_flw(DisasContext *ctx, arg_flw *a)
{
    TCGv_i64 dest;
    TCGv addr;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    addr = get_gpr(ctx, a->rs1, EXT_NONE);
    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }

    dest = cpu_fpr[a->rd];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
    gen_nanbox_s(dest, dest);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
{
    TCGv addr;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    addr = get_gpr(ctx, a->rs1, EXT_NONE);
    if (a->imm) {
        TCGv temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }

    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);

    return true;
}

static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_set_rm(ctx, a->rm);
    gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}
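
/*
 * The arithmetic translators in this group all follow the same pattern:
 * gen_set_rm() (see translate.c) makes softfloat use the instruction's
 * static rounding mode, with rm == DYN deferring to the frm CSR at run
 * time; the helper performs the operation and accumulates exception flags
 * through cpu_env; and mark_fs_dirty() raises mstatus.FS so the FP state
 * is recorded as modified.
 */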

static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_set_rm(ctx, a->rm);
    gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    if (a->rs1 == a->rs2) { /* FMOV */
        gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
    } else { /* FSGNJ */
        TCGv_i64 rs1 = tcg_temp_new_i64();
        TCGv_i64 rs2 = tcg_temp_new_i64();

        gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);

        /* This formulation retains the nanboxing of rs2. */
        tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
        tcg_temp_free_i64(rs1);
        tcg_temp_free_i64(rs2);
    }
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
{
    TCGv_i64 rs1, rs2, mask;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    rs1 = tcg_temp_new_i64();
    gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);

    if (a->rs1 == a->rs2) { /* FNEG */
        tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
    } else {
        rs2 = tcg_temp_new_i64();
        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);

        /*
         * Replace bit 31 in rs1 with inverse in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
        tcg_gen_nor_i64(rs2, rs2, mask);
        tcg_gen_and_i64(rs1, mask, rs1);
        tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);

        tcg_temp_free_i64(rs2);
    }
    tcg_temp_free_i64(rs1);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
{
    TCGv_i64 rs1, rs2;

    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    rs1 = tcg_temp_new_i64();
    gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);

    if (a->rs1 == a->rs2) { /* FABS */
        tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
    } else {
        rs2 = tcg_temp_new_i64();
        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);

        /*
         * Xor bit 31 in rs1 with that in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
        tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);

        tcg_temp_free_i64(rs2);
    }
    tcg_temp_free_i64(rs1);

    mark_fs_dirty(ctx);
    return true;
}
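
/*
 * A note on the three sign-injection translators above: with RVD, a
 * single-precision value occupies the low 32 bits of the 64-bit FP
 * register and is only valid when the upper 32 bits are all ones
 * ("NaN-boxed").  gen_check_nanbox_s() (see translate.c) substitutes the
 * canonical NaN for an improperly boxed operand, and each formulation
 * above is chosen so that the written result stays boxed without an extra
 * gen_nanbox_s() on the destination.
 */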

static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                      cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                      cpu_fpr[a->rs2]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
{
    /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

#if defined(TARGET_RISCV64)
    tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
#else
    tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
#endif

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
{
    /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
    gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);

    mark_fs_dirty(ctx);
    return true;
}
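
/*
 * The remaining translators implement the RV64-only conversions
 * (FCVT.L.S, FCVT.LU.S, FCVT.S.L, FCVT.S.LU).  REQUIRE_64BIT makes them
 * fail to translate on RV32, so they are reported as illegal instructions
 * there.
 */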

static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv dest = dest_gpr(ctx, a->rd);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);

    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);

    mark_fs_dirty(ctx);
    return true;
}