/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    uint32_t opcode;
    uint32_t mstatus_fs;
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
} DisasContext;

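/*
 * The funct3 field of a load/store selects the access size and
 * signedness.  For example (illustrative, from the base ISA encoding):
 *   LB  = 000 -> MO_SB     LH  = 001 -> MO_TESW    LW = 010 -> MO_TESL
 *   LBU = 100 -> MO_UB     LHU = 101 -> MO_TEUW
 * and, on RV64 only, LD = 011 -> MO_TEQ and LWU = 110 -> MO_TEUL.
 * Encodings with no legal instruction map to -1 in the table below so
 * that the load/store generators can raise an illegal-instruction
 * exception.
 */
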
/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};

#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

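/*
 * x0 is hard-wired to zero in the ISA, so it has no backing TCG global:
 * reads materialize the constant 0 and writes are simply dropped.  For
 * example, "mv a0, x0" becomes a single movi into a0, while an
 * instruction with rd = x0 computes its result into a temporary that is
 * never stored back.
 */
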
/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated.  This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative: if arg1 is negative, the unsigned product
       counted 2^XLEN * arg2 too much, so subtract arg2 from the high half
       wherever arg1's sign bit was set */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

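/*
 * Sign-injection (FSGNJ/FSGNJN/FSGNJX) copies a sign bit onto rs1's
 * magnitude.  Roughly, for the single-precision forms:
 *   fsgnj.s  rd, rs, rs  ->  fmv.s rd, rs   (pure move)
 *   fsgnjn.s rd, rs, rs  ->  fneg.s rd, rs  (flip the sign bit)
 *   fsgnjx.s rd, rs, rs  ->  fabs.s rd, rs  (clear the sign bit)
 * The general cases deposit rs1's magnitude bits into rs2's value, so
 * rs2 supplies the sign bit (and, in the single-precision layout, the
 * upper NaN-boxing bits).  The "min" parameter below is the sign-bit
 * mask: INT32_MIN selects the single-precision layout, INT64_MIN the
 * double-precision one.
 */
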
static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
                      uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
    }
}

static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                      int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* first, trick to get it to act like working on 32 bits (get rid of
           upper 32, sign extend to fill space) */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
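    /*
     * RISC-V defines results for the division corner cases instead of
     * trapping (illustrative summary of the M-extension rules):
     *   x / 0    = -1 (all bits set)       x % 0    = x
     *   MIN / -1 = MIN (signed overflow)   MIN % -1 = 0
     * Since the TCG div/rem ops would fault on these inputs on most
     * hosts, the cases below steer the operands to values that yield
     * the architected results before emitting the division.
     */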
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
        /* Handle by altering args to tcg_gen_div to produce req'd results:
         * For overflow: want source1 in source1 and 1 in source2
         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                           resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                           resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                           resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
        gen_exception_illegal(ctx);
        /* don't leak the temporaries on the early exit */
        tcg_temp_free(source1);
        tcg_temp_free(source2);
        return;
    }

    if (opc & 0x8) { /* sign extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}

static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLIW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SLLI:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* differentiate on IMM */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions.  */
            opc &= ~0x8;
        }
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        /* don't leak the temporary on the early exit */
        tcg_temp_free(source1);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}

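/*
 * Direct jumps: JAL writes the link (the address of the following
 * instruction) and transfers control pc-relative.  Without the C
 * extension every instruction is 4-byte aligned, so a target with
 * either of its low two bits set must raise an
 * instruction-address-misaligned exception; with RVC, 2-byte alignment
 * is legal, and bit 0 can never be set because the immediate encodings
 * have no bit-0 field.
 */
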
static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
                    target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!riscv_has_ext(env, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!riscv_has_ext(env, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
        }
        tcg_gen_lookup_and_goto_ptr();

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rs1, int rs2, target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        /* don't leak the temporaries on the early exit */
        tcg_temp_free(source1);
        tcg_temp_free(source2);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);

    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
    gen_set_label(l); /* branch taken */
    if (!riscv_has_ext(env, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                     target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        /* don't leak the temporaries on the early exit */
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
                      target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        /* don't leak the temporaries on the early exit */
        tcg_temp_free(t0);
        tcg_temp_free(dat);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}

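/*
 * Why track mstatus.FS at translation time: an OS may use FS for lazy
 * floating-point context switching, skipping the save/restore of the 32
 * FP registers unless a process actually dirtied them.  Each FP-writing
 * instruction therefore has to upgrade FS to dirty.  Caching the current
 * FS value in DisasContext lets a TB with many FP instructions emit the
 * mstatus update only once.
 */
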
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_temp_free(tmp);
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
                        int rs1, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point
           values: the 32-bit value lives in the low half of the 64-bit
           register with the upper half all-ones, e.g. 1.0f (0x3f800000)
           is held as 0xffffffff3f800000 */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
}

static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
                         int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

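/*
 * LR/SC note: the translator keeps the reserved address in load_res and
 * the value observed by LR in load_val.  SC is then emulated with a
 * compare-and-swap: it succeeds iff the location still contains
 * load_val.  An ABA change back to the original value between LR and SC
 * would not be detected, which a hardware reservation would catch, but
 * this cmpxchg-based scheme is the usual TCG approach to emulating
 * load-locked/store-conditional.
 */
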
static void gen_atomic(DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    bool aq, rl;

    /* Extract the size of the atomic operation. */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val.  */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are SC (sequentially
           consistent), so we can ignore AQ/RL along this path.  */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure.  However, we still need to
           provide the memory barrier implied by AQ/RL.  */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are SC (sequentially
           consistent), so we can ignore AQ/RL along this path.  */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMIN:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAX:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMINU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAXU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}

static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

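/*
 * Rounding-mode note: rm comes from the instruction's 3-bit field,
 * where 7 selects "dynamic" rounding taken from the frm CSR.  Because
 * gen_set_rm caches the last installed mode in ctx->frm, a run of FP
 * instructions that all use the same static rounding mode pays for a
 * single set_rounding_mode helper call per translation block.
 */
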
static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

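/*
 * gen_fp_arith handles the whole OP-FP major opcode.  For operations
 * that do not round (sign-injection, min/max, compares, moves, fclass)
 * the rm field is reused as an extra function selector rather than a
 * rounding mode, which is why several cases below switch on rm or rs2.
 * Compares and classifies produce an integer result, so they set
 * fp_output = false and skip marking the FP state dirty.
 */
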
static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;
    bool fp_output = true;

    if (ctx->mstatus_fs == 0) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

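    /*
     * FCLASS_S/FCLASS_D (selected below via rm == 1) return a 10-bit
     * one-hot classification mask:
     *   bit 0: -inf       bit 1: -normal    bit 2: -subnormal
     *   bit 3: -0         bit 4: +0         bit 5: +subnormal
     *   bit 6: +normal    bit 7: +inf       bit 8: signaling NaN
     *   bit 9: quiet NaN
     */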
    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1:
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;

    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
#if defined(TARGET_RISCV64)
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
#endif
        case 1:
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        fp_output = false;
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        return;
    }

    if (fp_output) {
        mark_fs_dirty(ctx);
    }
}

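/*
 * CSR and system instructions.  The csrr{w,s,c}[i] forms go through
 * helpers so that side effects and privilege checks live in one place;
 * rs1_pass carries the raw register number because CSRRS/CSRRC with
 * rs1 = x0 must not write the CSR at all (a plain "csrr rd, csr" is
 * assembled that way).  Every CSR write ends the translation block,
 * since the write may change state that translation depends on
 * (e.g. the MMU index or mstatus.FS).
 */
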
static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */

#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        if (env->priv_ver == PRIV_VERSION_1_10_0) {
            /* sfence.vma */
            /* TODO: handle ASID specific fences */
            gen_helper_tlb_flush(cpu_env);
            /* don't leak the temporaries on the early exit */
            tcg_temp_free(source1);
            tcg_temp_free(csr_store);
            tcg_temp_free(dest);
            tcg_temp_free(rs1_pass);
            tcg_temp_free(imm_rs1);
            return;
        } else {
            gen_exception_illegal(ctx);
        }
    }
#endif

    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (riscv_has_ext(env, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(NULL, 0); /* no chaining */
                ctx->base.is_jmp = DISAS_NORETURN;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            if (env->priv_ver <= PRIV_VERSION_1_09_1) {
                gen_helper_tlb_flush(cpu_env);
            } else {
                gen_exception_illegal(ctx);
            }
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
        tcg_gen_movi_tl(imm_rs1, rs1);
        gen_io_start();
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            /* don't leak the temporaries on the early exit */
            tcg_temp_free(source1);
            tcg_temp_free(csr_store);
            tcg_temp_free(dest);
            tcg_temp_free(rs1_pass);
            tcg_temp_free(imm_rs1);
            return;
        }
        gen_io_end();
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
        tcg_gen_exit_tb(NULL, 0); /* no chaining */
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}

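/*
 * Compressed (RVC) decode.  Bits [1:0] of every instruction select the
 * format: 00, 01 and 10 are the three 16-bit quadrants handled below,
 * while 11 means a full 32-bit instruction.  For example, 0x4501
 * decodes as quadrant 01, funct3 010 -> C.LI with rd = x10/a0 and
 * imm = 0, i.e. "li a0, 0".
 */
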
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2] */
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1') */
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        /* C.LQ(RV128) */
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1') */
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1') */
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        /* C.SQ (RV128) */
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1') */
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1') */
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}

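/*
 * Quadrants 0 and 1 encode most register operands in 3-bit fields
 * (rd', rs1', rs2'), which can only name the eight registers x8-x15;
 * the GET_C_RS1S/GET_C_RS2S macros add the x8 offset back.  E.g. a
 * register field of 0b000 names x8 (s0/fp) and 0b010 names x10 (a0).
 */
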
static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4] */
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rd != {0, 2}) -> lui rd, nzimm[17:12] */
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0] */
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.SUBW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1] */
        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1] */
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1] */
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}

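/*
 * Quadrant 2 is mostly stack-pointer-relative loads and stores, plus
 * the jump/move group under funct3 = 100, which the code below
 * disambiguates via bit 12 and whether rd/rs2 are zero:
 *   bit12 = 0: rs2 == 0 -> C.JR,     else C.MV
 *   bit12 = 1: rd == 0  -> C.EBREAK, rs2 == 0 -> C.JALR, else C.ADD
 */
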
static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0 */
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak */
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0 */
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2) */
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        /* C.SQSP */
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2) */
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(RV64/128) -> sd rs2, offset[8:3](x2) */
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(env, ctx);
        break;
    case 2:
        decode_RV32_64C2(env, ctx);
        break;
    }
}

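/*
 * 32-bit instruction decode.  As a worked example of the U-type
 * immediates handled first below: "lui t0, 0x12345" puts the
 * sign-extended value 0x12345000 in t0, while "auipc t0, 0x12345" adds
 * the same immediate to the pc of the auipc itself, which is how
 * pc-relative addressing of anything beyond +/-2 KiB is built.
 */
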
static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->base.pc_next);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU, however we need to end the
             * translation block so that the next fetch re-looks-up the
             * code and sees any instructions this TB may have written */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            tcg_gen_exit_tb(NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        break;
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void decode_opc(CPURISCVState *env, DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!riscv_has_ext(env, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            decode_RV32_64C(env, ctx);
        }
    } else {
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_RV32_64G(env, ctx);
    }
}

static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
    ctx->frm = -1;  /* unknown rounding mode */
}

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}

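/*
 * A translation block is not allowed to run past the guest page it
 * started on, since the mapping of the following page may differ or
 * change; the check below ends the TB once pc_next has advanced past
 * the page that pc_first started on.
 */
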
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;

    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
    decode_opc(env, ctx);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
}

void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register.  Do not use it.
     * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
     * registers, unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}