/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    if (a->rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm);
    }
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    if (a->rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next);
    }
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    if (a->rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
    }
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    tcg_gen_brcond_tl(cond, src1, src2, l);
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
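
/*
 * Loads and stores share one pattern: compute the effective address
 * rs1 + imm, pass it through the pointer-masking adjustment
 * (gen_pm_adjust_address), then perform the access with the MemOp
 * chosen by the individual trans_* handler.
 */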
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64BIT(ctx);
    return gen_load(ctx, a, MO_TEQ);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64BIT(ctx);
    return gen_store(ctx, a, MO_TEQ);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_andi_tl);
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
}
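
/*
 * 32-bit variants of the shift-immediate operations, used directly by the
 * RV64 *W handlers and, via gen_shift_imm_fn_per_ol, whenever the effective
 * operand length is 32 bits: a 32-bit logical (srliw) or arithmetic (sraiw)
 * right shift is emitted as an unsigned/signed extract of the upper
 * 32 - shamt bits.
 */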
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_xor_tl);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_and_tl);
}
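
/*
 * The *W forms below are RV64-only (REQUIRE_64BIT) word operations:
 * forcing ctx->ol to MXL_RV32 makes the shared arithmetic/shift helpers
 * work on a 32-bit operand length, and the 32-bit result is written back
 * to rd sign-extended, as the ISA requires.
 */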
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

    /*
     * If rd == 0, the insn shall not read the csr, nor cause any of the
     * side effects that might occur on a csr read.
     */
    if (a->rd == 0) {
        return do_csrw(ctx, a->csr, src);
    }

    TCGv mask = tcg_constant_tl(-1);
    return do_csrrw(ctx, a->rd, a->csr, src, mask);
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    TCGv src = tcg_constant_tl(a->rs1);

    /*
     * If rd == 0, the insn shall not read the csr, nor cause any of the
     * side effects that might occur on a csr read.
     */
    if (a->rd == 0) {
        return do_csrw(ctx, a->csr, src);
    }

    TCGv mask = tcg_constant_tl(-1);
    return do_csrrw(ctx, a->rd, a->csr, src, mask);
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}