/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm);
    }
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    }
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    if (a->rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
    }
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

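            /* rl now holds the third word: 1 if {ah:al} >= {bh:bl}, else 0 */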
            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
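    /* With RV128, rd is a register pair, so use the 128-bit load helper. */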
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

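/*
 * ADDD, ADDID and SUBD are RV128-only: like ADDW/SUBW on RV64 they use a
 * narrower (64-bit) operand length, so ctx->ol is overridden to MXL_RV64.
 */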
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml = tcg_constant_tl(imm);
    TCGv immh = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

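    /* If shamt & 64, the result is {sign of src1h, h1}; otherwise {h1, h0}. */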
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}


static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

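/*
 * The CSR read/modify/write helpers take an explicit write mask: bits set
 * in the mask are written from src, the remaining bits keep the old CSR
 * value.  CSRRW therefore passes an all-ones mask, CSRRS an all-ones src
 * with rs1 as the mask, and CSRRC a zero src with rs1 as the mask.
 */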
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    if (get_xl(ctx) < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    if (get_xl(ctx) < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}