/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* Decode slot reserved as illegal: raise the illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

/*
 * Encoding that is only illegal when XLEN >= 64 (the REQUIRE macro
 * bails out -- returns false -- on RV32, letting another decoder
 * pattern claim the encoding there).
 */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

/* LUI: rd = U-type immediate.  Writes to x0 are discarded. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm);
    }
    return true;
}

/* AUIPC: rd = pc of this insn + U-type immediate. */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    }
    return true;
}

/* JAL: direct jump with link; all work done by the shared helper. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

/*
 * JALR: indirect jump to (rs1 + imm) with bit 0 cleared, link in rd.
 * Without the C extension the target must be 4-byte aligned; since the
 * target is only known at run time, emit a conditional branch to a
 * slow path that raises the misaligned-fetch exception.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    /* Compute the target before writing rd: rs1 may equal rd. */
    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        /* Bit 1 set means the target is 2-mod-4: misaligned without RVC. */
        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    if (a->rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
    }
    /* Data-dependent target: no direct goto_tb, use the TB lookup helper. */
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

/*
 * Common conditional-branch expansion.  Emits a two-way TB exit:
 * fall-through (slot 1) to the next insn, taken (slot 0) to pc + imm.
 * A taken branch to a misaligned target traps at translation time,
 * since the target is a compile-time constant here.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    tcg_gen_brcond_tl(cond, src1, src2, l);
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

/*
 * Load rd from memory at rs1 + imm, for XLEN <= 64.
 * The immediate is only folded in when non-zero, and the effective
 * address is passed through the pointer-mask adjustment helper.
 */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* <= 64-bit load: fill the high half by sign- or zero-extension. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

/* Dispatch a load on the current XLEN (128-bit regs need split halves). */
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}
/* LWU (RV64/RV128 only): 32-bit load, zero-extended. */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

/* LDU (RV128 only): 64-bit load, zero-extended into the 128-bit reg. */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

/*
 * Store rs2 to memory at rs1 + imm, for XLEN <= 64.
 * Mirrors gen_load_tl: fold the immediate only when non-zero and run
 * the effective address through the pointer-mask adjustment.
 */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

/*
 * RV128 store: only the low 64 bits of the address are computed (as in
 * gen_load_i128); a 128-bit store is split into two 64-bit accesses.
 */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

/* Dispatch a store on the current XLEN. */
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}

/* ret = (s1 < s2) signed, as 0/1. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

/* ret = (s1 < s2) unsigned, as 0/1. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
}

/* Note: SLTIU sign-extends the immediate, then compares unsigned. */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

/*
 * 128-bit left shift by a constant 0 < shamt < 128, on a register
 * split into 64-bit halves (src1l/src1h -> retl/reth).
 */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole low half shifts past the boundary; low becomes zero. */
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        /* High half takes the bits carried out of the low half. */
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

/* SRLIW body: logical right shift of the low 32 bits, zero-filled. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit logical right shift by a constant (see gen_slli_i128). */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

/* SRAIW body: arithmetic right shift of the low 32 bits, sign-filled. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit arithmetic right shift by a constant. */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        /* High half fills with copies of the sign bit. */
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}

/*
 * 128-bit variable left shift, branch-free.
 * hs = shamt & 64 selects which half-result to use; ls = shamt & 63
 * and rs = (-shamt) & 63 are the complementary per-half shift counts.
 * The movcond on lr drops the cross-half contribution when shamt == 0
 * (rs would be 0, making lr a bogus full copy of src1l).
 * Note shamt is negated in place; callers pass a scratch temp.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* shamt >= 64: low half is zero and the shifted low half becomes high. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
}

/* 128-bit variable logical right shift; same scheme as gen_sll_i128. */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* shamt >= 64: high half is zero and the shifted high half becomes low. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

/*
 * 128-bit variable arithmetic right shift; as gen_srl_i128, except the
 * high half shifts arithmetically and lr is reused to hold the
 * sign-fill value for the shamt >= 64 case.
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

/* W-form ops: narrow the operation length to 32 bits via ctx->ol. */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

/* D-form ops (RV128 only): operate at a 64-bit operation length. */
static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}


static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/*
 * CSR read-only: rd = CSR[rc] with no write-back to the CSR.
 * gen_io_start() is needed under icount since CSR reads may touch
 * timers/counters.
 */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write-only: CSR[rc] = src with no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

/*
 * CSR read-modify-write: rd = old CSR value;
 * CSR[rc] = (old & ~mask) | (src & mask).
 */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

    /*
     * If rd == 0, the insn shall not read the csr, nor cause any of the
     * side effects that might occur on a csr read.
     */
    if (a->rd == 0) {
        return do_csrw(ctx, a->csr, src);
    }

    TCGv mask = tcg_constant_tl(-1);
    return do_csrrw(ctx, a->rd, a->csr, src, mask);
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Set bits: write all-ones through the rs1 mask. */
    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Clear bits: write zeros through the rs1 mask. */
    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    /* Immediate form: rs1 field is a 5-bit zero-extended immediate. */
    TCGv src = tcg_constant_tl(a->rs1);

    /*
     * If rd == 0, the insn shall not read the csr, nor cause any of the
     * side effects that might occur on a csr read.
     */
    if (a->rd == 0) {
        return do_csrw(ctx, a->csr, src);
    }

    TCGv mask = tcg_constant_tl(-1);
    return do_csrrw(ctx, a->rd, a->csr, src, mask);
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}