/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif

#define sextreg  sextract64

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the ISA field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2, which may need the negative operation,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    return 0;
}
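
/*
 * For instance, +0x7ff satisfies S12, N12 and M12; +0x800 satisfies
 * only N12, since SUB with constant +0x800 becomes ADDI with -0x800;
 * and -0x800 satisfies only S12, since its negation +0x800 does not
 * fit in 12 signed bits.
 */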

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP = OPC_ADDI,   /* nop = addi r0,r0,0 */
} RISCVInsn;

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xfe0) << 20;
    ret |= (imm & 0x1f) << 7;

    return ret;
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}
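
/*
 * B-type immediate bit scattering, per the RISC-V spec: imm[12] goes
 * to insn bit 31, imm[10:5] to bits 30:25, imm[4:1] to bits 11:8, and
 * imm[11] to bit 7.  Bit 0 of the offset is implicitly zero.
 */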

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}
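
/*
 * J-type immediate bit scattering, per the RISC-V spec: imm[20] goes
 * to insn bit 31, imm[10:1] to bits 30:21, imm[11] to bit 20, and
 * imm[19:12] to bits 19:12.  As with branches, bit 0 is implicit.
 */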

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
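
/*
 * The hi/lo split above matches the AUIPC+ADDI (or JALR/LD) pairs that
 * use R_RISCV_CALL: lo is the offset sign-extended from 12 bits, so
 * hi = offset - lo is a multiple of 0x1000 that AUIPC can add.  When
 * bit 11 of the offset is set, lo goes negative and hi rounds up,
 * e.g. offset 0x1800 splits into hi = 0x2000, lo = -0x800.
 */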

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
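
/*
 * Example of the LUI+ADDIW case above: val = 0x12345678 splits into
 * lo = 0x678 and hi = 0x12345000, emitting
 *     lui   rd, 0x12345
 *     addiw rd, rd, 0x678
 */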

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
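
/*
 * RISC-V has no carry flag, so tcg_out_addsub2 recomputes the low-part
 * carry explicitly in TCG_REG_TMP0: after an add, carry-out is
 * (result < operand) unsigned; before a sub, the borrow is (al < bl)
 * unsigned; for the doubling case it is simply the input msb.
 */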

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
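
/*
 * FENCE places the predecessor set in insn bits 27:24 (PI|PO|PR|PW)
 * and the successor set in bits 23:20 (SI|SO|SR|SW); thus 0x02200000
 * is "fence r,r", 0x01200000 "fence w,r", 0x02100000 "fence r,w" and
 * 0x01100000 "fence w,w".
 */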

/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_be_ldsl_mmu,
#endif
    [MO_UQ] = helper_be_ldq_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_le_ldsl_mmu,
#endif
    [MO_UQ] = helper_le_ldq_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* We expect to use a 12-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
                               tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
    }

    /* Compare masked address with the TLB entry.  */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addr);
        addr = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
    return TCG_REG_TMP0;
}
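
/*
 * The comparison above also catches misalignment: e.g. with
 * TARGET_PAGE_BITS = 12 and a_bits = 3, compare_mask keeps the virtual
 * page number plus address bits [2:0], so any set alignment bit makes
 * the masked address differ from the page-aligned comparator and sends
 * the access to the slow path.
 */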

static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType data_type, TCGReg data_reg,
                                TCGReg addr_reg, void *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = data_type;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movext(s, s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, a2,
                   l->type, s_bits, l->datalo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /* We are expecting a_bits to max out at 7, so we can always use andi.  */
    tcg_debug_assert(a_bits < 12);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
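
/*
 * For example, a 32-bit access with a_bits = 2 yields a_mask = 3 and
 * the pair "andi tmp, addr, 3; bne tmp, zero, <slow path>", where tmp
 * stands for TCG_REG_TMP1.
 */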

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                                : helper_unaligned_st), true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGReg base;

#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];

    base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
                        s->code_ptr, label_ptr);
#else
    unsigned a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_reg, a_bits);
    }
    base = addr_reg;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGReg base;

#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];

    base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_reg, base, opc);
    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
                        s->code_ptr, label_ptr);
#else
    unsigned a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_reg, a_bits);
    }
    base = addr_reg;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_st_direct(s, data_reg, base, opc);
#endif
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target.  */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch.  */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
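
/*
 * The patch above is a single aligned 4-byte store, so a concurrently
 * executing cpu thread observes either the old or the new instruction
 * in full; flush_idcache_range then publishes the change.
 */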

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, L);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(LZ, L);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
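
/*
 * Frame layout, from low to high addresses: TCG_STATIC_CALL_ARGS_SIZE
 * bytes of outgoing-call scratch at sp+0, then TEMP_SIZE bytes for TCG
 * spills (registered via tcg_set_frame below), then the callee-saved
 * registers at SAVE_OFS, with the total rounded up to
 * TCG_TARGET_STACK_ALIGN.
 */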

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}