/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

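/*
 * Informal summary (the precise checks are in tcg_target_const_match()
 * below): ZERO accepts the constant 0, which can be supplied as the zero
 * register; S12 accepts constants that fit a signed 12-bit immediate
 * (-2048..2047), as used by ADDI and the other I-type instructions; N12
 * accepts constants whose negation fits that range, so a subtraction can
 * be turned into an addition; M12 accepts the -0xfff..0xfff range used
 * for the double-word add/sub operands.
 */
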
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif


static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32.  */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;

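/*
 * Each value above is the instruction word with all register and
 * immediate fields zero; the encoders below OR the operands into it.
 * For example, OPC_SUB = 0x40000033 decomposes as funct7 = 0x20
 * (bits 31:25), funct3 = 0 (bits 14:12) and major opcode 0x33 (OP),
 * while OPC_SRAI = 0x40005013 is major opcode 0x13 (OP-IMM) with
 * funct3 = 5 and bit 30 set to select the arithmetic right shift.
 */
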
/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

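/*
 * Worked example: tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_A0, TCG_REG_A1, 16)
 * assembles "addi a0, a1, 16" as
 * 0x13 | (10 << 7) | (11 << 15) | (16 << 20) = 0x01058513.
 */
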
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

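/*
 * Worked example: tcg_out_movi(s, TCG_TYPE_I32, rd, 0x12345678) takes the
 * lui+addiw path above: lo = sextreg(0x678, 0, 12) = 0x678 and
 * hi = 0x12345000, so it emits "lui rd, 0x12345" followed by
 * "addiw rd, rd, 0x678".  When bit 11 of the low half is set, lo is
 * negative and hi = val - lo is rounded up accordingly, which is why hi
 * is computed by subtraction rather than by masking.
 */
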
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

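/*
 * For example, a load at offset 0x12345 from a base register does not fit
 * the signed 12-bit displacement: tcg_out_ldst() materializes
 * 0x12345 - 0x345 = 0x12000 into TCG_REG_TMP2, adds the base register to
 * it, and then uses the remaining 12-bit displacement 0x345 in the load
 * or store itself.
 */
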
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            /* R-type emitter requires the R-type opcode, not opc_addi. */
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}

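/*
 * Example of the carry handling above, for an addition with register
 * operands: after "add rl, al, bl" the low-part carry-out is recovered
 * with "sltu tmp0, rl, bl" (the unsigned sum wrapped around exactly when
 * the result is smaller than one of the addends), and that bit is folded
 * into the high half by the final add.
 */
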
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;   /* fence r, r */
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;   /* fence w, r */
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;   /* fence r, w */
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;   /* fence w, w */
    }
    tcg_out32(s, insn);
}

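/*
 * In the FENCE encoding, the predecessor set occupies bits 27:24
 * (PI, PO, PR, PW) and the successor set bits 23:20 (SI, SO, SR, SW),
 * so for example 0x02200000 is "fence r,r" and 0x01100000 is "fence w,w".
 */
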
/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_be_ldsl_mmu,
#endif
    [MO_Q]  = helper_be_ldq_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_le_ldsl_mmu,
#endif
    [MO_Q]  = helper_le_ldq_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, MemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}

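/*
 * Worked example of the fast-path index computation above, assuming
 * TARGET_PAGE_BITS = 12 and CPU_TLB_ENTRY_BITS = 5: the guest address is
 * shifted right by 7, masked with CPUTLBDescFast.mask (which already
 * includes the entry-size scaling), and added to CPUTLBDescFast.table,
 * giving the CPUTLBEntry whose comparator and addend are then loaded.
 */
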
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & MO_SSIZE]);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);

    tcg_out_goto(s, l->raddr);
    return true;
}
#endif /* CONFIG_SOFTMMU */

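/*
 * The *_direct helpers below expect the guest address to have been reduced
 * to a single host base register already: the TMP0 result of
 * tcg_out_tlb_load() for softmmu, or a (possibly zero-extended and
 * guest_base-adjusted) address register for user-only mode.
 */
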
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_Q:
        /* Prefer to load from offset 0 first, but allow for overlap.  */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    } else {
        /* Avoid using TMP0 uninitialized when guest_base is zero. */
        base = addr_regl;
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

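/*
 * The TCGArg stream consumed above, and by tcg_out_qemu_st() below, is:
 * data register, optional high data register (64-bit data on a 32-bit
 * host), address register, optional high address register (guest
 * addresses wider than the host registers), then the MemOpIdx.
 */
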
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    } else {
        /* Avoid using TMP0 uninitialized when guest_base is zero. */
        base = addr_regl;
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        if (a0 == 0) {
            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

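    /*
     * RISC-V has no SUBI instruction; the constant cases below rely on
     * the subtraction constraints in tcg_target_op_def() (rN, presumably
     * backed by TCG_CT_CONST_N12) to guarantee that -a2 fits a signed
     * 12-bit immediate, so the operation is emitted as ADDI/ADDIW with
     * the negated constant.
     */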
    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        g_assert_not_reached();
    }
}

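/*
 * A rough legend for the constraint letters used below (assuming the
 * conventional definitions in this backend's tcg-target-con-str.h):
 * 'r' is any general register, 'L' is a general register excluding the
 * softmmu helper argument registers (SOFTMMU_RESERVE_REGS), 'Z' also
 * accepts the constant zero (mapped to x0), and 'I', 'N' and 'M' accept
 * constants in the S12, N12 and M12 ranges defined at the top of this
 * file.
 */
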
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_brcond2_i32:
        return C_O0_I4(rZ, rZ, rZ, rZ);

    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, rZ, rZ, rZ, rZ);

    case INDEX_op_qemu_ld_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
    case INDEX_op_qemu_st_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
               : C_O2_I2(r, r, L, L));
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
               : C_O0_I4(LZ, LZ, L, L));

    default:
        g_assert_not_reached();
    }
}

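/*
 * Reading one entry above as an example: add2/sub2 use
 * C_O2_I4(r, r, rZ, rZ, rM, rM), i.e. two outputs (the low and high
 * halves of the result) and four inputs, where the first operand's
 * halves may be the zero register and the second operand's halves may
 * also be constants in the M12 range handled by tcg_out_addsub2().
 */
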
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

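/*
 * Informal sketch of the resulting frame, from the stack pointer after
 * the prologue's adjustment upwards: TCG_STATIC_CALL_ARGS_SIZE bytes of
 * outgoing call arguments, then TEMP_SIZE bytes of TCG temporary buffer
 * (registered via tcg_set_frame below), then the callee-saved registers
 * at SAVE_OFS with ra stored last, padded up to TCG_TARGET_STACK_ALIGN.
 */
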
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}