/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* We can use a zero immediate as a zero register argument. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
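/*
 * Constant ranges accepted by the letters above (matched in
 * tcg_target_const_match below):
 *   'I' -> S12: signed 12-bit, -0x800 ... 0x7ff (I-type immediates).
 *   'N' -> N12: constants whose negation fits S12, -0x7ff ... 0x800.
 *   'M' -> M12: -0xfff ... 0xfff, usable both as-is and negated.
 *   'Z' -> exactly zero, which can ride on the zero register.
 */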
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */
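/*
 * Each OPC_* value above is the instruction word with all register and
 * immediate fields zero: bits [6:0] hold the major opcode, [14:12]
 * funct3 and [31:25] funct7 (e.g. OPC_SRAI = 0x40005013 is funct7=0x20,
 * funct3=5 on the OP-IMM opcode 0x13).  The encoders below then OR in
 * rd at bits [11:7], rs1 at [19:15], rs2 at [24:20], and scatter the
 * immediate as each format requires.
 */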
/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}

/*
 * Relocations
 */
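/*
 * reloc_call below patches an AUIPC+ADDI pair.  A 32-bit pc-relative
 * offset is split as hi + lo with lo sign-extended from 12 bits, so
 * hi compensates when lo goes negative: e.g. offset 0x1800 gives
 * lo = -0x800 and hi = 0x2000, and AUIPC(+0x2000) plus ADDI(-0x800)
 * lands on pc + 0x1800.
 */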
static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }
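    /*
     * Worked example: val = 0x12345fff sign-extends lo to -1 and
     * leaves hi = 0x12346000, so LUI loads the (rounded-up) high part
     * and the negative ADDIW immediate repairs the low 12 bits.
     */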
    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
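/*
 * Double-word add/sub is expanded with an explicit carry in TMP0: for
 * an add, SLTU of the low-part sum against one addend yields the carry
 * out; for a sub, SLTU computes the borrow (al < bl) before the low
 * parts are subtracted.
 */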
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}
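/*
 * Conditions without a direct SLT form are synthesized by swapping the
 * operands (GT/LE and their unsigned variants) and/or inverting the
 * result with XORI 1 (GE = !LT); EQ/NE test the difference of the
 * operands against zero.
 */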
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;
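    /*
     * FENCE packs the predecessor set in bits [27:24] and the
     * successor set in bits [23:20], each ordered {I,O,R,W} from the
     * top bit down: 0x02200000 is "fence r,r", 0x01100000 "fence w,w".
     */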
    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}

/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
#endif
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}
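/*
 * Fast-path TLB lookup: the mask and table pointers live in
 * CPUTLBDescFast at a small negative offset from env; the page bits of
 * the address, masked, index the table, and a comparator mismatch
 * branches out to the slow path recorded in label_ptr.
 */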
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}

static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);

    tcg_out_goto(s, l->raddr);
    return true;
}
#endif /* CONFIG_SOFTMMU */
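/*
 * Emit the actual load from the already-translated address in 'base'.
 * On RV32 a 64-bit access uses two LW; the ordering below avoids
 * clobbering 'base' when it overlaps the low destination register.
 */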
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_Q:
        /* Prefer to load from offset 0 first, but allow for overlap. */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
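/*
 * The argument block mirrors tcg_out_qemu_ld: data value (one or two
 * registers), address (one or two registers depending on
 * TARGET_LONG_BITS), then the TCGMemOpIdx.
 */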
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg *args, const int *const_args)
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        if (a0 == 0) {
            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;
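    /*
     * Constant second operands use the I-type form.  Subtraction has
     * no immediate encoding and instead adds the negated constant;
     * the 'N' constraint on sub guarantees -a2 fits in 12 bits.
     */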
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;
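    /*
     * Double-word arithmetic expands via tcg_out_addsub2; the 'M'
     * constraint on the constant parts keeps both a constant and its
     * negation encodable as a 12-bit immediate.
     */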
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        g_assert_not_reached();
    }
}
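/*
 * Per-opcode operand constraints; the letter strings are decoded by
 * target_parse_constraint() above.
 */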
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r
        = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r
        = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef rZ_r
        = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef rZ_rZ
        = { .args_ct_str = { "rZ", "rZ" } };
    static const TCGTargetOpDef rZ_rZ_rZ_rZ
        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_r_ri
        = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI
        = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_rZ_rN
        = { .args_ct_str = { "r", "rZ", "rN" } };
    static const TCGTargetOpDef r_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_L
        = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef r_r_L
        = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef r_L_L
        = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef LZ_L
        = { .args_ct_str = { "LZ", "L" } };
    static const TCGTargetOpDef LZ_L_L
        = { .args_ct_str = { "LZ", "L", "L" } };
    static const TCGTargetOpDef LZ_LZ_L
        = { .args_ct_str = { "LZ", "LZ", "L" } };
    static const TCGTargetOpDef LZ_LZ_L_L
        = { .args_ct_str = { "LZ", "LZ", "L", "L" } };
    static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rI;

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return &r_rZ_rN;

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return &r_rZ_rZ;

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &rZ_rZ;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return &r_r_rZ_rZ_rM_rM;

    case INDEX_op_brcond2_i32:
        return &rZ_rZ_rZ_rZ;

    case INDEX_op_setcond2_i32:
        return &r_rZ_rZ_rZ_rZ;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? &r_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
               : &r_r_L_L;
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? &LZ_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
               : &LZ_LZ_L_L;

    default:
        return NULL;
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
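/*
 * Frame layout: outgoing call arguments at sp+0, the TCG temporary
 * buffer above them, then the callee-saved registers at SAVE_OFS.
 * FRAME_SIZE must stay within the 12-bit ADDI immediate used to
 * adjust sp, hence the build bug above.
 */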
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV
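/*
 * The CFA offset in fde_def_cfa is a two-byte uleb128: the low seven
 * bits of FRAME_SIZE with the continuation bit set, then the remaining
 * high bits.
 */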
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}