/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif


static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the ISA field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2, which may need the negative operation,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

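/*
 * Illustrative note on the encoders above: encode_i(OPC_ADDI, TCG_REG_A0,
 * TCG_REG_A1, 1) yields 0x13 | (10 << 7) | (11 << 15) | (1 << 20)
 * = 0x00158513, the standard encoding of "addi a0, a1, 1".  The SB and UJ
 * encoders scatter the immediate bits as the ISA requires; bit 0 of a
 * branch/jump offset is never encoded, since targets are 2-byte aligned.
 */
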
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

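/*
 * Note on the double-word arithmetic below: the carry out of the low half
 * of an addition is recovered as an unsigned comparison, carry = (rl <u bl)
 * after rl = al + bl, and the borrow of a subtraction as (al <u bl); these
 * are exactly the SLTU/SLTIU results that tcg_out_addsub2 folds into the
 * high half.
 */
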
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: offset fits in a signed 20-bit field */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: offset fits in a signed 32-bit field */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;   /* predecessor R, successor R */
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;   /* predecessor W, successor R */
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;   /* predecessor R, successor W */
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;   /* predecessor W, successor W */
    }
    tcg_out32(s, insn);
}

/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_be_ldsl_mmu,
#endif
    [MO_UQ] = helper_be_ldq_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_le_ldsl_mmu,
#endif
    [MO_UQ] = helper_le_ldq_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

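/*
 * Descriptive note on the fast path below: the guest address is shifted down
 * to a CPUTLBEntry index, masked with CPUTLBDescFast.mask and added to
 * CPUTLBDescFast.table.  The guest address masked down to its page and
 * alignment bits is then compared with the entry's addr_read/addr_write;
 * on a hit, the host address is the entry's addend plus the (possibly
 * zero-extended) guest address.
 */
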
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                               TCGReg addrh, MemOpIdx oi,
                               tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
    return TCG_REG_TMP0;
}

static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & MO_SSIZE]);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);

    tcg_out_goto(s, l->raddr);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /* We are expecting a_bits to max out at 7, so we can always use andi. */
    tcg_debug_assert(a_bits < 12);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                                : helper_unaligned_st), true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_UQ:
        /* Prefer to load from offset 0 first, but allow for overlap. */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

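/*
 * The TCGArg vector consumed by tcg_out_qemu_ld/st below is laid out as:
 * data_lo, (data_hi when a 64-bit value is split across 32-bit registers),
 * addr_lo, (addr_hi when the guest address is wider than a host register),
 * followed by the MemOpIdx.
 */
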
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        if (a0 == 0) {
            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}

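/*
 * Note on the operand constraints returned below: the single-letter codes
 * are presumably expanded by the accompanying tcg-target-con-set.h /
 * tcg-target-con-str.h.  By this backend's conventions, 'r' is any general
 * register, 'L' excludes the softmmu helper argument registers
 * (SOFTMMU_RESERVE_REGS), 'Z' accepts the constant zero, and 'I'/'N'/'M'
 * accept the S12/N12/M12 immediate ranges checked in tcg_target_const_match.
 */
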
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_brcond2_i32:
        return C_O0_I4(rZ, rZ, rZ, rZ);

    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, rZ, rZ, rZ, rZ);

    case INDEX_op_qemu_ld_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
    case INDEX_op_qemu_st_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
                : C_O2_I2(r, r, L, L));
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
                : C_O0_I4(LZ, LZ, L, L));

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

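/*
 * Resulting frame layout, derived from the defines above, from the stack
 * pointer upwards:
 *   sp + 0                          outgoing call arguments
 *                                   (TCG_STATIC_CALL_ARGS_SIZE bytes)
 *   sp + TCG_STATIC_CALL_ARGS_SIZE  TCG temporary buffer (TEMP_SIZE bytes,
 *                                   registered via tcg_set_frame below)
 *   sp + SAVE_OFS                   callee-saved registers, including ra
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.
 */
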
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.  Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}