/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
#define TCG_CT_CONST_J12   0x1000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)

#define sextreg  sextract64

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the isa field.
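     * (This is the signed 12-bit immediate of the I-type encoding, shared
     * by ADDI, ANDI, ORI, XORI, SLTI and the load instructions, so most
     * two-operand ops can fold such a constant directly.)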
146 */ 147 if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) { 148 return 1; 149 } 150 /* 151 * Sign extended from 12 bits, negated: [-0x7ff, 0x800]. 152 * Used for subtraction, where a constant must be handled by ADDI. 153 */ 154 if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) { 155 return 1; 156 } 157 /* 158 * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff]. 159 * Used by addsub2 and movcond, which may need the negative value, 160 * and requires the modified constant to be representable. 161 */ 162 if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) { 163 return 1; 164 } 165 /* 166 * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff]. 167 * Used to map ANDN back to ANDI, etc. 168 */ 169 if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) { 170 return 1; 171 } 172 return 0; 173} 174 175/* 176 * RISC-V Base ISA opcodes (IM) 177 */ 178 179typedef enum { 180 OPC_ADD = 0x33, 181 OPC_ADDI = 0x13, 182 OPC_AND = 0x7033, 183 OPC_ANDI = 0x7013, 184 OPC_AUIPC = 0x17, 185 OPC_BEQ = 0x63, 186 OPC_BGE = 0x5063, 187 OPC_BGEU = 0x7063, 188 OPC_BLT = 0x4063, 189 OPC_BLTU = 0x6063, 190 OPC_BNE = 0x1063, 191 OPC_DIV = 0x2004033, 192 OPC_DIVU = 0x2005033, 193 OPC_JAL = 0x6f, 194 OPC_JALR = 0x67, 195 OPC_LB = 0x3, 196 OPC_LBU = 0x4003, 197 OPC_LD = 0x3003, 198 OPC_LH = 0x1003, 199 OPC_LHU = 0x5003, 200 OPC_LUI = 0x37, 201 OPC_LW = 0x2003, 202 OPC_LWU = 0x6003, 203 OPC_MUL = 0x2000033, 204 OPC_MULH = 0x2001033, 205 OPC_MULHSU = 0x2002033, 206 OPC_MULHU = 0x2003033, 207 OPC_OR = 0x6033, 208 OPC_ORI = 0x6013, 209 OPC_REM = 0x2006033, 210 OPC_REMU = 0x2007033, 211 OPC_SB = 0x23, 212 OPC_SD = 0x3023, 213 OPC_SH = 0x1023, 214 OPC_SLL = 0x1033, 215 OPC_SLLI = 0x1013, 216 OPC_SLT = 0x2033, 217 OPC_SLTI = 0x2013, 218 OPC_SLTIU = 0x3013, 219 OPC_SLTU = 0x3033, 220 OPC_SRA = 0x40005033, 221 OPC_SRAI = 0x40005013, 222 OPC_SRL = 0x5033, 223 OPC_SRLI = 0x5013, 224 OPC_SUB = 0x40000033, 225 OPC_SW = 0x2023, 226 OPC_XOR = 0x4033, 227 OPC_XORI = 0x4013, 228 229 OPC_ADDIW = 0x1b, 230 OPC_ADDW = 0x3b, 231 OPC_DIVUW = 0x200503b, 232 OPC_DIVW = 0x200403b, 233 OPC_MULW = 0x200003b, 234 OPC_REMUW = 0x200703b, 235 OPC_REMW = 0x200603b, 236 OPC_SLLIW = 0x101b, 237 OPC_SLLW = 0x103b, 238 OPC_SRAIW = 0x4000501b, 239 OPC_SRAW = 0x4000503b, 240 OPC_SRLIW = 0x501b, 241 OPC_SRLW = 0x503b, 242 OPC_SUBW = 0x4000003b, 243 244 OPC_FENCE = 0x0000000f, 245 OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */ 246 247 /* Zba: Bit manipulation extension, address generation */ 248 OPC_ADD_UW = 0x0800003b, 249 250 /* Zbb: Bit manipulation extension, basic bit manipulation */ 251 OPC_ANDN = 0x40007033, 252 OPC_CLZ = 0x60001013, 253 OPC_CLZW = 0x6000101b, 254 OPC_CPOP = 0x60201013, 255 OPC_CPOPW = 0x6020101b, 256 OPC_CTZ = 0x60101013, 257 OPC_CTZW = 0x6010101b, 258 OPC_ORN = 0x40006033, 259 OPC_REV8 = 0x6b805013, 260 OPC_ROL = 0x60001033, 261 OPC_ROLW = 0x6000103b, 262 OPC_ROR = 0x60005033, 263 OPC_RORW = 0x6000503b, 264 OPC_RORI = 0x60005013, 265 OPC_RORIW = 0x6000501b, 266 OPC_SEXT_B = 0x60401013, 267 OPC_SEXT_H = 0x60501013, 268 OPC_XNOR = 0x40004033, 269 OPC_ZEXT_H = 0x0800403b, 270 271 /* Zicond: integer conditional operations */ 272 OPC_CZERO_EQZ = 0x0e005033, 273 OPC_CZERO_NEZ = 0x0e007033, 274} RISCVInsn; 275 276/* 277 * RISC-V immediate and instruction encoders (excludes 16-bit RVC) 278 */ 279 280/* Type-R */ 281 282static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2) 283{ 284 return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20; 285} 286 287/* 
Type-I */ 288 289static int32_t encode_imm12(uint32_t imm) 290{ 291 return (imm & 0xfff) << 20; 292} 293 294static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm) 295{ 296 return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm); 297} 298 299/* Type-S */ 300 301static int32_t encode_simm12(uint32_t imm) 302{ 303 int32_t ret = 0; 304 305 ret |= (imm & 0xFE0) << 20; 306 ret |= (imm & 0x1F) << 7; 307 308 return ret; 309} 310 311static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) 312{ 313 return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm); 314} 315 316/* Type-SB */ 317 318static int32_t encode_sbimm12(uint32_t imm) 319{ 320 int32_t ret = 0; 321 322 ret |= (imm & 0x1000) << 19; 323 ret |= (imm & 0x7e0) << 20; 324 ret |= (imm & 0x1e) << 7; 325 ret |= (imm & 0x800) >> 4; 326 327 return ret; 328} 329 330static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) 331{ 332 return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm); 333} 334 335/* Type-U */ 336 337static int32_t encode_uimm20(uint32_t imm) 338{ 339 return imm & 0xfffff000; 340} 341 342static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm) 343{ 344 return opc | (rd & 0x1f) << 7 | encode_uimm20(imm); 345} 346 347/* Type-UJ */ 348 349static int32_t encode_ujimm20(uint32_t imm) 350{ 351 int32_t ret = 0; 352 353 ret |= (imm & 0x0007fe) << (21 - 1); 354 ret |= (imm & 0x000800) << (20 - 11); 355 ret |= (imm & 0x0ff000) << (12 - 12); 356 ret |= (imm & 0x100000) << (31 - 20); 357 358 return ret; 359} 360 361static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) 362{ 363 return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); 364} 365 366/* 367 * RISC-V instruction emitters 368 */ 369 370static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc, 371 TCGReg rd, TCGReg rs1, TCGReg rs2) 372{ 373 tcg_out32(s, encode_r(opc, rd, rs1, rs2)); 374} 375 376static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc, 377 TCGReg rd, TCGReg rs1, TCGArg imm) 378{ 379 tcg_out32(s, encode_i(opc, rd, rs1, imm)); 380} 381 382static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc, 383 TCGReg rs1, TCGReg rs2, uint32_t imm) 384{ 385 tcg_out32(s, encode_s(opc, rs1, rs2, imm)); 386} 387 388static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc, 389 TCGReg rs1, TCGReg rs2, uint32_t imm) 390{ 391 tcg_out32(s, encode_sb(opc, rs1, rs2, imm)); 392} 393 394static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc, 395 TCGReg rd, uint32_t imm) 396{ 397 tcg_out32(s, encode_u(opc, rd, imm)); 398} 399 400static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc, 401 TCGReg rd, uint32_t imm) 402{ 403 tcg_out32(s, encode_uj(opc, rd, imm)); 404} 405 406static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 407{ 408 int i; 409 for (i = 0; i < count; ++i) { 410 p[i] = OPC_NOP; 411 } 412} 413 414/* 415 * Relocations 416 */ 417 418static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 419{ 420 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 421 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 422 423 tcg_debug_assert((offset & 1) == 0); 424 if (offset == sextreg(offset, 0, 12)) { 425 *src_rw |= encode_sbimm12(offset); 426 return true; 427 } 428 429 return false; 430} 431 432static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 433{ 434 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 435 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 436 437 tcg_debug_assert((offset & 
1) == 0); 438 if (offset == sextreg(offset, 0, 20)) { 439 *src_rw |= encode_ujimm20(offset); 440 return true; 441 } 442 443 return false; 444} 445 446static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 447{ 448 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 449 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 450 int32_t lo = sextreg(offset, 0, 12); 451 int32_t hi = offset - lo; 452 453 if (offset == hi + lo) { 454 src_rw[0] |= encode_uimm20(hi); 455 src_rw[1] |= encode_imm12(lo); 456 return true; 457 } 458 459 return false; 460} 461 462static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 463 intptr_t value, intptr_t addend) 464{ 465 tcg_debug_assert(addend == 0); 466 switch (type) { 467 case R_RISCV_BRANCH: 468 return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value); 469 case R_RISCV_JAL: 470 return reloc_jimm20(code_ptr, (tcg_insn_unit *)value); 471 case R_RISCV_CALL: 472 return reloc_call(code_ptr, (tcg_insn_unit *)value); 473 default: 474 g_assert_not_reached(); 475 } 476} 477 478/* 479 * TCG intrinsics 480 */ 481 482static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 483{ 484 if (ret == arg) { 485 return true; 486 } 487 switch (type) { 488 case TCG_TYPE_I32: 489 case TCG_TYPE_I64: 490 tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0); 491 break; 492 default: 493 g_assert_not_reached(); 494 } 495 return true; 496} 497 498static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 499 tcg_target_long val) 500{ 501 tcg_target_long lo, hi, tmp; 502 int shift, ret; 503 504 if (type == TCG_TYPE_I32) { 505 val = (int32_t)val; 506 } 507 508 lo = sextreg(val, 0, 12); 509 if (val == lo) { 510 tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo); 511 return; 512 } 513 514 hi = val - lo; 515 if (val == (int32_t)val) { 516 tcg_out_opc_upper(s, OPC_LUI, rd, hi); 517 if (lo != 0) { 518 tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo); 519 } 520 return; 521 } 522 523 tmp = tcg_pcrel_diff(s, (void *)val); 524 if (tmp == (int32_t)tmp) { 525 tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); 526 tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0); 527 ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val); 528 tcg_debug_assert(ret == true); 529 return; 530 } 531 532 /* Look for a single 20-bit section. */ 533 shift = ctz64(val); 534 tmp = val >> shift; 535 if (tmp == sextreg(tmp, 0, 20)) { 536 tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12); 537 if (shift > 12) { 538 tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12); 539 } else { 540 tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift); 541 } 542 return; 543 } 544 545 /* Look for a few high zero bits, with lots of bits set in the middle. */ 546 shift = clz64(val); 547 tmp = val << shift; 548 if (tmp == sextreg(tmp, 12, 20) << 12) { 549 tcg_out_opc_upper(s, OPC_LUI, rd, tmp); 550 tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); 551 return; 552 } else if (tmp == sextreg(tmp, 0, 12)) { 553 tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp); 554 tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); 555 return; 556 } 557 558 /* Drop into the constant pool. */ 559 new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0); 560 tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); 561 tcg_out_opc_imm(s, OPC_LD, rd, rd, 0); 562} 563 564static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 565{ 566 return false; 567} 568 569static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 570 tcg_target_long imm) 571{ 572 /* This function is only used for passing structs by reference. 
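       This backend is not expected to use TCG_CALL_ARG_BY_REF, so reaching
       here would indicate a bug; hence the assert below.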
*/ 573 g_assert_not_reached(); 574} 575 576static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 577{ 578 tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff); 579} 580 581static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 582{ 583 if (cpuinfo & CPUINFO_ZBB) { 584 tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO); 585 } else { 586 tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); 587 tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16); 588 } 589} 590 591static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 592{ 593 if (cpuinfo & CPUINFO_ZBA) { 594 tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO); 595 } else { 596 tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32); 597 tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32); 598 } 599} 600 601static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 602{ 603 if (cpuinfo & CPUINFO_ZBB) { 604 tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0); 605 } else { 606 tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24); 607 tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24); 608 } 609} 610 611static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 612{ 613 if (cpuinfo & CPUINFO_ZBB) { 614 tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0); 615 } else { 616 tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); 617 tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16); 618 } 619} 620 621static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 622{ 623 tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0); 624} 625 626static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 627{ 628 if (ret != arg) { 629 tcg_out_ext32s(s, ret, arg); 630 } 631} 632 633static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 634{ 635 tcg_out_ext32u(s, ret, arg); 636} 637 638static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 639{ 640 tcg_out_ext32s(s, ret, arg); 641} 642 643static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, 644 TCGReg addr, intptr_t offset) 645{ 646 intptr_t imm12 = sextreg(offset, 0, 12); 647 648 if (offset != imm12) { 649 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 650 651 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 652 imm12 = sextreg(diff, 0, 12); 653 tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12); 654 } else { 655 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 656 if (addr != TCG_REG_ZERO) { 657 tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr); 658 } 659 } 660 addr = TCG_REG_TMP2; 661 } 662 663 switch (opc) { 664 case OPC_SB: 665 case OPC_SH: 666 case OPC_SW: 667 case OPC_SD: 668 tcg_out_opc_store(s, opc, addr, data, imm12); 669 break; 670 case OPC_LB: 671 case OPC_LBU: 672 case OPC_LH: 673 case OPC_LHU: 674 case OPC_LW: 675 case OPC_LWU: 676 case OPC_LD: 677 tcg_out_opc_imm(s, opc, data, addr, imm12); 678 break; 679 default: 680 g_assert_not_reached(); 681 } 682} 683 684static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 685 TCGReg arg1, intptr_t arg2) 686{ 687 RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD; 688 tcg_out_ldst(s, insn, arg, arg1, arg2); 689} 690 691static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 692 TCGReg arg1, intptr_t arg2) 693{ 694 RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_SW : OPC_SD; 695 tcg_out_ldst(s, insn, arg, arg1, arg2); 696} 697 698static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 699 TCGReg base, intptr_t ofs) 700{ 701 if (val == 0) { 702 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 703 return true; 704 } 705 return false; 706} 707 708static void tcg_out_addsub2(TCGContext *s, 709 TCGReg rl, TCGReg rh, 710 TCGReg al, TCGReg ah, 711 TCGArg bl, TCGArg bh, 712 bool cbl, bool cbh, bool is_sub, bool is32bit) 713{ 714 const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD; 715 const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI; 716 const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB; 717 TCGReg th = TCG_REG_TMP1; 718 719 /* If we have a negative constant such that negating it would 720 make the high part zero, we can (usually) eliminate one insn. */ 721 if (cbl && cbh && bh == -1 && bl != 0) { 722 bl = -bl; 723 bh = 0; 724 is_sub = !is_sub; 725 } 726 727 /* By operating on the high part first, we get to use the final 728 carry operation to move back from the temporary. */ 729 if (!cbh) { 730 tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh); 731 } else if (bh != 0 || ah == rl) { 732 tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh)); 733 } else { 734 th = ah; 735 } 736 737 /* Note that tcg optimization should eliminate the bl == 0 case. */ 738 if (is_sub) { 739 if (cbl) { 740 tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl); 741 tcg_out_opc_imm(s, opc_addi, rl, al, -bl); 742 } else { 743 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl); 744 tcg_out_opc_reg(s, opc_sub, rl, al, bl); 745 } 746 tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0); 747 } else { 748 if (cbl) { 749 tcg_out_opc_imm(s, opc_addi, rl, al, bl); 750 tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl); 751 } else if (al == bl) { 752 /* 753 * If the input regs overlap, this is a simple doubling 754 * and carry-out is the input msb. This special case is 755 * required when the output reg overlaps the input, 756 * but we might as well use it always. 757 */ 758 tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0); 759 tcg_out_opc_reg(s, opc_add, rl, al, al); 760 } else { 761 tcg_out_opc_reg(s, opc_add, rl, al, bl); 762 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, 763 rl, (rl == bl ? 
al : bl)); 764 } 765 tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0); 766 } 767} 768 769static const struct { 770 RISCVInsn op; 771 bool swap; 772} tcg_brcond_to_riscv[] = { 773 [TCG_COND_EQ] = { OPC_BEQ, false }, 774 [TCG_COND_NE] = { OPC_BNE, false }, 775 [TCG_COND_LT] = { OPC_BLT, false }, 776 [TCG_COND_GE] = { OPC_BGE, false }, 777 [TCG_COND_LE] = { OPC_BGE, true }, 778 [TCG_COND_GT] = { OPC_BLT, true }, 779 [TCG_COND_LTU] = { OPC_BLTU, false }, 780 [TCG_COND_GEU] = { OPC_BGEU, false }, 781 [TCG_COND_LEU] = { OPC_BGEU, true }, 782 [TCG_COND_GTU] = { OPC_BLTU, true } 783}; 784 785static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 786 TCGReg arg2, TCGLabel *l) 787{ 788 RISCVInsn op = tcg_brcond_to_riscv[cond].op; 789 790 tcg_debug_assert(op != 0); 791 792 if (tcg_brcond_to_riscv[cond].swap) { 793 TCGReg t = arg1; 794 arg1 = arg2; 795 arg2 = t; 796 } 797 798 tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0); 799 tcg_out_opc_branch(s, op, arg1, arg2, 0); 800} 801 802#define SETCOND_INV TCG_TARGET_NB_REGS 803#define SETCOND_NEZ (SETCOND_INV << 1) 804#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 805 806static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 807 TCGReg arg1, tcg_target_long arg2, bool c2) 808{ 809 int flags = 0; 810 811 switch (cond) { 812 case TCG_COND_EQ: /* -> NE */ 813 case TCG_COND_GE: /* -> LT */ 814 case TCG_COND_GEU: /* -> LTU */ 815 case TCG_COND_GT: /* -> LE */ 816 case TCG_COND_GTU: /* -> LEU */ 817 cond = tcg_invert_cond(cond); 818 flags ^= SETCOND_INV; 819 break; 820 default: 821 break; 822 } 823 824 switch (cond) { 825 case TCG_COND_LE: 826 case TCG_COND_LEU: 827 /* 828 * If we have a constant input, the most efficient way to implement 829 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 830 * We don't need to care for this for LE because the constant input 831 * is constrained to signed 12-bit, and 0x800 is representable in the 832 * temporary register. 
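         * For example, LE against constant 5 becomes SLTI ret, arg1, 6,
         * and LEU against constant 5 becomes SLTIU ret, arg1, 6.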
833 */ 834 if (c2) { 835 if (cond == TCG_COND_LEU) { 836 /* unsigned <= -1 is true */ 837 if (arg2 == -1) { 838 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 839 return ret; 840 } 841 cond = TCG_COND_LTU; 842 } else { 843 cond = TCG_COND_LT; 844 } 845 tcg_debug_assert(arg2 <= 0x7ff); 846 if (++arg2 == 0x800) { 847 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 848 arg2 = TCG_REG_TMP0; 849 c2 = false; 850 } 851 } else { 852 TCGReg tmp = arg2; 853 arg2 = arg1; 854 arg1 = tmp; 855 cond = tcg_swap_cond(cond); /* LE -> GE */ 856 cond = tcg_invert_cond(cond); /* GE -> LT */ 857 flags ^= SETCOND_INV; 858 } 859 break; 860 default: 861 break; 862 } 863 864 switch (cond) { 865 case TCG_COND_NE: 866 flags |= SETCOND_NEZ; 867 if (!c2) { 868 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); 869 } else if (arg2 == 0) { 870 ret = arg1; 871 } else { 872 tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2); 873 } 874 break; 875 876 case TCG_COND_LT: 877 if (c2) { 878 tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2); 879 } else { 880 tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2); 881 } 882 break; 883 884 case TCG_COND_LTU: 885 if (c2) { 886 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2); 887 } else { 888 tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2); 889 } 890 break; 891 892 default: 893 g_assert_not_reached(); 894 } 895 896 return ret | flags; 897} 898 899static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 900 TCGReg arg1, tcg_target_long arg2, bool c2) 901{ 902 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 903 904 if (tmpflags != ret) { 905 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 906 907 switch (tmpflags & SETCOND_FLAGS) { 908 case SETCOND_INV: 909 /* Intermediate result is boolean: simply invert. */ 910 tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1); 911 break; 912 case SETCOND_NEZ: 913 /* Intermediate result is zero/non-zero: test != 0. */ 914 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp); 915 break; 916 case SETCOND_NEZ | SETCOND_INV: 917 /* Intermediate result is zero/non-zero: test == 0. */ 918 tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1); 919 break; 920 default: 921 g_assert_not_reached(); 922 } 923 } 924} 925 926static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret, 927 TCGReg arg1, tcg_target_long arg2, bool c2) 928{ 929 int tmpflags; 930 TCGReg tmp; 931 932 /* For LT/GE comparison against 0, replicate the sign bit. */ 933 if (c2 && arg2 == 0) { 934 switch (cond) { 935 case TCG_COND_GE: 936 tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1); 937 arg1 = ret; 938 /* fall through */ 939 case TCG_COND_LT: 940 tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1); 941 return; 942 default: 943 break; 944 } 945 } 946 947 tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 948 tmp = tmpflags & ~SETCOND_FLAGS; 949 950 /* If intermediate result is zero/non-zero: test != 0. */ 951 if (tmpflags & SETCOND_NEZ) { 952 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp); 953 tmp = ret; 954 } 955 956 /* Produce the 0/-1 result. 
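       For the inverted case, ADDI -1 maps the boolean 1/0 to 0/-1;
       otherwise SUB from zero maps 1/0 to -1/0.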
*/ 957 if (tmpflags & SETCOND_INV) { 958 tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1); 959 } else { 960 tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp); 961 } 962} 963 964static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne, 965 int val1, bool c_val1, 966 int val2, bool c_val2) 967{ 968 if (val1 == 0) { 969 if (c_val2) { 970 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2); 971 val2 = TCG_REG_TMP1; 972 } 973 tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne); 974 return; 975 } 976 977 if (val2 == 0) { 978 if (c_val1) { 979 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1); 980 val1 = TCG_REG_TMP1; 981 } 982 tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne); 983 return; 984 } 985 986 if (c_val2) { 987 if (c_val1) { 988 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2); 989 } else { 990 tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2); 991 } 992 tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne); 993 tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2); 994 return; 995 } 996 997 if (c_val1) { 998 tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1); 999 tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne); 1000 tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1); 1001 return; 1002 } 1003 1004 tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne); 1005 tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne); 1006 tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1); 1007} 1008 1009static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret, 1010 TCGReg cmp1, TCGReg cmp2, 1011 int val, bool c_val) 1012{ 1013 RISCVInsn op; 1014 int disp = 8; 1015 1016 tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv)); 1017 op = tcg_brcond_to_riscv[cond].op; 1018 tcg_debug_assert(op != 0); 1019 1020 if (tcg_brcond_to_riscv[cond].swap) { 1021 tcg_out_opc_branch(s, op, cmp2, cmp1, disp); 1022 } else { 1023 tcg_out_opc_branch(s, op, cmp1, cmp2, disp); 1024 } 1025 if (c_val) { 1026 tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val); 1027 } else { 1028 tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0); 1029 } 1030} 1031 1032static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret, 1033 TCGReg cmp1, TCGReg cmp2, 1034 int val1, bool c_val1, 1035 int val2, bool c_val2) 1036{ 1037 TCGReg tmp; 1038 1039 /* TCG optimizer reorders to prefer ret matching val2. */ 1040 if (!c_val2 && ret == val2) { 1041 cond = tcg_invert_cond(cond); 1042 tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1); 1043 return; 1044 } 1045 1046 if (!c_val1 && ret == val1) { 1047 tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2); 1048 return; 1049 } 1050 1051 tmp = (ret == cmp1 || ret == cmp2 ? 
TCG_REG_TMP1 : ret); 1052 if (c_val1) { 1053 tcg_out_movi(s, TCG_TYPE_REG, tmp, val1); 1054 } else { 1055 tcg_out_mov(s, TCG_TYPE_REG, tmp, val1); 1056 } 1057 tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2); 1058 tcg_out_mov(s, TCG_TYPE_REG, ret, tmp); 1059} 1060 1061static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 1062 TCGReg cmp1, int cmp2, bool c_cmp2, 1063 TCGReg val1, bool c_val1, 1064 TCGReg val2, bool c_val2) 1065{ 1066 int tmpflags; 1067 TCGReg t; 1068 1069 if (!(cpuinfo & CPUINFO_ZICOND) && (!c_cmp2 || cmp2 == 0)) { 1070 tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2, 1071 val1, c_val1, val2, c_val2); 1072 return; 1073 } 1074 1075 tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2); 1076 t = tmpflags & ~SETCOND_FLAGS; 1077 1078 if (cpuinfo & CPUINFO_ZICOND) { 1079 if (tmpflags & SETCOND_INV) { 1080 tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1); 1081 } else { 1082 tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2); 1083 } 1084 } else { 1085 cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE; 1086 tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO, 1087 val1, c_val1, val2, c_val2); 1088 } 1089} 1090 1091static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn, 1092 TCGReg ret, TCGReg src1, int src2, bool c_src2) 1093{ 1094 tcg_out_opc_imm(s, insn, ret, src1, 0); 1095 1096 if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) { 1097 /* 1098 * The requested zero result does not match the insn, so adjust. 1099 * Note that constraints put 'ret' in a new register, so the 1100 * computation above did not clobber either 'src1' or 'src2'. 1101 */ 1102 tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true, 1103 src2, c_src2, ret, false); 1104 } 1105} 1106 1107static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 1108{ 1109 TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA; 1110 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 1111 int ret; 1112 1113 tcg_debug_assert((offset & 1) == 0); 1114 if (offset == sextreg(offset, 0, 20)) { 1115 /* short jump: -2097150 to 2097152 */ 1116 tcg_out_opc_jump(s, OPC_JAL, link, offset); 1117 } else if (offset == (int32_t)offset) { 1118 /* long jump: -2147483646 to 2147483648 */ 1119 tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0); 1120 tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0); 1121 ret = reloc_call(s->code_ptr - 2, arg); 1122 tcg_debug_assert(ret == true); 1123 } else { 1124 /* far jump: 64-bit */ 1125 tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12); 1126 tcg_target_long base = (tcg_target_long)arg - imm; 1127 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base); 1128 tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm); 1129 } 1130} 1131 1132static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 1133 const TCGHelperInfo *info) 1134{ 1135 tcg_out_call_int(s, arg, false); 1136} 1137 1138static void tcg_out_mb(TCGContext *s, TCGArg a0) 1139{ 1140 tcg_insn_unit insn = OPC_FENCE; 1141 1142 if (a0 & TCG_MO_LD_LD) { 1143 insn |= 0x02200000; 1144 } 1145 if (a0 & TCG_MO_ST_LD) { 1146 insn |= 0x01200000; 1147 } 1148 if (a0 & TCG_MO_LD_ST) { 1149 insn |= 0x02100000; 1150 } 1151 if (a0 & TCG_MO_ST_ST) { 1152 insn |= 0x02200000; 1153 } 1154 tcg_out32(s, insn); 1155} 1156 1157/* 1158 * Load/store and TLB 1159 */ 1160 1161static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 1162{ 1163 tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); 1164 bool ok = reloc_jimm20(s->code_ptr - 1, target); 1165 tcg_debug_assert(ok); 1166} 1167 1168bool tcg_target_has_memory_bswap(MemOp memop) 1169{ 1170 return false; 1171} 1172 1173/* We have three temps, we might as well expose them. */ 1174static const TCGLdstHelperParam ldst_helper_param = { 1175 .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 } 1176}; 1177 1178static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1179{ 1180 MemOp opc = get_memop(l->oi); 1181 1182 /* resolve label address */ 1183 if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1184 return false; 1185 } 1186 1187 /* call load helper */ 1188 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 1189 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false); 1190 tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param); 1191 1192 tcg_out_goto(s, l->raddr); 1193 return true; 1194} 1195 1196static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1197{ 1198 MemOp opc = get_memop(l->oi); 1199 1200 /* resolve label address */ 1201 if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 1202 return false; 1203 } 1204 1205 /* call store helper */ 1206 tcg_out_st_helper_args(s, l, &ldst_helper_param); 1207 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 1208 1209 tcg_out_goto(s, l->raddr); 1210 return true; 1211} 1212 1213/* We expect to use a 12-bit negative offset from ENV. */ 1214#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 1215 1216/* 1217 * For system-mode, perform the TLB load and compare. 1218 * For user-mode, perform any required alignment tests. 1219 * In both cases, return a TCGLabelQemuLdst structure if the slow path 1220 * is required and fill in @h with the host address for the fast path. 
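 *
 * The softmmu fast path below indexes the TLB with
 * (addr >> (page_bits - CPU_TLB_ENTRY_BITS)) & mask, adds the table base,
 * loads the comparator and addend, branches to the slow path if the masked
 * address does not match the comparator, and otherwise adds the addend to
 * form the host address.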
1221 */ 1222static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, 1223 TCGReg addr_reg, MemOpIdx oi, 1224 bool is_ld) 1225{ 1226 TCGType addr_type = s->addr_type; 1227 TCGLabelQemuLdst *ldst = NULL; 1228 MemOp opc = get_memop(oi); 1229 TCGAtomAlign aa; 1230 unsigned a_mask; 1231 1232 aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 1233 a_mask = (1u << aa.align) - 1; 1234 1235 if (tcg_use_softmmu) { 1236 unsigned s_bits = opc & MO_SIZE; 1237 unsigned s_mask = (1u << s_bits) - 1; 1238 int mem_index = get_mmuidx(oi); 1239 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 1240 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 1241 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 1242 int compare_mask; 1243 TCGReg addr_adj; 1244 1245 ldst = new_ldst_label(s); 1246 ldst->is_ld = is_ld; 1247 ldst->oi = oi; 1248 ldst->addrlo_reg = addr_reg; 1249 1250 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 1251 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 1252 1253 tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, 1254 s->page_bits - CPU_TLB_ENTRY_BITS); 1255 tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 1256 tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 1257 1258 /* 1259 * For aligned accesses, we check the first byte and include the 1260 * alignment bits within the address. For unaligned access, we 1261 * check that we don't cross pages using the address of the last 1262 * byte of the access. 1263 */ 1264 addr_adj = addr_reg; 1265 if (a_mask < s_mask) { 1266 addr_adj = TCG_REG_TMP0; 1267 tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI, 1268 addr_adj, addr_reg, s_mask - a_mask); 1269 } 1270 compare_mask = s->page_mask | a_mask; 1271 if (compare_mask == sextreg(compare_mask, 0, 12)) { 1272 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); 1273 } else { 1274 tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask); 1275 tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj); 1276 } 1277 1278 /* Load the tlb comparator and the addend. */ 1279 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 1280 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 1281 is_ld ? offsetof(CPUTLBEntry, addr_read) 1282 : offsetof(CPUTLBEntry, addr_write)); 1283 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 1284 offsetof(CPUTLBEntry, addend)); 1285 1286 /* Compare masked address with the TLB entry. */ 1287 ldst->label_ptr[0] = s->code_ptr; 1288 tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); 1289 1290 /* TLB Hit - translate address using addend. */ 1291 if (addr_type != TCG_TYPE_I32) { 1292 tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); 1293 } else if (cpuinfo & CPUINFO_ZBA) { 1294 tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, 1295 addr_reg, TCG_REG_TMP2); 1296 } else { 1297 tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg); 1298 tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, 1299 TCG_REG_TMP0, TCG_REG_TMP2); 1300 } 1301 *pbase = TCG_REG_TMP0; 1302 } else { 1303 TCGReg base; 1304 1305 if (a_mask) { 1306 ldst = new_ldst_label(s); 1307 ldst->is_ld = is_ld; 1308 ldst->oi = oi; 1309 ldst->addrlo_reg = addr_reg; 1310 1311 /* We are expecting alignment max 7, so we can always use andi. 
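               The debug assert below merely checks that the mask fits the
               signed 12-bit ANDI immediate.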
*/ 1312 tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); 1313 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); 1314 1315 ldst->label_ptr[0] = s->code_ptr; 1316 tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); 1317 } 1318 1319 if (guest_base != 0) { 1320 base = TCG_REG_TMP0; 1321 if (addr_type != TCG_TYPE_I32) { 1322 tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, 1323 TCG_GUEST_BASE_REG); 1324 } else if (cpuinfo & CPUINFO_ZBA) { 1325 tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, 1326 TCG_GUEST_BASE_REG); 1327 } else { 1328 tcg_out_ext32u(s, base, addr_reg); 1329 tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG); 1330 } 1331 } else if (addr_type != TCG_TYPE_I32) { 1332 base = addr_reg; 1333 } else { 1334 base = TCG_REG_TMP0; 1335 tcg_out_ext32u(s, base, addr_reg); 1336 } 1337 *pbase = base; 1338 } 1339 1340 return ldst; 1341} 1342 1343static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, 1344 TCGReg base, MemOp opc, TCGType type) 1345{ 1346 /* Byte swapping is left to middle-end expansion. */ 1347 tcg_debug_assert((opc & MO_BSWAP) == 0); 1348 1349 switch (opc & (MO_SSIZE)) { 1350 case MO_UB: 1351 tcg_out_opc_imm(s, OPC_LBU, val, base, 0); 1352 break; 1353 case MO_SB: 1354 tcg_out_opc_imm(s, OPC_LB, val, base, 0); 1355 break; 1356 case MO_UW: 1357 tcg_out_opc_imm(s, OPC_LHU, val, base, 0); 1358 break; 1359 case MO_SW: 1360 tcg_out_opc_imm(s, OPC_LH, val, base, 0); 1361 break; 1362 case MO_UL: 1363 if (type == TCG_TYPE_I64) { 1364 tcg_out_opc_imm(s, OPC_LWU, val, base, 0); 1365 break; 1366 } 1367 /* FALLTHRU */ 1368 case MO_SL: 1369 tcg_out_opc_imm(s, OPC_LW, val, base, 0); 1370 break; 1371 case MO_UQ: 1372 tcg_out_opc_imm(s, OPC_LD, val, base, 0); 1373 break; 1374 default: 1375 g_assert_not_reached(); 1376 } 1377} 1378 1379static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1380 MemOpIdx oi, TCGType data_type) 1381{ 1382 TCGLabelQemuLdst *ldst; 1383 TCGReg base; 1384 1385 ldst = prepare_host_addr(s, &base, addr_reg, oi, true); 1386 tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type); 1387 1388 if (ldst) { 1389 ldst->type = data_type; 1390 ldst->datalo_reg = data_reg; 1391 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1392 } 1393} 1394 1395static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, 1396 TCGReg base, MemOp opc) 1397{ 1398 /* Byte swapping is left to middle-end expansion. */ 1399 tcg_debug_assert((opc & MO_BSWAP) == 0); 1400 1401 switch (opc & (MO_SSIZE)) { 1402 case MO_8: 1403 tcg_out_opc_store(s, OPC_SB, base, val, 0); 1404 break; 1405 case MO_16: 1406 tcg_out_opc_store(s, OPC_SH, base, val, 0); 1407 break; 1408 case MO_32: 1409 tcg_out_opc_store(s, OPC_SW, base, val, 0); 1410 break; 1411 case MO_64: 1412 tcg_out_opc_store(s, OPC_SD, base, val, 0); 1413 break; 1414 default: 1415 g_assert_not_reached(); 1416 } 1417} 1418 1419static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1420 MemOpIdx oi, TCGType data_type) 1421{ 1422 TCGLabelQemuLdst *ldst; 1423 TCGReg base; 1424 1425 ldst = prepare_host_addr(s, &base, addr_reg, oi, false); 1426 tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi)); 1427 1428 if (ldst) { 1429 ldst->type = data_type; 1430 ldst->datalo_reg = data_reg; 1431 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1432 } 1433} 1434 1435static const tcg_insn_unit *tb_ret_addr; 1436 1437static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1438{ 1439 /* Reuse the zeroing that exists for goto_ptr. 
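       A zero return value tail-calls the epilogue entry that already
       clears A0; any other value is loaded into A0 explicitly first.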
*/ 1440 if (a0 == 0) { 1441 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1442 } else { 1443 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1444 tcg_out_call_int(s, tb_ret_addr, true); 1445 } 1446} 1447 1448static void tcg_out_goto_tb(TCGContext *s, int which) 1449{ 1450 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1451 set_jmp_insn_offset(s, which); 1452 tcg_out32(s, OPC_JAL); 1453 1454 /* When branch is out of range, fall through to indirect. */ 1455 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO, 1456 get_jmp_target_addr(s, which)); 1457 tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1458 set_jmp_reset_offset(s, which); 1459} 1460 1461void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1462 uintptr_t jmp_rx, uintptr_t jmp_rw) 1463{ 1464 uintptr_t addr = tb->jmp_target_addr[n]; 1465 ptrdiff_t offset = addr - jmp_rx; 1466 tcg_insn_unit insn; 1467 1468 /* Either directly branch, or fall through to indirect branch. */ 1469 if (offset == sextreg(offset, 0, 20)) { 1470 insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset); 1471 } else { 1472 insn = OPC_NOP; 1473 } 1474 qatomic_set((uint32_t *)jmp_rw, insn); 1475 flush_idcache_range(jmp_rx, jmp_rw, 4); 1476} 1477 1478static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1479 const TCGArg args[TCG_MAX_OP_ARGS], 1480 const int const_args[TCG_MAX_OP_ARGS]) 1481{ 1482 TCGArg a0 = args[0]; 1483 TCGArg a1 = args[1]; 1484 TCGArg a2 = args[2]; 1485 int c2 = const_args[2]; 1486 1487 switch (opc) { 1488 case INDEX_op_goto_ptr: 1489 tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0); 1490 break; 1491 1492 case INDEX_op_br: 1493 tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0); 1494 tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); 1495 break; 1496 1497 case INDEX_op_ld8u_i32: 1498 case INDEX_op_ld8u_i64: 1499 tcg_out_ldst(s, OPC_LBU, a0, a1, a2); 1500 break; 1501 case INDEX_op_ld8s_i32: 1502 case INDEX_op_ld8s_i64: 1503 tcg_out_ldst(s, OPC_LB, a0, a1, a2); 1504 break; 1505 case INDEX_op_ld16u_i32: 1506 case INDEX_op_ld16u_i64: 1507 tcg_out_ldst(s, OPC_LHU, a0, a1, a2); 1508 break; 1509 case INDEX_op_ld16s_i32: 1510 case INDEX_op_ld16s_i64: 1511 tcg_out_ldst(s, OPC_LH, a0, a1, a2); 1512 break; 1513 case INDEX_op_ld32u_i64: 1514 tcg_out_ldst(s, OPC_LWU, a0, a1, a2); 1515 break; 1516 case INDEX_op_ld_i32: 1517 case INDEX_op_ld32s_i64: 1518 tcg_out_ldst(s, OPC_LW, a0, a1, a2); 1519 break; 1520 case INDEX_op_ld_i64: 1521 tcg_out_ldst(s, OPC_LD, a0, a1, a2); 1522 break; 1523 1524 case INDEX_op_st8_i32: 1525 case INDEX_op_st8_i64: 1526 tcg_out_ldst(s, OPC_SB, a0, a1, a2); 1527 break; 1528 case INDEX_op_st16_i32: 1529 case INDEX_op_st16_i64: 1530 tcg_out_ldst(s, OPC_SH, a0, a1, a2); 1531 break; 1532 case INDEX_op_st_i32: 1533 case INDEX_op_st32_i64: 1534 tcg_out_ldst(s, OPC_SW, a0, a1, a2); 1535 break; 1536 case INDEX_op_st_i64: 1537 tcg_out_ldst(s, OPC_SD, a0, a1, a2); 1538 break; 1539 1540 case INDEX_op_add_i32: 1541 if (c2) { 1542 tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2); 1543 } else { 1544 tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2); 1545 } 1546 break; 1547 case INDEX_op_add_i64: 1548 if (c2) { 1549 tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2); 1550 } else { 1551 tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2); 1552 } 1553 break; 1554 1555 case INDEX_op_sub_i32: 1556 if (c2) { 1557 tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2); 1558 } else { 1559 tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2); 1560 } 1561 break; 1562 case INDEX_op_sub_i64: 1563 if (c2) { 1564 tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2); 1565 } else { 1566 
tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2); 1567 } 1568 break; 1569 1570 case INDEX_op_and_i32: 1571 case INDEX_op_and_i64: 1572 if (c2) { 1573 tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); 1574 } else { 1575 tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); 1576 } 1577 break; 1578 1579 case INDEX_op_or_i32: 1580 case INDEX_op_or_i64: 1581 if (c2) { 1582 tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); 1583 } else { 1584 tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); 1585 } 1586 break; 1587 1588 case INDEX_op_xor_i32: 1589 case INDEX_op_xor_i64: 1590 if (c2) { 1591 tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); 1592 } else { 1593 tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); 1594 } 1595 break; 1596 1597 case INDEX_op_andc_i32: 1598 case INDEX_op_andc_i64: 1599 if (c2) { 1600 tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2); 1601 } else { 1602 tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2); 1603 } 1604 break; 1605 case INDEX_op_orc_i32: 1606 case INDEX_op_orc_i64: 1607 if (c2) { 1608 tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2); 1609 } else { 1610 tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2); 1611 } 1612 break; 1613 case INDEX_op_eqv_i32: 1614 case INDEX_op_eqv_i64: 1615 if (c2) { 1616 tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2); 1617 } else { 1618 tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2); 1619 } 1620 break; 1621 1622 case INDEX_op_not_i32: 1623 case INDEX_op_not_i64: 1624 tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1); 1625 break; 1626 1627 case INDEX_op_neg_i32: 1628 tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); 1629 break; 1630 case INDEX_op_neg_i64: 1631 tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1); 1632 break; 1633 1634 case INDEX_op_mul_i32: 1635 tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2); 1636 break; 1637 case INDEX_op_mul_i64: 1638 tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); 1639 break; 1640 1641 case INDEX_op_div_i32: 1642 tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2); 1643 break; 1644 case INDEX_op_div_i64: 1645 tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2); 1646 break; 1647 1648 case INDEX_op_divu_i32: 1649 tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2); 1650 break; 1651 case INDEX_op_divu_i64: 1652 tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2); 1653 break; 1654 1655 case INDEX_op_rem_i32: 1656 tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2); 1657 break; 1658 case INDEX_op_rem_i64: 1659 tcg_out_opc_reg(s, OPC_REM, a0, a1, a2); 1660 break; 1661 1662 case INDEX_op_remu_i32: 1663 tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2); 1664 break; 1665 case INDEX_op_remu_i64: 1666 tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2); 1667 break; 1668 1669 case INDEX_op_shl_i32: 1670 if (c2) { 1671 tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f); 1672 } else { 1673 tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2); 1674 } 1675 break; 1676 case INDEX_op_shl_i64: 1677 if (c2) { 1678 tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f); 1679 } else { 1680 tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2); 1681 } 1682 break; 1683 1684 case INDEX_op_shr_i32: 1685 if (c2) { 1686 tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f); 1687 } else { 1688 tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2); 1689 } 1690 break; 1691 case INDEX_op_shr_i64: 1692 if (c2) { 1693 tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f); 1694 } else { 1695 tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2); 1696 } 1697 break; 1698 1699 case INDEX_op_sar_i32: 1700 if (c2) { 1701 tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f); 1702 } else { 1703 tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2); 1704 } 1705 break; 1706 case INDEX_op_sar_i64: 1707 if (c2) { 1708 tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f); 1709 } else { 1710 tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2); 1711 } 1712 
break; 1713 1714 case INDEX_op_rotl_i32: 1715 if (c2) { 1716 tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f); 1717 } else { 1718 tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2); 1719 } 1720 break; 1721 case INDEX_op_rotl_i64: 1722 if (c2) { 1723 tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f); 1724 } else { 1725 tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2); 1726 } 1727 break; 1728 1729 case INDEX_op_rotr_i32: 1730 if (c2) { 1731 tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f); 1732 } else { 1733 tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2); 1734 } 1735 break; 1736 case INDEX_op_rotr_i64: 1737 if (c2) { 1738 tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f); 1739 } else { 1740 tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2); 1741 } 1742 break; 1743 1744 case INDEX_op_bswap64_i64: 1745 tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); 1746 break; 1747 case INDEX_op_bswap32_i32: 1748 a2 = 0; 1749 /* fall through */ 1750 case INDEX_op_bswap32_i64: 1751 tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); 1752 if (a2 & TCG_BSWAP_OZ) { 1753 tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32); 1754 } else { 1755 tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32); 1756 } 1757 break; 1758 case INDEX_op_bswap16_i64: 1759 case INDEX_op_bswap16_i32: 1760 tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); 1761 if (a2 & TCG_BSWAP_OZ) { 1762 tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48); 1763 } else { 1764 tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48); 1765 } 1766 break; 1767 1768 case INDEX_op_ctpop_i32: 1769 tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0); 1770 break; 1771 case INDEX_op_ctpop_i64: 1772 tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0); 1773 break; 1774 1775 case INDEX_op_clz_i32: 1776 tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2); 1777 break; 1778 case INDEX_op_clz_i64: 1779 tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2); 1780 break; 1781 case INDEX_op_ctz_i32: 1782 tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2); 1783 break; 1784 case INDEX_op_ctz_i64: 1785 tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2); 1786 break; 1787 1788 case INDEX_op_add2_i32: 1789 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], 1790 const_args[4], const_args[5], false, true); 1791 break; 1792 case INDEX_op_add2_i64: 1793 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], 1794 const_args[4], const_args[5], false, false); 1795 break; 1796 case INDEX_op_sub2_i32: 1797 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], 1798 const_args[4], const_args[5], true, true); 1799 break; 1800 case INDEX_op_sub2_i64: 1801 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], 1802 const_args[4], const_args[5], true, false); 1803 break; 1804 1805 case INDEX_op_brcond_i32: 1806 case INDEX_op_brcond_i64: 1807 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1808 break; 1809 1810 case INDEX_op_setcond_i32: 1811 case INDEX_op_setcond_i64: 1812 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1813 break; 1814 1815 case INDEX_op_negsetcond_i32: 1816 case INDEX_op_negsetcond_i64: 1817 tcg_out_negsetcond(s, args[3], a0, a1, a2, c2); 1818 break; 1819 1820 case INDEX_op_movcond_i32: 1821 case INDEX_op_movcond_i64: 1822 tcg_out_movcond(s, args[5], a0, a1, a2, c2, 1823 args[3], const_args[3], args[4], const_args[4]); 1824 break; 1825 1826 case INDEX_op_qemu_ld_a32_i32: 1827 case INDEX_op_qemu_ld_a64_i32: 1828 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1829 break; 1830 case INDEX_op_qemu_ld_a32_i64: 1831 case INDEX_op_qemu_ld_a64_i64: 1832 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1833 break; 1834 case INDEX_op_qemu_st_a32_i32: 1835 case INDEX_op_qemu_st_a64_i32: 1836 
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1837 break; 1838 case INDEX_op_qemu_st_a32_i64: 1839 case INDEX_op_qemu_st_a64_i64: 1840 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1841 break; 1842 1843 case INDEX_op_extrh_i64_i32: 1844 tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32); 1845 break; 1846 1847 case INDEX_op_mulsh_i32: 1848 case INDEX_op_mulsh_i64: 1849 tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2); 1850 break; 1851 1852 case INDEX_op_muluh_i32: 1853 case INDEX_op_muluh_i64: 1854 tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2); 1855 break; 1856 1857 case INDEX_op_mb: 1858 tcg_out_mb(s, a0); 1859 break; 1860 1861 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1862 case INDEX_op_mov_i64: 1863 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1864 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1865 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1866 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1867 case INDEX_op_ext8s_i64: 1868 case INDEX_op_ext8u_i32: 1869 case INDEX_op_ext8u_i64: 1870 case INDEX_op_ext16s_i32: 1871 case INDEX_op_ext16s_i64: 1872 case INDEX_op_ext16u_i32: 1873 case INDEX_op_ext16u_i64: 1874 case INDEX_op_ext32s_i64: 1875 case INDEX_op_ext32u_i64: 1876 case INDEX_op_ext_i32_i64: 1877 case INDEX_op_extu_i32_i64: 1878 case INDEX_op_extrl_i64_i32: 1879 default: 1880 g_assert_not_reached(); 1881 } 1882} 1883 1884static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1885{ 1886 switch (op) { 1887 case INDEX_op_goto_ptr: 1888 return C_O0_I1(r); 1889 1890 case INDEX_op_ld8u_i32: 1891 case INDEX_op_ld8s_i32: 1892 case INDEX_op_ld16u_i32: 1893 case INDEX_op_ld16s_i32: 1894 case INDEX_op_ld_i32: 1895 case INDEX_op_not_i32: 1896 case INDEX_op_neg_i32: 1897 case INDEX_op_ld8u_i64: 1898 case INDEX_op_ld8s_i64: 1899 case INDEX_op_ld16u_i64: 1900 case INDEX_op_ld16s_i64: 1901 case INDEX_op_ld32s_i64: 1902 case INDEX_op_ld32u_i64: 1903 case INDEX_op_ld_i64: 1904 case INDEX_op_not_i64: 1905 case INDEX_op_neg_i64: 1906 case INDEX_op_ext8u_i32: 1907 case INDEX_op_ext8u_i64: 1908 case INDEX_op_ext16u_i32: 1909 case INDEX_op_ext16u_i64: 1910 case INDEX_op_ext32u_i64: 1911 case INDEX_op_extu_i32_i64: 1912 case INDEX_op_ext8s_i32: 1913 case INDEX_op_ext8s_i64: 1914 case INDEX_op_ext16s_i32: 1915 case INDEX_op_ext16s_i64: 1916 case INDEX_op_ext32s_i64: 1917 case INDEX_op_extrl_i64_i32: 1918 case INDEX_op_extrh_i64_i32: 1919 case INDEX_op_ext_i32_i64: 1920 case INDEX_op_bswap16_i32: 1921 case INDEX_op_bswap32_i32: 1922 case INDEX_op_bswap16_i64: 1923 case INDEX_op_bswap32_i64: 1924 case INDEX_op_bswap64_i64: 1925 case INDEX_op_ctpop_i32: 1926 case INDEX_op_ctpop_i64: 1927 return C_O1_I1(r, r); 1928 1929 case INDEX_op_st8_i32: 1930 case INDEX_op_st16_i32: 1931 case INDEX_op_st_i32: 1932 case INDEX_op_st8_i64: 1933 case INDEX_op_st16_i64: 1934 case INDEX_op_st32_i64: 1935 case INDEX_op_st_i64: 1936 return C_O0_I2(rZ, r); 1937 1938 case INDEX_op_add_i32: 1939 case INDEX_op_and_i32: 1940 case INDEX_op_or_i32: 1941 case INDEX_op_xor_i32: 1942 case INDEX_op_add_i64: 1943 case INDEX_op_and_i64: 1944 case INDEX_op_or_i64: 1945 case INDEX_op_xor_i64: 1946 case INDEX_op_setcond_i32: 1947 case INDEX_op_setcond_i64: 1948 case INDEX_op_negsetcond_i32: 1949 case INDEX_op_negsetcond_i64: 1950 return C_O1_I2(r, r, rI); 1951 1952 case INDEX_op_andc_i32: 1953 case INDEX_op_andc_i64: 1954 case INDEX_op_orc_i32: 1955 case INDEX_op_orc_i64: 1956 case INDEX_op_eqv_i32: 1957 case INDEX_op_eqv_i64: 1958 return C_O1_I2(r, r, rJ); 1959 1960 case 
INDEX_op_sub_i32: 1961 case INDEX_op_sub_i64: 1962 return C_O1_I2(r, rZ, rN); 1963 1964 case INDEX_op_mul_i32: 1965 case INDEX_op_mulsh_i32: 1966 case INDEX_op_muluh_i32: 1967 case INDEX_op_div_i32: 1968 case INDEX_op_divu_i32: 1969 case INDEX_op_rem_i32: 1970 case INDEX_op_remu_i32: 1971 case INDEX_op_mul_i64: 1972 case INDEX_op_mulsh_i64: 1973 case INDEX_op_muluh_i64: 1974 case INDEX_op_div_i64: 1975 case INDEX_op_divu_i64: 1976 case INDEX_op_rem_i64: 1977 case INDEX_op_remu_i64: 1978 return C_O1_I2(r, rZ, rZ); 1979 1980 case INDEX_op_shl_i32: 1981 case INDEX_op_shr_i32: 1982 case INDEX_op_sar_i32: 1983 case INDEX_op_rotl_i32: 1984 case INDEX_op_rotr_i32: 1985 case INDEX_op_shl_i64: 1986 case INDEX_op_shr_i64: 1987 case INDEX_op_sar_i64: 1988 case INDEX_op_rotl_i64: 1989 case INDEX_op_rotr_i64: 1990 return C_O1_I2(r, r, ri); 1991 1992 case INDEX_op_clz_i32: 1993 case INDEX_op_clz_i64: 1994 case INDEX_op_ctz_i32: 1995 case INDEX_op_ctz_i64: 1996 return C_N1_I2(r, r, rM); 1997 1998 case INDEX_op_brcond_i32: 1999 case INDEX_op_brcond_i64: 2000 return C_O0_I2(rZ, rZ); 2001 2002 case INDEX_op_movcond_i32: 2003 case INDEX_op_movcond_i64: 2004 return C_O1_I4(r, r, rI, rM, rM); 2005 2006 case INDEX_op_add2_i32: 2007 case INDEX_op_add2_i64: 2008 case INDEX_op_sub2_i32: 2009 case INDEX_op_sub2_i64: 2010 return C_O2_I4(r, r, rZ, rZ, rM, rM); 2011 2012 case INDEX_op_qemu_ld_a32_i32: 2013 case INDEX_op_qemu_ld_a64_i32: 2014 case INDEX_op_qemu_ld_a32_i64: 2015 case INDEX_op_qemu_ld_a64_i64: 2016 return C_O1_I1(r, r); 2017 case INDEX_op_qemu_st_a32_i32: 2018 case INDEX_op_qemu_st_a64_i32: 2019 case INDEX_op_qemu_st_a32_i64: 2020 case INDEX_op_qemu_st_a64_i64: 2021 return C_O0_I2(rZ, r); 2022 2023 default: 2024 g_assert_not_reached(); 2025 } 2026} 2027 2028static const int tcg_target_callee_save_regs[] = { 2029 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2030 TCG_REG_S1, 2031 TCG_REG_S2, 2032 TCG_REG_S3, 2033 TCG_REG_S4, 2034 TCG_REG_S5, 2035 TCG_REG_S6, 2036 TCG_REG_S7, 2037 TCG_REG_S8, 2038 TCG_REG_S9, 2039 TCG_REG_S10, 2040 TCG_REG_S11, 2041 TCG_REG_RA, /* should be last for ABI compliance */ 2042}; 2043 2044/* Stack frame parameters. */ 2045#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 2046#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 2047#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 2048#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 2049 + TCG_TARGET_STACK_ALIGN - 1) \ 2050 & -TCG_TARGET_STACK_ALIGN) 2051#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 2052 2053/* We're expecting to be able to use an immediate for frame allocation. 
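   That is, FRAME_SIZE must fit the signed 12-bit ADDI immediate used by
   the prologue and epilogue to adjust SP; the build-time assert enforces it.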
*/ 2054QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 2055 2056/* Generate global QEMU prologue and epilogue code */ 2057static void tcg_target_qemu_prologue(TCGContext *s) 2058{ 2059 int i; 2060 2061 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 2062 2063 /* TB prologue */ 2064 tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 2065 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2066 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2067 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2068 } 2069 2070 if (!tcg_use_softmmu && guest_base) { 2071 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 2072 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2073 } 2074 2075 /* Call generated code */ 2076 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2077 tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 2078 2079 /* Return path for goto_ptr. Set return value to 0 */ 2080 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2081 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 2082 2083 /* TB epilogue */ 2084 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 2085 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2086 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2087 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2088 } 2089 2090 tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 2091 tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0); 2092} 2093 2094static void tcg_out_tb_start(TCGContext *s) 2095{ 2096 /* nothing to do */ 2097} 2098 2099static void tcg_target_init(TCGContext *s) 2100{ 2101 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; 2102 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; 2103 2104 tcg_target_call_clobber_regs = -1u; 2105 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 2106 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 2107 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 2108 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 2109 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 2110 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 2111 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 2112 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 2113 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 2114 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 2115 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10); 2116 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11); 2117 2118 s->reserved_regs = 0; 2119 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 2120 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 2121 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 2122 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 2123 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 2124 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); 2125 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 2126} 2127 2128typedef struct { 2129 DebugFrameHeader h; 2130 uint8_t fde_def_cfa[4]; 2131 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 2132} DebugFrame; 2133 2134#define ELF_HOST_MACHINE EM_RISCV 2135 2136static const DebugFrame debug_frame = { 2137 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 2138 .h.cie.id = -1, 2139 .h.cie.version = 1, 2140 .h.cie.code_align = 1, 2141 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 
8) & 0x7f, /* sleb128 */ 2142 .h.cie.return_column = TCG_REG_RA, 2143 2144 /* Total FDE size does not include the "len" member. */ 2145 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2146 2147 .fde_def_cfa = { 2148 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 2149 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2150 (FRAME_SIZE >> 7) 2151 }, 2152 .fde_reg_ofs = { 2153 0x80 + 9, 12, /* DW_CFA_offset, s1, -96 */ 2154 0x80 + 18, 11, /* DW_CFA_offset, s2, -88 */ 2155 0x80 + 19, 10, /* DW_CFA_offset, s3, -80 */ 2156 0x80 + 20, 9, /* DW_CFA_offset, s4, -72 */ 2157 0x80 + 21, 8, /* DW_CFA_offset, s5, -64 */ 2158 0x80 + 22, 7, /* DW_CFA_offset, s6, -56 */ 2159 0x80 + 23, 6, /* DW_CFA_offset, s7, -48 */ 2160 0x80 + 24, 5, /* DW_CFA_offset, s8, -40 */ 2161 0x80 + 25, 4, /* DW_CFA_offset, s9, -32 */ 2162 0x80 + 26, 3, /* DW_CFA_offset, s10, -24 */ 2163 0x80 + 27, 2, /* DW_CFA_offset, s11, -16 */ 2164 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 2165 } 2166}; 2167 2168void tcg_register_jit(const void *buf, size_t buf_size) 2169{ 2170 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2171} 2172