1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33 34#ifdef CONFIG_DEBUG_TCG 35static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 36 "zero", 37 "ra", 38 "tp", 39 "sp", 40 "a0", 41 "a1", 42 "a2", 43 "a3", 44 "a4", 45 "a5", 46 "a6", 47 "a7", 48 "t0", 49 "t1", 50 "t2", 51 "t3", 52 "t4", 53 "t5", 54 "t6", 55 "t7", 56 "t8", 57 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 58 "s9", 59 "s0", 60 "s1", 61 "s2", 62 "s3", 63 "s4", 64 "s5", 65 "s6", 66 "s7", 67 "s8" 68}; 69#endif 70 71static const int tcg_target_reg_alloc_order[] = { 72 /* Registers preserved across calls */ 73 /* TCG_REG_S0 reserved for TCG_AREG0 */ 74 TCG_REG_S1, 75 TCG_REG_S2, 76 TCG_REG_S3, 77 TCG_REG_S4, 78 TCG_REG_S5, 79 TCG_REG_S6, 80 TCG_REG_S7, 81 TCG_REG_S8, 82 TCG_REG_S9, 83 84 /* Registers (potentially) clobbered across calls */ 85 TCG_REG_T0, 86 TCG_REG_T1, 87 TCG_REG_T2, 88 TCG_REG_T3, 89 TCG_REG_T4, 90 TCG_REG_T5, 91 TCG_REG_T6, 92 TCG_REG_T7, 93 TCG_REG_T8, 94 95 /* Argument registers, opposite order of allocation. */ 96 TCG_REG_A7, 97 TCG_REG_A6, 98 TCG_REG_A5, 99 TCG_REG_A4, 100 TCG_REG_A3, 101 TCG_REG_A2, 102 TCG_REG_A1, 103 TCG_REG_A0, 104}; 105 106static const int tcg_target_call_iarg_regs[] = { 107 TCG_REG_A0, 108 TCG_REG_A1, 109 TCG_REG_A2, 110 TCG_REG_A3, 111 TCG_REG_A4, 112 TCG_REG_A5, 113 TCG_REG_A6, 114 TCG_REG_A7, 115}; 116 117static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 118{ 119 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 120 tcg_debug_assert(slot >= 0 && slot <= 1); 121 return TCG_REG_A0 + slot; 122} 123 124#ifndef CONFIG_SOFTMMU 125#define USE_GUEST_BASE (guest_base != 0) 126#define TCG_GUEST_BASE_REG TCG_REG_S1 127#endif 128 129#define TCG_CT_CONST_ZERO 0x100 130#define TCG_CT_CONST_S12 0x200 131#define TCG_CT_CONST_S32 0x400 132#define TCG_CT_CONST_U12 0x800 133#define TCG_CT_CONST_C12 0x1000 134#define TCG_CT_CONST_WSZ 0x2000 135 136#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 137 138static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 139{ 140 return sextract64(val, pos, len); 141} 142 143/* test if a constant matches the constraint */ 144static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 145{ 146 if (ct & TCG_CT_CONST) { 147 return true; 148 } 149 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 150 return true; 151 } 152 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 153 return true; 154 } 155 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 156 return true; 157 } 158 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 159 return true; 160 } 161 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 162 return true; 163 } 164 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 165 return true; 166 } 167 return false; 168} 169 170/* 171 * Relocations 172 */ 173 174/* 175 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 176 * complicated; a whopping stack machine is needed to stuff the fields, at 177 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 178 * needed. 179 * 180 * Hence, define our own simpler relocation types. Numbers are chosen as to 181 * not collide with potential future additions to the true ELF relocation 182 * type enum. 183 */ 184 185/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 186#define R_LOONGARCH_BR_SK16 256 187/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 188#define R_LOONGARCH_BR_SD10K16 257 189 190static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 191{ 192 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 193 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 194 195 tcg_debug_assert((offset & 3) == 0); 196 offset >>= 2; 197 if (offset == sextreg(offset, 0, 16)) { 198 *src_rw = deposit64(*src_rw, 10, 16, offset); 199 return true; 200 } 201 202 return false; 203} 204 205static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 206 const tcg_insn_unit *target) 207{ 208 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 209 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 210 211 tcg_debug_assert((offset & 3) == 0); 212 offset >>= 2; 213 if (offset == sextreg(offset, 0, 26)) { 214 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 215 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 216 return true; 217 } 218 219 return false; 220} 221 222static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 223 intptr_t value, intptr_t addend) 224{ 225 tcg_debug_assert(addend == 0); 226 switch (type) { 227 case R_LOONGARCH_BR_SK16: 228 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 229 case R_LOONGARCH_BR_SD10K16: 230 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 231 default: 232 g_assert_not_reached(); 233 } 234} 235 236#include "tcg-insn-defs.c.inc" 237 238/* 239 * TCG intrinsics 240 */ 241 242static void tcg_out_mb(TCGContext *s, TCGArg a0) 243{ 244 /* Baseline LoongArch only has the full barrier, unfortunately. */ 245 tcg_out_opc_dbar(s, 0); 246} 247 248static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 249{ 250 if (ret == arg) { 251 return true; 252 } 253 switch (type) { 254 case TCG_TYPE_I32: 255 case TCG_TYPE_I64: 256 /* 257 * Conventional register-register move used in LoongArch is 258 * `or dst, src, zero`. 259 */ 260 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 261 break; 262 default: 263 g_assert_not_reached(); 264 } 265 return true; 266} 267 268/* Loads a 32-bit immediate into rd, sign-extended. */ 269static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 270{ 271 tcg_target_long lo = sextreg(val, 0, 12); 272 tcg_target_long hi12 = sextreg(val, 12, 20); 273 274 /* Single-instruction cases. */ 275 if (hi12 == 0) { 276 /* val fits in uimm12: ori rd, zero, val */ 277 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 278 return; 279 } 280 if (hi12 == sextreg(lo, 12, 20)) { 281 /* val fits in simm12: addi.w rd, zero, val */ 282 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 283 return; 284 } 285 286 /* High bits must be set; load with lu12i.w + optional ori. */ 287 tcg_out_opc_lu12i_w(s, rd, hi12); 288 if (lo != 0) { 289 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 290 } 291} 292 293static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 294 tcg_target_long val) 295{ 296 /* 297 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 298 * with dedicated instructions for filling the respective bitfields 299 * below: 300 * 301 * 6 5 4 3 302 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 303 * +-----------------------+---------------------------------------+... 304 * | hi52 | hi32 | 305 * +-----------------------+---------------------------------------+... 306 * 3 2 1 307 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 308 * ...+-------------------------------------+-------------------------+ 309 * | hi12 | lo | 310 * ...+-------------------------------------+-------------------------+ 311 * 312 * Check if val belong to one of the several fast cases, before falling 313 * back to the slow path. 314 */ 315 316 intptr_t pc_offset; 317 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 318 tcg_target_long hi12, hi32, hi52; 319 320 /* Value fits in signed i32. */ 321 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 322 tcg_out_movi_i32(s, rd, val); 323 return; 324 } 325 326 /* PC-relative cases. */ 327 pc_offset = tcg_pcrel_diff(s, (void *)val); 328 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 329 /* Single pcaddu2i. */ 330 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 331 return; 332 } 333 334 if (pc_offset == (int32_t)pc_offset) { 335 /* Offset within 32 bits; load with pcalau12i + ori. */ 336 val_lo = sextreg(val, 0, 12); 337 val_hi = val >> 12; 338 pc_hi = (val - pc_offset) >> 12; 339 offset_hi = val_hi - pc_hi; 340 341 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 342 tcg_out_opc_pcalau12i(s, rd, offset_hi); 343 if (val_lo != 0) { 344 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 345 } 346 return; 347 } 348 349 hi12 = sextreg(val, 12, 20); 350 hi32 = sextreg(val, 32, 20); 351 hi52 = sextreg(val, 52, 12); 352 353 /* Single cu52i.d case. */ 354 if ((hi52 != 0) && (ctz64(val) >= 52)) { 355 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 356 return; 357 } 358 359 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 360 tcg_out_movi_i32(s, rd, val); 361 362 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 363 if (hi32 != sextreg(hi12, 20, 20)) { 364 tcg_out_opc_cu32i_d(s, rd, hi32); 365 } 366 367 if (hi52 != sextreg(hi32, 20, 12)) { 368 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 369 } 370} 371 372static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 373 TCGReg rs, tcg_target_long imm) 374{ 375 tcg_target_long lo12 = sextreg(imm, 0, 12); 376 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 377 378 /* 379 * Note that there's a hole in between hi16 and lo12: 380 * 381 * 3 2 1 0 382 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 383 * ...+-------------------------------+-------+-----------------------+ 384 * | hi16 | | lo12 | 385 * ...+-------------------------------+-------+-----------------------+ 386 * 387 * For bits within that hole, it's more efficient to use LU12I and ADD. 388 */ 389 if (imm == (hi16 << 16) + lo12) { 390 if (hi16) { 391 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 392 rs = rd; 393 } 394 if (type == TCG_TYPE_I32) { 395 tcg_out_opc_addi_w(s, rd, rs, lo12); 396 } else if (lo12) { 397 tcg_out_opc_addi_d(s, rd, rs, lo12); 398 } else { 399 tcg_out_mov(s, type, rd, rs); 400 } 401 } else { 402 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 403 if (type == TCG_TYPE_I32) { 404 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 405 } else { 406 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 407 } 408 } 409} 410 411static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 412{ 413 return false; 414} 415 416static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 417 tcg_target_long imm) 418{ 419 /* This function is only used for passing structs by reference. */ 420 g_assert_not_reached(); 421} 422 423static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 424{ 425 tcg_out_opc_andi(s, ret, arg, 0xff); 426} 427 428static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 429{ 430 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 431} 432 433static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 434{ 435 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 436} 437 438static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 439{ 440 tcg_out_opc_sext_b(s, ret, arg); 441} 442 443static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 444{ 445 tcg_out_opc_sext_h(s, ret, arg); 446} 447 448static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 449{ 450 tcg_out_opc_addi_w(s, ret, arg, 0); 451} 452 453static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 454{ 455 if (ret != arg) { 456 tcg_out_ext32s(s, ret, arg); 457 } 458} 459 460static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 461{ 462 tcg_out_ext32u(s, ret, arg); 463} 464 465static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 466{ 467 tcg_out_ext32s(s, ret, arg); 468} 469 470static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 471 TCGReg a0, TCGReg a1, TCGReg a2, 472 bool c2, bool is_32bit) 473{ 474 if (c2) { 475 /* 476 * Fast path: semantics already satisfied due to constraint and 477 * insn behavior, single instruction is enough. 478 */ 479 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 480 /* all clz/ctz insns belong to DJ-format */ 481 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 482 return; 483 } 484 485 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 486 /* a0 = a1 ? REG_TMP0 : a2 */ 487 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 488 tcg_out_opc_masknez(s, a0, a2, a1); 489 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 490} 491 492#define SETCOND_INV TCG_TARGET_NB_REGS 493#define SETCOND_NEZ (SETCOND_INV << 1) 494#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 495 496static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 497 TCGReg arg1, tcg_target_long arg2, bool c2) 498{ 499 int flags = 0; 500 501 switch (cond) { 502 case TCG_COND_EQ: /* -> NE */ 503 case TCG_COND_GE: /* -> LT */ 504 case TCG_COND_GEU: /* -> LTU */ 505 case TCG_COND_GT: /* -> LE */ 506 case TCG_COND_GTU: /* -> LEU */ 507 cond = tcg_invert_cond(cond); 508 flags ^= SETCOND_INV; 509 break; 510 default: 511 break; 512 } 513 514 switch (cond) { 515 case TCG_COND_LE: 516 case TCG_COND_LEU: 517 /* 518 * If we have a constant input, the most efficient way to implement 519 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 520 * We don't need to care for this for LE because the constant input 521 * is still constrained to int32_t, and INT32_MAX+1 is representable 522 * in the 64-bit temporary register. 523 */ 524 if (c2) { 525 if (cond == TCG_COND_LEU) { 526 /* unsigned <= -1 is true */ 527 if (arg2 == -1) { 528 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 529 return ret; 530 } 531 cond = TCG_COND_LTU; 532 } else { 533 cond = TCG_COND_LT; 534 } 535 arg2 += 1; 536 } else { 537 TCGReg tmp = arg2; 538 arg2 = arg1; 539 arg1 = tmp; 540 cond = tcg_swap_cond(cond); /* LE -> GE */ 541 cond = tcg_invert_cond(cond); /* GE -> LT */ 542 flags ^= SETCOND_INV; 543 } 544 break; 545 default: 546 break; 547 } 548 549 switch (cond) { 550 case TCG_COND_NE: 551 flags |= SETCOND_NEZ; 552 if (!c2) { 553 tcg_out_opc_xor(s, ret, arg1, arg2); 554 } else if (arg2 == 0) { 555 ret = arg1; 556 } else if (arg2 >= 0 && arg2 <= 0xfff) { 557 tcg_out_opc_xori(s, ret, arg1, arg2); 558 } else { 559 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 560 } 561 break; 562 563 case TCG_COND_LT: 564 case TCG_COND_LTU: 565 if (c2) { 566 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 567 if (cond == TCG_COND_LT) { 568 tcg_out_opc_slti(s, ret, arg1, arg2); 569 } else { 570 tcg_out_opc_sltui(s, ret, arg1, arg2); 571 } 572 break; 573 } 574 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 575 arg2 = TCG_REG_TMP0; 576 } 577 if (cond == TCG_COND_LT) { 578 tcg_out_opc_slt(s, ret, arg1, arg2); 579 } else { 580 tcg_out_opc_sltu(s, ret, arg1, arg2); 581 } 582 break; 583 584 default: 585 g_assert_not_reached(); 586 break; 587 } 588 589 return ret | flags; 590} 591 592static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 593 TCGReg arg1, tcg_target_long arg2, bool c2) 594{ 595 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 596 597 if (tmpflags != ret) { 598 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 599 600 switch (tmpflags & SETCOND_FLAGS) { 601 case SETCOND_INV: 602 /* Intermediate result is boolean: simply invert. */ 603 tcg_out_opc_xori(s, ret, tmp, 1); 604 break; 605 case SETCOND_NEZ: 606 /* Intermediate result is zero/non-zero: test != 0. */ 607 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 608 break; 609 case SETCOND_NEZ | SETCOND_INV: 610 /* Intermediate result is zero/non-zero: test == 0. */ 611 tcg_out_opc_sltui(s, ret, tmp, 1); 612 break; 613 default: 614 g_assert_not_reached(); 615 } 616 } 617} 618 619static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 620 TCGReg c1, tcg_target_long c2, bool const2, 621 TCGReg v1, TCGReg v2) 622{ 623 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 624 TCGReg t; 625 626 /* Standardize the test below to t != 0. */ 627 if (tmpflags & SETCOND_INV) { 628 t = v1, v1 = v2, v2 = t; 629 } 630 631 t = tmpflags & ~SETCOND_FLAGS; 632 if (v1 == TCG_REG_ZERO) { 633 tcg_out_opc_masknez(s, ret, v2, t); 634 } else if (v2 == TCG_REG_ZERO) { 635 tcg_out_opc_maskeqz(s, ret, v1, t); 636 } else { 637 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 638 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 639 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 640 } 641} 642 643/* 644 * Branch helpers 645 */ 646 647static const struct { 648 LoongArchInsn op; 649 bool swap; 650} tcg_brcond_to_loongarch[] = { 651 [TCG_COND_EQ] = { OPC_BEQ, false }, 652 [TCG_COND_NE] = { OPC_BNE, false }, 653 [TCG_COND_LT] = { OPC_BGT, true }, 654 [TCG_COND_GE] = { OPC_BLE, true }, 655 [TCG_COND_LE] = { OPC_BLE, false }, 656 [TCG_COND_GT] = { OPC_BGT, false }, 657 [TCG_COND_LTU] = { OPC_BGTU, true }, 658 [TCG_COND_GEU] = { OPC_BLEU, true }, 659 [TCG_COND_LEU] = { OPC_BLEU, false }, 660 [TCG_COND_GTU] = { OPC_BGTU, false } 661}; 662 663static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 664 TCGReg arg2, TCGLabel *l) 665{ 666 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 667 668 tcg_debug_assert(op != 0); 669 670 if (tcg_brcond_to_loongarch[cond].swap) { 671 TCGReg t = arg1; 672 arg1 = arg2; 673 arg2 = t; 674 } 675 676 /* all conditional branch insns belong to DJSk16-format */ 677 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 678 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 679} 680 681static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 682{ 683 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 684 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 685 686 tcg_debug_assert((offset & 3) == 0); 687 if (offset == sextreg(offset, 0, 28)) { 688 /* short jump: +/- 256MiB */ 689 if (tail) { 690 tcg_out_opc_b(s, offset >> 2); 691 } else { 692 tcg_out_opc_bl(s, offset >> 2); 693 } 694 } else if (offset == sextreg(offset, 0, 38)) { 695 /* long jump: +/- 256GiB */ 696 tcg_target_long lo = sextreg(offset, 0, 18); 697 tcg_target_long hi = offset - lo; 698 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 699 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 700 } else { 701 /* far jump: 64-bit */ 702 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 703 tcg_target_long hi = (tcg_target_long)arg - lo; 704 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 705 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 706 } 707} 708 709static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 710 const TCGHelperInfo *info) 711{ 712 tcg_out_call_int(s, arg, false); 713} 714 715/* 716 * Load/store helpers 717 */ 718 719static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 720 TCGReg addr, intptr_t offset) 721{ 722 intptr_t imm12 = sextreg(offset, 0, 12); 723 724 if (offset != imm12) { 725 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 726 727 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 728 imm12 = sextreg(diff, 0, 12); 729 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 730 } else { 731 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 732 if (addr != TCG_REG_ZERO) { 733 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 734 } 735 } 736 addr = TCG_REG_TMP2; 737 } 738 739 switch (opc) { 740 case OPC_LD_B: 741 case OPC_LD_BU: 742 case OPC_LD_H: 743 case OPC_LD_HU: 744 case OPC_LD_W: 745 case OPC_LD_WU: 746 case OPC_LD_D: 747 case OPC_ST_B: 748 case OPC_ST_H: 749 case OPC_ST_W: 750 case OPC_ST_D: 751 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 752 break; 753 default: 754 g_assert_not_reached(); 755 } 756} 757 758static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 759 TCGReg arg1, intptr_t arg2) 760{ 761 bool is_32bit = type == TCG_TYPE_I32; 762 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 763} 764 765static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 766 TCGReg arg1, intptr_t arg2) 767{ 768 bool is_32bit = type == TCG_TYPE_I32; 769 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 770} 771 772static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 773 TCGReg base, intptr_t ofs) 774{ 775 if (val == 0) { 776 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 777 return true; 778 } 779 return false; 780} 781 782/* 783 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 784 */ 785 786#if defined(CONFIG_SOFTMMU) 787static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 788{ 789 tcg_out_opc_b(s, 0); 790 return reloc_br_sd10k16(s->code_ptr - 1, target); 791} 792 793static const TCGLdstHelperParam ldst_helper_param = { 794 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 795}; 796 797static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 798{ 799 MemOp opc = get_memop(l->oi); 800 801 /* resolve label address */ 802 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 803 return false; 804 } 805 806 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 807 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 808 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 809 return tcg_out_goto(s, l->raddr); 810} 811 812static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 813{ 814 MemOp opc = get_memop(l->oi); 815 816 /* resolve label address */ 817 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 818 return false; 819 } 820 821 tcg_out_st_helper_args(s, l, &ldst_helper_param); 822 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 823 return tcg_out_goto(s, l->raddr); 824} 825#else 826static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) 827{ 828 /* resolve label address */ 829 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 830 return false; 831 } 832 833 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); 834 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); 835 836 /* tail call, with the return address back inline. */ 837 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); 838 tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld 839 : helper_unaligned_st), true); 840 return true; 841} 842 843static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 844{ 845 return tcg_out_fail_alignment(s, l); 846} 847 848static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 849{ 850 return tcg_out_fail_alignment(s, l); 851} 852 853#endif /* CONFIG_SOFTMMU */ 854 855typedef struct { 856 TCGReg base; 857 TCGReg index; 858} HostAddress; 859 860/* 861 * For softmmu, perform the TLB load and compare. 862 * For useronly, perform any required alignment tests. 863 * In both cases, return a TCGLabelQemuLdst structure if the slow path 864 * is required and fill in @h with the host address for the fast path. 865 */ 866static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 867 TCGReg addr_reg, MemOpIdx oi, 868 bool is_ld) 869{ 870 TCGLabelQemuLdst *ldst = NULL; 871 MemOp opc = get_memop(oi); 872 unsigned a_bits = get_alignment_bits(opc); 873 874#ifdef CONFIG_SOFTMMU 875 unsigned s_bits = opc & MO_SIZE; 876 int mem_index = get_mmuidx(oi); 877 int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); 878 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 879 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 880 tcg_target_long compare_mask; 881 882 ldst = new_ldst_label(s); 883 ldst->is_ld = is_ld; 884 ldst->oi = oi; 885 ldst->addrlo_reg = addr_reg; 886 887 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 888 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); 889 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 890 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 891 892 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 893 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 894 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 895 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 896 897 /* Load the tlb comparator and the addend. */ 898 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, 899 is_ld ? offsetof(CPUTLBEntry, addr_read) 900 : offsetof(CPUTLBEntry, addr_write)); 901 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 902 offsetof(CPUTLBEntry, addend)); 903 904 /* We don't support unaligned accesses. */ 905 if (a_bits < s_bits) { 906 a_bits = s_bits; 907 } 908 /* Clear the non-page, non-alignment bits from the address. */ 909 compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); 910 tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); 911 tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg); 912 913 /* Compare masked address with the TLB entry. */ 914 ldst->label_ptr[0] = s->code_ptr; 915 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 916 917 h->index = TCG_REG_TMP2; 918#else 919 if (a_bits) { 920 ldst = new_ldst_label(s); 921 922 ldst->is_ld = is_ld; 923 ldst->oi = oi; 924 ldst->addrlo_reg = addr_reg; 925 926 /* 927 * Without micro-architecture details, we don't know which of 928 * bstrpick or andi is faster, so use bstrpick as it's not 929 * constrained by imm field width. Not to say alignments >= 2^12 930 * are going to happen any time soon. 931 */ 932 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 933 934 ldst->label_ptr[0] = s->code_ptr; 935 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 936 } 937 938 h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 939#endif 940 941 if (TARGET_LONG_BITS == 32) { 942 h->base = TCG_REG_TMP0; 943 tcg_out_ext32u(s, h->base, addr_reg); 944 } else { 945 h->base = addr_reg; 946 } 947 948 return ldst; 949} 950 951static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 952 TCGReg rd, HostAddress h) 953{ 954 /* Byte swapping is left to middle-end expansion. */ 955 tcg_debug_assert((opc & MO_BSWAP) == 0); 956 957 switch (opc & MO_SSIZE) { 958 case MO_UB: 959 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 960 break; 961 case MO_SB: 962 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 963 break; 964 case MO_UW: 965 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 966 break; 967 case MO_SW: 968 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 969 break; 970 case MO_UL: 971 if (type == TCG_TYPE_I64) { 972 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 973 break; 974 } 975 /* fallthrough */ 976 case MO_SL: 977 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 978 break; 979 case MO_UQ: 980 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 981 break; 982 default: 983 g_assert_not_reached(); 984 } 985} 986 987static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 988 MemOpIdx oi, TCGType data_type) 989{ 990 TCGLabelQemuLdst *ldst; 991 HostAddress h; 992 993 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 994 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 995 996 if (ldst) { 997 ldst->type = data_type; 998 ldst->datalo_reg = data_reg; 999 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1000 } 1001} 1002 1003static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1004 TCGReg rd, HostAddress h) 1005{ 1006 /* Byte swapping is left to middle-end expansion. */ 1007 tcg_debug_assert((opc & MO_BSWAP) == 0); 1008 1009 switch (opc & MO_SIZE) { 1010 case MO_8: 1011 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1012 break; 1013 case MO_16: 1014 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1015 break; 1016 case MO_32: 1017 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1018 break; 1019 case MO_64: 1020 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1021 break; 1022 default: 1023 g_assert_not_reached(); 1024 } 1025} 1026 1027static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1028 MemOpIdx oi, TCGType data_type) 1029{ 1030 TCGLabelQemuLdst *ldst; 1031 HostAddress h; 1032 1033 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1034 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1035 1036 if (ldst) { 1037 ldst->type = data_type; 1038 ldst->datalo_reg = data_reg; 1039 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1040 } 1041} 1042 1043/* 1044 * Entry-points 1045 */ 1046 1047static const tcg_insn_unit *tb_ret_addr; 1048 1049static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1050{ 1051 /* Reuse the zeroing that exists for goto_ptr. */ 1052 if (a0 == 0) { 1053 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1054 } else { 1055 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1056 tcg_out_call_int(s, tb_ret_addr, true); 1057 } 1058} 1059 1060static void tcg_out_goto_tb(TCGContext *s, int which) 1061{ 1062 /* 1063 * Direct branch, or load indirect address, to be patched 1064 * by tb_target_set_jmp_target. Check indirect load offset 1065 * in range early, regardless of direct branch distance, 1066 * via assert within tcg_out_opc_pcaddu2i. 1067 */ 1068 uintptr_t i_addr = get_jmp_target_addr(s, which); 1069 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1070 1071 set_jmp_insn_offset(s, which); 1072 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1073 1074 /* Finish the load and indirect branch. */ 1075 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1076 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1077 set_jmp_reset_offset(s, which); 1078} 1079 1080void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1081 uintptr_t jmp_rx, uintptr_t jmp_rw) 1082{ 1083 uintptr_t d_addr = tb->jmp_target_addr[n]; 1084 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1085 tcg_insn_unit insn; 1086 1087 /* Either directly branch, or load slot address for indirect branch. */ 1088 if (d_disp == sextreg(d_disp, 0, 26)) { 1089 insn = encode_sd10k16_insn(OPC_B, d_disp); 1090 } else { 1091 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1092 intptr_t i_disp = i_addr - jmp_rx; 1093 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1094 } 1095 1096 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1097 flush_idcache_range(jmp_rx, jmp_rw, 4); 1098} 1099 1100static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1101 const TCGArg args[TCG_MAX_OP_ARGS], 1102 const int const_args[TCG_MAX_OP_ARGS]) 1103{ 1104 TCGArg a0 = args[0]; 1105 TCGArg a1 = args[1]; 1106 TCGArg a2 = args[2]; 1107 int c2 = const_args[2]; 1108 1109 switch (opc) { 1110 case INDEX_op_mb: 1111 tcg_out_mb(s, a0); 1112 break; 1113 1114 case INDEX_op_goto_ptr: 1115 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1116 break; 1117 1118 case INDEX_op_br: 1119 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1120 0); 1121 tcg_out_opc_b(s, 0); 1122 break; 1123 1124 case INDEX_op_brcond_i32: 1125 case INDEX_op_brcond_i64: 1126 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1127 break; 1128 1129 case INDEX_op_extrh_i64_i32: 1130 tcg_out_opc_srai_d(s, a0, a1, 32); 1131 break; 1132 1133 case INDEX_op_not_i32: 1134 case INDEX_op_not_i64: 1135 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1136 break; 1137 1138 case INDEX_op_nor_i32: 1139 case INDEX_op_nor_i64: 1140 if (c2) { 1141 tcg_out_opc_ori(s, a0, a1, a2); 1142 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1143 } else { 1144 tcg_out_opc_nor(s, a0, a1, a2); 1145 } 1146 break; 1147 1148 case INDEX_op_andc_i32: 1149 case INDEX_op_andc_i64: 1150 if (c2) { 1151 /* guaranteed to fit due to constraint */ 1152 tcg_out_opc_andi(s, a0, a1, ~a2); 1153 } else { 1154 tcg_out_opc_andn(s, a0, a1, a2); 1155 } 1156 break; 1157 1158 case INDEX_op_orc_i32: 1159 case INDEX_op_orc_i64: 1160 if (c2) { 1161 /* guaranteed to fit due to constraint */ 1162 tcg_out_opc_ori(s, a0, a1, ~a2); 1163 } else { 1164 tcg_out_opc_orn(s, a0, a1, a2); 1165 } 1166 break; 1167 1168 case INDEX_op_and_i32: 1169 case INDEX_op_and_i64: 1170 if (c2) { 1171 tcg_out_opc_andi(s, a0, a1, a2); 1172 } else { 1173 tcg_out_opc_and(s, a0, a1, a2); 1174 } 1175 break; 1176 1177 case INDEX_op_or_i32: 1178 case INDEX_op_or_i64: 1179 if (c2) { 1180 tcg_out_opc_ori(s, a0, a1, a2); 1181 } else { 1182 tcg_out_opc_or(s, a0, a1, a2); 1183 } 1184 break; 1185 1186 case INDEX_op_xor_i32: 1187 case INDEX_op_xor_i64: 1188 if (c2) { 1189 tcg_out_opc_xori(s, a0, a1, a2); 1190 } else { 1191 tcg_out_opc_xor(s, a0, a1, a2); 1192 } 1193 break; 1194 1195 case INDEX_op_extract_i32: 1196 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1197 break; 1198 case INDEX_op_extract_i64: 1199 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1200 break; 1201 1202 case INDEX_op_deposit_i32: 1203 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1204 break; 1205 case INDEX_op_deposit_i64: 1206 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1207 break; 1208 1209 case INDEX_op_bswap16_i32: 1210 case INDEX_op_bswap16_i64: 1211 tcg_out_opc_revb_2h(s, a0, a1); 1212 if (a2 & TCG_BSWAP_OS) { 1213 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1214 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1215 tcg_out_ext16u(s, a0, a0); 1216 } 1217 break; 1218 1219 case INDEX_op_bswap32_i32: 1220 /* All 32-bit values are computed sign-extended in the register. */ 1221 a2 = TCG_BSWAP_OS; 1222 /* fallthrough */ 1223 case INDEX_op_bswap32_i64: 1224 tcg_out_opc_revb_2w(s, a0, a1); 1225 if (a2 & TCG_BSWAP_OS) { 1226 tcg_out_ext32s(s, a0, a0); 1227 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1228 tcg_out_ext32u(s, a0, a0); 1229 } 1230 break; 1231 1232 case INDEX_op_bswap64_i64: 1233 tcg_out_opc_revb_d(s, a0, a1); 1234 break; 1235 1236 case INDEX_op_clz_i32: 1237 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1238 break; 1239 case INDEX_op_clz_i64: 1240 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1241 break; 1242 1243 case INDEX_op_ctz_i32: 1244 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1245 break; 1246 case INDEX_op_ctz_i64: 1247 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1248 break; 1249 1250 case INDEX_op_shl_i32: 1251 if (c2) { 1252 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1253 } else { 1254 tcg_out_opc_sll_w(s, a0, a1, a2); 1255 } 1256 break; 1257 case INDEX_op_shl_i64: 1258 if (c2) { 1259 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1260 } else { 1261 tcg_out_opc_sll_d(s, a0, a1, a2); 1262 } 1263 break; 1264 1265 case INDEX_op_shr_i32: 1266 if (c2) { 1267 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1268 } else { 1269 tcg_out_opc_srl_w(s, a0, a1, a2); 1270 } 1271 break; 1272 case INDEX_op_shr_i64: 1273 if (c2) { 1274 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1275 } else { 1276 tcg_out_opc_srl_d(s, a0, a1, a2); 1277 } 1278 break; 1279 1280 case INDEX_op_sar_i32: 1281 if (c2) { 1282 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1283 } else { 1284 tcg_out_opc_sra_w(s, a0, a1, a2); 1285 } 1286 break; 1287 case INDEX_op_sar_i64: 1288 if (c2) { 1289 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1290 } else { 1291 tcg_out_opc_sra_d(s, a0, a1, a2); 1292 } 1293 break; 1294 1295 case INDEX_op_rotl_i32: 1296 /* transform into equivalent rotr/rotri */ 1297 if (c2) { 1298 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1299 } else { 1300 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1301 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1302 } 1303 break; 1304 case INDEX_op_rotl_i64: 1305 /* transform into equivalent rotr/rotri */ 1306 if (c2) { 1307 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1308 } else { 1309 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1310 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1311 } 1312 break; 1313 1314 case INDEX_op_rotr_i32: 1315 if (c2) { 1316 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1317 } else { 1318 tcg_out_opc_rotr_w(s, a0, a1, a2); 1319 } 1320 break; 1321 case INDEX_op_rotr_i64: 1322 if (c2) { 1323 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1324 } else { 1325 tcg_out_opc_rotr_d(s, a0, a1, a2); 1326 } 1327 break; 1328 1329 case INDEX_op_add_i32: 1330 if (c2) { 1331 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1332 } else { 1333 tcg_out_opc_add_w(s, a0, a1, a2); 1334 } 1335 break; 1336 case INDEX_op_add_i64: 1337 if (c2) { 1338 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1339 } else { 1340 tcg_out_opc_add_d(s, a0, a1, a2); 1341 } 1342 break; 1343 1344 case INDEX_op_sub_i32: 1345 if (c2) { 1346 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1347 } else { 1348 tcg_out_opc_sub_w(s, a0, a1, a2); 1349 } 1350 break; 1351 case INDEX_op_sub_i64: 1352 if (c2) { 1353 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1354 } else { 1355 tcg_out_opc_sub_d(s, a0, a1, a2); 1356 } 1357 break; 1358 1359 case INDEX_op_mul_i32: 1360 tcg_out_opc_mul_w(s, a0, a1, a2); 1361 break; 1362 case INDEX_op_mul_i64: 1363 tcg_out_opc_mul_d(s, a0, a1, a2); 1364 break; 1365 1366 case INDEX_op_mulsh_i32: 1367 tcg_out_opc_mulh_w(s, a0, a1, a2); 1368 break; 1369 case INDEX_op_mulsh_i64: 1370 tcg_out_opc_mulh_d(s, a0, a1, a2); 1371 break; 1372 1373 case INDEX_op_muluh_i32: 1374 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1375 break; 1376 case INDEX_op_muluh_i64: 1377 tcg_out_opc_mulh_du(s, a0, a1, a2); 1378 break; 1379 1380 case INDEX_op_div_i32: 1381 tcg_out_opc_div_w(s, a0, a1, a2); 1382 break; 1383 case INDEX_op_div_i64: 1384 tcg_out_opc_div_d(s, a0, a1, a2); 1385 break; 1386 1387 case INDEX_op_divu_i32: 1388 tcg_out_opc_div_wu(s, a0, a1, a2); 1389 break; 1390 case INDEX_op_divu_i64: 1391 tcg_out_opc_div_du(s, a0, a1, a2); 1392 break; 1393 1394 case INDEX_op_rem_i32: 1395 tcg_out_opc_mod_w(s, a0, a1, a2); 1396 break; 1397 case INDEX_op_rem_i64: 1398 tcg_out_opc_mod_d(s, a0, a1, a2); 1399 break; 1400 1401 case INDEX_op_remu_i32: 1402 tcg_out_opc_mod_wu(s, a0, a1, a2); 1403 break; 1404 case INDEX_op_remu_i64: 1405 tcg_out_opc_mod_du(s, a0, a1, a2); 1406 break; 1407 1408 case INDEX_op_setcond_i32: 1409 case INDEX_op_setcond_i64: 1410 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1411 break; 1412 1413 case INDEX_op_movcond_i32: 1414 case INDEX_op_movcond_i64: 1415 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1416 break; 1417 1418 case INDEX_op_ld8s_i32: 1419 case INDEX_op_ld8s_i64: 1420 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1421 break; 1422 case INDEX_op_ld8u_i32: 1423 case INDEX_op_ld8u_i64: 1424 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1425 break; 1426 case INDEX_op_ld16s_i32: 1427 case INDEX_op_ld16s_i64: 1428 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1429 break; 1430 case INDEX_op_ld16u_i32: 1431 case INDEX_op_ld16u_i64: 1432 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1433 break; 1434 case INDEX_op_ld_i32: 1435 case INDEX_op_ld32s_i64: 1436 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1437 break; 1438 case INDEX_op_ld32u_i64: 1439 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1440 break; 1441 case INDEX_op_ld_i64: 1442 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1443 break; 1444 1445 case INDEX_op_st8_i32: 1446 case INDEX_op_st8_i64: 1447 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1448 break; 1449 case INDEX_op_st16_i32: 1450 case INDEX_op_st16_i64: 1451 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1452 break; 1453 case INDEX_op_st_i32: 1454 case INDEX_op_st32_i64: 1455 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1456 break; 1457 case INDEX_op_st_i64: 1458 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1459 break; 1460 1461 case INDEX_op_qemu_ld_i32: 1462 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1463 break; 1464 case INDEX_op_qemu_ld_i64: 1465 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1466 break; 1467 case INDEX_op_qemu_st_i32: 1468 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1469 break; 1470 case INDEX_op_qemu_st_i64: 1471 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1472 break; 1473 1474 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1475 case INDEX_op_mov_i64: 1476 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1477 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1478 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1479 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1480 case INDEX_op_ext8s_i64: 1481 case INDEX_op_ext8u_i32: 1482 case INDEX_op_ext8u_i64: 1483 case INDEX_op_ext16s_i32: 1484 case INDEX_op_ext16s_i64: 1485 case INDEX_op_ext16u_i32: 1486 case INDEX_op_ext16u_i64: 1487 case INDEX_op_ext32s_i64: 1488 case INDEX_op_ext32u_i64: 1489 case INDEX_op_ext_i32_i64: 1490 case INDEX_op_extu_i32_i64: 1491 case INDEX_op_extrl_i64_i32: 1492 default: 1493 g_assert_not_reached(); 1494 } 1495} 1496 1497static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1498{ 1499 switch (op) { 1500 case INDEX_op_goto_ptr: 1501 return C_O0_I1(r); 1502 1503 case INDEX_op_st8_i32: 1504 case INDEX_op_st8_i64: 1505 case INDEX_op_st16_i32: 1506 case INDEX_op_st16_i64: 1507 case INDEX_op_st32_i64: 1508 case INDEX_op_st_i32: 1509 case INDEX_op_st_i64: 1510 case INDEX_op_qemu_st_i32: 1511 case INDEX_op_qemu_st_i64: 1512 return C_O0_I2(rZ, r); 1513 1514 case INDEX_op_brcond_i32: 1515 case INDEX_op_brcond_i64: 1516 return C_O0_I2(rZ, rZ); 1517 1518 case INDEX_op_ext8s_i32: 1519 case INDEX_op_ext8s_i64: 1520 case INDEX_op_ext8u_i32: 1521 case INDEX_op_ext8u_i64: 1522 case INDEX_op_ext16s_i32: 1523 case INDEX_op_ext16s_i64: 1524 case INDEX_op_ext16u_i32: 1525 case INDEX_op_ext16u_i64: 1526 case INDEX_op_ext32s_i64: 1527 case INDEX_op_ext32u_i64: 1528 case INDEX_op_extu_i32_i64: 1529 case INDEX_op_extrl_i64_i32: 1530 case INDEX_op_extrh_i64_i32: 1531 case INDEX_op_ext_i32_i64: 1532 case INDEX_op_not_i32: 1533 case INDEX_op_not_i64: 1534 case INDEX_op_extract_i32: 1535 case INDEX_op_extract_i64: 1536 case INDEX_op_bswap16_i32: 1537 case INDEX_op_bswap16_i64: 1538 case INDEX_op_bswap32_i32: 1539 case INDEX_op_bswap32_i64: 1540 case INDEX_op_bswap64_i64: 1541 case INDEX_op_ld8s_i32: 1542 case INDEX_op_ld8s_i64: 1543 case INDEX_op_ld8u_i32: 1544 case INDEX_op_ld8u_i64: 1545 case INDEX_op_ld16s_i32: 1546 case INDEX_op_ld16s_i64: 1547 case INDEX_op_ld16u_i32: 1548 case INDEX_op_ld16u_i64: 1549 case INDEX_op_ld32s_i64: 1550 case INDEX_op_ld32u_i64: 1551 case INDEX_op_ld_i32: 1552 case INDEX_op_ld_i64: 1553 case INDEX_op_qemu_ld_i32: 1554 case INDEX_op_qemu_ld_i64: 1555 return C_O1_I1(r, r); 1556 1557 case INDEX_op_andc_i32: 1558 case INDEX_op_andc_i64: 1559 case INDEX_op_orc_i32: 1560 case INDEX_op_orc_i64: 1561 /* 1562 * LoongArch insns for these ops don't have reg-imm forms, but we 1563 * can express using andi/ori if ~constant satisfies 1564 * TCG_CT_CONST_U12. 1565 */ 1566 return C_O1_I2(r, r, rC); 1567 1568 case INDEX_op_shl_i32: 1569 case INDEX_op_shl_i64: 1570 case INDEX_op_shr_i32: 1571 case INDEX_op_shr_i64: 1572 case INDEX_op_sar_i32: 1573 case INDEX_op_sar_i64: 1574 case INDEX_op_rotl_i32: 1575 case INDEX_op_rotl_i64: 1576 case INDEX_op_rotr_i32: 1577 case INDEX_op_rotr_i64: 1578 return C_O1_I2(r, r, ri); 1579 1580 case INDEX_op_add_i32: 1581 return C_O1_I2(r, r, ri); 1582 case INDEX_op_add_i64: 1583 return C_O1_I2(r, r, rJ); 1584 1585 case INDEX_op_and_i32: 1586 case INDEX_op_and_i64: 1587 case INDEX_op_nor_i32: 1588 case INDEX_op_nor_i64: 1589 case INDEX_op_or_i32: 1590 case INDEX_op_or_i64: 1591 case INDEX_op_xor_i32: 1592 case INDEX_op_xor_i64: 1593 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 1594 return C_O1_I2(r, r, rU); 1595 1596 case INDEX_op_clz_i32: 1597 case INDEX_op_clz_i64: 1598 case INDEX_op_ctz_i32: 1599 case INDEX_op_ctz_i64: 1600 return C_O1_I2(r, r, rW); 1601 1602 case INDEX_op_deposit_i32: 1603 case INDEX_op_deposit_i64: 1604 /* Must deposit into the same register as input */ 1605 return C_O1_I2(r, 0, rZ); 1606 1607 case INDEX_op_sub_i32: 1608 case INDEX_op_setcond_i32: 1609 return C_O1_I2(r, rZ, ri); 1610 case INDEX_op_sub_i64: 1611 case INDEX_op_setcond_i64: 1612 return C_O1_I2(r, rZ, rJ); 1613 1614 case INDEX_op_mul_i32: 1615 case INDEX_op_mul_i64: 1616 case INDEX_op_mulsh_i32: 1617 case INDEX_op_mulsh_i64: 1618 case INDEX_op_muluh_i32: 1619 case INDEX_op_muluh_i64: 1620 case INDEX_op_div_i32: 1621 case INDEX_op_div_i64: 1622 case INDEX_op_divu_i32: 1623 case INDEX_op_divu_i64: 1624 case INDEX_op_rem_i32: 1625 case INDEX_op_rem_i64: 1626 case INDEX_op_remu_i32: 1627 case INDEX_op_remu_i64: 1628 return C_O1_I2(r, rZ, rZ); 1629 1630 case INDEX_op_movcond_i32: 1631 case INDEX_op_movcond_i64: 1632 return C_O1_I4(r, rZ, rJ, rZ, rZ); 1633 1634 default: 1635 g_assert_not_reached(); 1636 } 1637} 1638 1639static const int tcg_target_callee_save_regs[] = { 1640 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 1641 TCG_REG_S1, 1642 TCG_REG_S2, 1643 TCG_REG_S3, 1644 TCG_REG_S4, 1645 TCG_REG_S5, 1646 TCG_REG_S6, 1647 TCG_REG_S7, 1648 TCG_REG_S8, 1649 TCG_REG_S9, 1650 TCG_REG_RA, /* should be last for ABI compliance */ 1651}; 1652 1653/* Stack frame parameters. */ 1654#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 1655#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 1656#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 1657#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 1658 + TCG_TARGET_STACK_ALIGN - 1) \ 1659 & -TCG_TARGET_STACK_ALIGN) 1660#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 1661 1662/* We're expecting to be able to use an immediate for frame allocation. */ 1663QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 1664 1665/* Generate global QEMU prologue and epilogue code */ 1666static void tcg_target_qemu_prologue(TCGContext *s) 1667{ 1668 int i; 1669 1670 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 1671 1672 /* TB prologue */ 1673 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 1674 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1675 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1676 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1677 } 1678 1679#if !defined(CONFIG_SOFTMMU) 1680 if (USE_GUEST_BASE) { 1681 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 1682 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 1683 } 1684#endif 1685 1686 /* Call generated code */ 1687 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 1688 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 1689 1690 /* Return path for goto_ptr. Set return value to 0 */ 1691 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 1692 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 1693 1694 /* TB epilogue */ 1695 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 1696 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1697 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1698 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1699 } 1700 1701 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 1702 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 1703} 1704 1705static void tcg_target_init(TCGContext *s) 1706{ 1707 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 1708 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 1709 1710 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 1711 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 1712 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 1713 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 1714 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 1715 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 1716 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 1717 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 1718 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 1719 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 1720 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 1721 1722 s->reserved_regs = 0; 1723 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 1724 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 1725 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 1726 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 1727 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 1728 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 1729 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 1730} 1731 1732typedef struct { 1733 DebugFrameHeader h; 1734 uint8_t fde_def_cfa[4]; 1735 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 1736} DebugFrame; 1737 1738#define ELF_HOST_MACHINE EM_LOONGARCH 1739 1740static const DebugFrame debug_frame = { 1741 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 1742 .h.cie.id = -1, 1743 .h.cie.version = 1, 1744 .h.cie.code_align = 1, 1745 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 1746 .h.cie.return_column = TCG_REG_RA, 1747 1748 /* Total FDE size does not include the "len" member. */ 1749 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 1750 1751 .fde_def_cfa = { 1752 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 1753 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 1754 (FRAME_SIZE >> 7) 1755 }, 1756 .fde_reg_ofs = { 1757 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 1758 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 1759 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 1760 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 1761 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 1762 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 1763 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 1764 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 1765 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 1766 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 1767 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 1768 } 1769}; 1770 1771void tcg_register_jit(const void *buf, size_t buf_size) 1772{ 1773 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 1774} 1775