1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33 34#ifdef CONFIG_DEBUG_TCG 35static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 36 "zero", 37 "ra", 38 "tp", 39 "sp", 40 "a0", 41 "a1", 42 "a2", 43 "a3", 44 "a4", 45 "a5", 46 "a6", 47 "a7", 48 "t0", 49 "t1", 50 "t2", 51 "t3", 52 "t4", 53 "t5", 54 "t6", 55 "t7", 56 "t8", 57 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 58 "s9", 59 "s0", 60 "s1", 61 "s2", 62 "s3", 63 "s4", 64 "s5", 65 "s6", 66 "s7", 67 "s8" 68}; 69#endif 70 71static const int tcg_target_reg_alloc_order[] = { 72 /* Registers preserved across calls */ 73 /* TCG_REG_S0 reserved for TCG_AREG0 */ 74 TCG_REG_S1, 75 TCG_REG_S2, 76 TCG_REG_S3, 77 TCG_REG_S4, 78 TCG_REG_S5, 79 TCG_REG_S6, 80 TCG_REG_S7, 81 TCG_REG_S8, 82 TCG_REG_S9, 83 84 /* Registers (potentially) clobbered across calls */ 85 TCG_REG_T0, 86 TCG_REG_T1, 87 TCG_REG_T2, 88 TCG_REG_T3, 89 TCG_REG_T4, 90 TCG_REG_T5, 91 TCG_REG_T6, 92 TCG_REG_T7, 93 TCG_REG_T8, 94 95 /* Argument registers, opposite order of allocation. */ 96 TCG_REG_A7, 97 TCG_REG_A6, 98 TCG_REG_A5, 99 TCG_REG_A4, 100 TCG_REG_A3, 101 TCG_REG_A2, 102 TCG_REG_A1, 103 TCG_REG_A0, 104}; 105 106static const int tcg_target_call_iarg_regs[] = { 107 TCG_REG_A0, 108 TCG_REG_A1, 109 TCG_REG_A2, 110 TCG_REG_A3, 111 TCG_REG_A4, 112 TCG_REG_A5, 113 TCG_REG_A6, 114 TCG_REG_A7, 115}; 116 117static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 118{ 119 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 120 tcg_debug_assert(slot >= 0 && slot <= 1); 121 return TCG_REG_A0 + slot; 122} 123 124#ifndef CONFIG_SOFTMMU 125#define USE_GUEST_BASE (guest_base != 0) 126#define TCG_GUEST_BASE_REG TCG_REG_S1 127#endif 128 129#define TCG_CT_CONST_ZERO 0x100 130#define TCG_CT_CONST_S12 0x200 131#define TCG_CT_CONST_S32 0x400 132#define TCG_CT_CONST_U12 0x800 133#define TCG_CT_CONST_C12 0x1000 134#define TCG_CT_CONST_WSZ 0x2000 135 136#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 137 138static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 139{ 140 return sextract64(val, pos, len); 141} 142 143/* test if a constant matches the constraint */ 144static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 145{ 146 if (ct & TCG_CT_CONST) { 147 return true; 148 } 149 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 150 return true; 151 } 152 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 153 return true; 154 } 155 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 156 return true; 157 } 158 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 159 return true; 160 } 161 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 162 return true; 163 } 164 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 165 return true; 166 } 167 return false; 168} 169 170/* 171 * Relocations 172 */ 173 174/* 175 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 176 * complicated; a whopping stack machine is needed to stuff the fields, at 177 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 178 * needed. 179 * 180 * Hence, define our own simpler relocation types. Numbers are chosen as to 181 * not collide with potential future additions to the true ELF relocation 182 * type enum. 183 */ 184 185/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 186#define R_LOONGARCH_BR_SK16 256 187/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 188#define R_LOONGARCH_BR_SD10K16 257 189 190static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 191{ 192 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 193 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 194 195 tcg_debug_assert((offset & 3) == 0); 196 offset >>= 2; 197 if (offset == sextreg(offset, 0, 16)) { 198 *src_rw = deposit64(*src_rw, 10, 16, offset); 199 return true; 200 } 201 202 return false; 203} 204 205static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 206 const tcg_insn_unit *target) 207{ 208 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 209 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 210 211 tcg_debug_assert((offset & 3) == 0); 212 offset >>= 2; 213 if (offset == sextreg(offset, 0, 26)) { 214 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 215 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 216 return true; 217 } 218 219 return false; 220} 221 222static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 223 intptr_t value, intptr_t addend) 224{ 225 tcg_debug_assert(addend == 0); 226 switch (type) { 227 case R_LOONGARCH_BR_SK16: 228 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 229 case R_LOONGARCH_BR_SD10K16: 230 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 231 default: 232 g_assert_not_reached(); 233 } 234} 235 236#include "tcg-insn-defs.c.inc" 237 238/* 239 * TCG intrinsics 240 */ 241 242static void tcg_out_mb(TCGContext *s, TCGArg a0) 243{ 244 /* Baseline LoongArch only has the full barrier, unfortunately. */ 245 tcg_out_opc_dbar(s, 0); 246} 247 248static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 249{ 250 if (ret == arg) { 251 return true; 252 } 253 switch (type) { 254 case TCG_TYPE_I32: 255 case TCG_TYPE_I64: 256 /* 257 * Conventional register-register move used in LoongArch is 258 * `or dst, src, zero`. 259 */ 260 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 261 break; 262 default: 263 g_assert_not_reached(); 264 } 265 return true; 266} 267 268/* Loads a 32-bit immediate into rd, sign-extended. */ 269static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 270{ 271 tcg_target_long lo = sextreg(val, 0, 12); 272 tcg_target_long hi12 = sextreg(val, 12, 20); 273 274 /* Single-instruction cases. */ 275 if (hi12 == 0) { 276 /* val fits in uimm12: ori rd, zero, val */ 277 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 278 return; 279 } 280 if (hi12 == sextreg(lo, 12, 20)) { 281 /* val fits in simm12: addi.w rd, zero, val */ 282 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 283 return; 284 } 285 286 /* High bits must be set; load with lu12i.w + optional ori. */ 287 tcg_out_opc_lu12i_w(s, rd, hi12); 288 if (lo != 0) { 289 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 290 } 291} 292 293static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 294 tcg_target_long val) 295{ 296 /* 297 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 298 * with dedicated instructions for filling the respective bitfields 299 * below: 300 * 301 * 6 5 4 3 302 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 303 * +-----------------------+---------------------------------------+... 304 * | hi52 | hi32 | 305 * +-----------------------+---------------------------------------+... 306 * 3 2 1 307 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 308 * ...+-------------------------------------+-------------------------+ 309 * | hi12 | lo | 310 * ...+-------------------------------------+-------------------------+ 311 * 312 * Check if val belong to one of the several fast cases, before falling 313 * back to the slow path. 314 */ 315 316 intptr_t pc_offset; 317 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 318 tcg_target_long hi12, hi32, hi52; 319 320 /* Value fits in signed i32. */ 321 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 322 tcg_out_movi_i32(s, rd, val); 323 return; 324 } 325 326 /* PC-relative cases. */ 327 pc_offset = tcg_pcrel_diff(s, (void *)val); 328 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 329 /* Single pcaddu2i. */ 330 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 331 return; 332 } 333 334 if (pc_offset == (int32_t)pc_offset) { 335 /* Offset within 32 bits; load with pcalau12i + ori. */ 336 val_lo = sextreg(val, 0, 12); 337 val_hi = val >> 12; 338 pc_hi = (val - pc_offset) >> 12; 339 offset_hi = val_hi - pc_hi; 340 341 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 342 tcg_out_opc_pcalau12i(s, rd, offset_hi); 343 if (val_lo != 0) { 344 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 345 } 346 return; 347 } 348 349 hi12 = sextreg(val, 12, 20); 350 hi32 = sextreg(val, 32, 20); 351 hi52 = sextreg(val, 52, 12); 352 353 /* Single cu52i.d case. */ 354 if ((hi52 != 0) && (ctz64(val) >= 52)) { 355 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 356 return; 357 } 358 359 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 360 tcg_out_movi_i32(s, rd, val); 361 362 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 363 if (hi32 != sextreg(hi12, 20, 20)) { 364 tcg_out_opc_cu32i_d(s, rd, hi32); 365 } 366 367 if (hi52 != sextreg(hi32, 20, 12)) { 368 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 369 } 370} 371 372static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 373 TCGReg rs, tcg_target_long imm) 374{ 375 tcg_target_long lo12 = sextreg(imm, 0, 12); 376 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 377 378 /* 379 * Note that there's a hole in between hi16 and lo12: 380 * 381 * 3 2 1 0 382 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 383 * ...+-------------------------------+-------+-----------------------+ 384 * | hi16 | | lo12 | 385 * ...+-------------------------------+-------+-----------------------+ 386 * 387 * For bits within that hole, it's more efficient to use LU12I and ADD. 388 */ 389 if (imm == (hi16 << 16) + lo12) { 390 if (hi16) { 391 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 392 rs = rd; 393 } 394 if (type == TCG_TYPE_I32) { 395 tcg_out_opc_addi_w(s, rd, rs, lo12); 396 } else if (lo12) { 397 tcg_out_opc_addi_d(s, rd, rs, lo12); 398 } else { 399 tcg_out_mov(s, type, rd, rs); 400 } 401 } else { 402 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 403 if (type == TCG_TYPE_I32) { 404 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 405 } else { 406 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 407 } 408 } 409} 410 411static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 412{ 413 return false; 414} 415 416static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 417 tcg_target_long imm) 418{ 419 /* This function is only used for passing structs by reference. */ 420 g_assert_not_reached(); 421} 422 423static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 424{ 425 tcg_out_opc_andi(s, ret, arg, 0xff); 426} 427 428static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 429{ 430 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 431} 432 433static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 434{ 435 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 436} 437 438static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 439{ 440 tcg_out_opc_sext_b(s, ret, arg); 441} 442 443static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 444{ 445 tcg_out_opc_sext_h(s, ret, arg); 446} 447 448static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 449{ 450 tcg_out_opc_addi_w(s, ret, arg, 0); 451} 452 453static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 454{ 455 if (ret != arg) { 456 tcg_out_ext32s(s, ret, arg); 457 } 458} 459 460static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 461{ 462 tcg_out_ext32u(s, ret, arg); 463} 464 465static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 466{ 467 tcg_out_ext32s(s, ret, arg); 468} 469 470static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 471 TCGReg a0, TCGReg a1, TCGReg a2, 472 bool c2, bool is_32bit) 473{ 474 if (c2) { 475 /* 476 * Fast path: semantics already satisfied due to constraint and 477 * insn behavior, single instruction is enough. 478 */ 479 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 480 /* all clz/ctz insns belong to DJ-format */ 481 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 482 return; 483 } 484 485 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 486 /* a0 = a1 ? REG_TMP0 : a2 */ 487 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 488 tcg_out_opc_masknez(s, a0, a2, a1); 489 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 490} 491 492#define SETCOND_INV TCG_TARGET_NB_REGS 493#define SETCOND_NEZ (SETCOND_INV << 1) 494#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 495 496static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 497 TCGReg arg1, tcg_target_long arg2, bool c2) 498{ 499 int flags = 0; 500 501 switch (cond) { 502 case TCG_COND_EQ: /* -> NE */ 503 case TCG_COND_GE: /* -> LT */ 504 case TCG_COND_GEU: /* -> LTU */ 505 case TCG_COND_GT: /* -> LE */ 506 case TCG_COND_GTU: /* -> LEU */ 507 cond = tcg_invert_cond(cond); 508 flags ^= SETCOND_INV; 509 break; 510 default: 511 break; 512 } 513 514 switch (cond) { 515 case TCG_COND_LE: 516 case TCG_COND_LEU: 517 /* 518 * If we have a constant input, the most efficient way to implement 519 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 520 * We don't need to care for this for LE because the constant input 521 * is still constrained to int32_t, and INT32_MAX+1 is representable 522 * in the 64-bit temporary register. 523 */ 524 if (c2) { 525 if (cond == TCG_COND_LEU) { 526 /* unsigned <= -1 is true */ 527 if (arg2 == -1) { 528 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 529 return ret; 530 } 531 cond = TCG_COND_LTU; 532 } else { 533 cond = TCG_COND_LT; 534 } 535 arg2 += 1; 536 } else { 537 TCGReg tmp = arg2; 538 arg2 = arg1; 539 arg1 = tmp; 540 cond = tcg_swap_cond(cond); /* LE -> GE */ 541 cond = tcg_invert_cond(cond); /* GE -> LT */ 542 flags ^= SETCOND_INV; 543 } 544 break; 545 default: 546 break; 547 } 548 549 switch (cond) { 550 case TCG_COND_NE: 551 flags |= SETCOND_NEZ; 552 if (!c2) { 553 tcg_out_opc_xor(s, ret, arg1, arg2); 554 } else if (arg2 == 0) { 555 ret = arg1; 556 } else if (arg2 >= 0 && arg2 <= 0xfff) { 557 tcg_out_opc_xori(s, ret, arg1, arg2); 558 } else { 559 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 560 } 561 break; 562 563 case TCG_COND_LT: 564 case TCG_COND_LTU: 565 if (c2) { 566 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 567 if (cond == TCG_COND_LT) { 568 tcg_out_opc_slti(s, ret, arg1, arg2); 569 } else { 570 tcg_out_opc_sltui(s, ret, arg1, arg2); 571 } 572 break; 573 } 574 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 575 arg2 = TCG_REG_TMP0; 576 } 577 if (cond == TCG_COND_LT) { 578 tcg_out_opc_slt(s, ret, arg1, arg2); 579 } else { 580 tcg_out_opc_sltu(s, ret, arg1, arg2); 581 } 582 break; 583 584 default: 585 g_assert_not_reached(); 586 break; 587 } 588 589 return ret | flags; 590} 591 592static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 593 TCGReg arg1, tcg_target_long arg2, bool c2) 594{ 595 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 596 597 if (tmpflags != ret) { 598 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 599 600 switch (tmpflags & SETCOND_FLAGS) { 601 case SETCOND_INV: 602 /* Intermediate result is boolean: simply invert. */ 603 tcg_out_opc_xori(s, ret, tmp, 1); 604 break; 605 case SETCOND_NEZ: 606 /* Intermediate result is zero/non-zero: test != 0. */ 607 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 608 break; 609 case SETCOND_NEZ | SETCOND_INV: 610 /* Intermediate result is zero/non-zero: test == 0. */ 611 tcg_out_opc_sltui(s, ret, tmp, 1); 612 break; 613 default: 614 g_assert_not_reached(); 615 } 616 } 617} 618 619static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 620 TCGReg c1, tcg_target_long c2, bool const2, 621 TCGReg v1, TCGReg v2) 622{ 623 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 624 TCGReg t; 625 626 /* Standardize the test below to t != 0. */ 627 if (tmpflags & SETCOND_INV) { 628 t = v1, v1 = v2, v2 = t; 629 } 630 631 t = tmpflags & ~SETCOND_FLAGS; 632 if (v1 == TCG_REG_ZERO) { 633 tcg_out_opc_masknez(s, ret, v2, t); 634 } else if (v2 == TCG_REG_ZERO) { 635 tcg_out_opc_maskeqz(s, ret, v1, t); 636 } else { 637 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 638 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 639 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 640 } 641} 642 643/* 644 * Branch helpers 645 */ 646 647static const struct { 648 LoongArchInsn op; 649 bool swap; 650} tcg_brcond_to_loongarch[] = { 651 [TCG_COND_EQ] = { OPC_BEQ, false }, 652 [TCG_COND_NE] = { OPC_BNE, false }, 653 [TCG_COND_LT] = { OPC_BGT, true }, 654 [TCG_COND_GE] = { OPC_BLE, true }, 655 [TCG_COND_LE] = { OPC_BLE, false }, 656 [TCG_COND_GT] = { OPC_BGT, false }, 657 [TCG_COND_LTU] = { OPC_BGTU, true }, 658 [TCG_COND_GEU] = { OPC_BLEU, true }, 659 [TCG_COND_LEU] = { OPC_BLEU, false }, 660 [TCG_COND_GTU] = { OPC_BGTU, false } 661}; 662 663static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 664 TCGReg arg2, TCGLabel *l) 665{ 666 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 667 668 tcg_debug_assert(op != 0); 669 670 if (tcg_brcond_to_loongarch[cond].swap) { 671 TCGReg t = arg1; 672 arg1 = arg2; 673 arg2 = t; 674 } 675 676 /* all conditional branch insns belong to DJSk16-format */ 677 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 678 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 679} 680 681static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 682{ 683 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 684 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 685 686 tcg_debug_assert((offset & 3) == 0); 687 if (offset == sextreg(offset, 0, 28)) { 688 /* short jump: +/- 256MiB */ 689 if (tail) { 690 tcg_out_opc_b(s, offset >> 2); 691 } else { 692 tcg_out_opc_bl(s, offset >> 2); 693 } 694 } else if (offset == sextreg(offset, 0, 38)) { 695 /* long jump: +/- 256GiB */ 696 tcg_target_long lo = sextreg(offset, 0, 18); 697 tcg_target_long hi = offset - lo; 698 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 699 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 700 } else { 701 /* far jump: 64-bit */ 702 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 703 tcg_target_long hi = (tcg_target_long)arg - lo; 704 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 705 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 706 } 707} 708 709static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 710 const TCGHelperInfo *info) 711{ 712 tcg_out_call_int(s, arg, false); 713} 714 715/* 716 * Load/store helpers 717 */ 718 719static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 720 TCGReg addr, intptr_t offset) 721{ 722 intptr_t imm12 = sextreg(offset, 0, 12); 723 724 if (offset != imm12) { 725 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 726 727 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 728 imm12 = sextreg(diff, 0, 12); 729 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 730 } else { 731 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 732 if (addr != TCG_REG_ZERO) { 733 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 734 } 735 } 736 addr = TCG_REG_TMP2; 737 } 738 739 switch (opc) { 740 case OPC_LD_B: 741 case OPC_LD_BU: 742 case OPC_LD_H: 743 case OPC_LD_HU: 744 case OPC_LD_W: 745 case OPC_LD_WU: 746 case OPC_LD_D: 747 case OPC_ST_B: 748 case OPC_ST_H: 749 case OPC_ST_W: 750 case OPC_ST_D: 751 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 752 break; 753 default: 754 g_assert_not_reached(); 755 } 756} 757 758static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 759 TCGReg arg1, intptr_t arg2) 760{ 761 bool is_32bit = type == TCG_TYPE_I32; 762 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 763} 764 765static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 766 TCGReg arg1, intptr_t arg2) 767{ 768 bool is_32bit = type == TCG_TYPE_I32; 769 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 770} 771 772static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 773 TCGReg base, intptr_t ofs) 774{ 775 if (val == 0) { 776 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 777 return true; 778 } 779 return false; 780} 781 782/* 783 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 784 */ 785 786#if defined(CONFIG_SOFTMMU) 787/* 788 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, 789 * MemOpIdx oi, uintptr_t ra) 790 */ 791static void * const qemu_ld_helpers[4] = { 792 [MO_8] = helper_ret_ldub_mmu, 793 [MO_16] = helper_le_lduw_mmu, 794 [MO_32] = helper_le_ldul_mmu, 795 [MO_64] = helper_le_ldq_mmu, 796}; 797 798/* 799 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, 800 * uintxx_t val, MemOpIdx oi, 801 * uintptr_t ra) 802 */ 803static void * const qemu_st_helpers[4] = { 804 [MO_8] = helper_ret_stb_mmu, 805 [MO_16] = helper_le_stw_mmu, 806 [MO_32] = helper_le_stl_mmu, 807 [MO_64] = helper_le_stq_mmu, 808}; 809 810static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 811{ 812 tcg_out_opc_b(s, 0); 813 return reloc_br_sd10k16(s->code_ptr - 1, target); 814} 815 816static const TCGLdstHelperParam ldst_helper_param = { 817 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 818}; 819 820static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 821{ 822 MemOp opc = get_memop(l->oi); 823 824 /* resolve label address */ 825 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 826 return false; 827 } 828 829 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 830 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 831 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 832 return tcg_out_goto(s, l->raddr); 833} 834 835static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 836{ 837 MemOp opc = get_memop(l->oi); 838 839 /* resolve label address */ 840 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 841 return false; 842 } 843 844 tcg_out_st_helper_args(s, l, &ldst_helper_param); 845 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 846 return tcg_out_goto(s, l->raddr); 847} 848#else 849static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) 850{ 851 /* resolve label address */ 852 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 853 return false; 854 } 855 856 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); 857 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); 858 859 /* tail call, with the return address back inline. */ 860 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); 861 tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld 862 : helper_unaligned_st), true); 863 return true; 864} 865 866static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 867{ 868 return tcg_out_fail_alignment(s, l); 869} 870 871static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 872{ 873 return tcg_out_fail_alignment(s, l); 874} 875 876#endif /* CONFIG_SOFTMMU */ 877 878typedef struct { 879 TCGReg base; 880 TCGReg index; 881} HostAddress; 882 883/* 884 * For softmmu, perform the TLB load and compare. 885 * For useronly, perform any required alignment tests. 886 * In both cases, return a TCGLabelQemuLdst structure if the slow path 887 * is required and fill in @h with the host address for the fast path. 888 */ 889static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 890 TCGReg addr_reg, MemOpIdx oi, 891 bool is_ld) 892{ 893 TCGLabelQemuLdst *ldst = NULL; 894 MemOp opc = get_memop(oi); 895 unsigned a_bits = get_alignment_bits(opc); 896 897#ifdef CONFIG_SOFTMMU 898 unsigned s_bits = opc & MO_SIZE; 899 int mem_index = get_mmuidx(oi); 900 int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); 901 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 902 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 903 tcg_target_long compare_mask; 904 905 ldst = new_ldst_label(s); 906 ldst->is_ld = is_ld; 907 ldst->oi = oi; 908 ldst->addrlo_reg = addr_reg; 909 910 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 911 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); 912 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 913 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 914 915 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 916 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 917 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 918 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 919 920 /* Load the tlb comparator and the addend. */ 921 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, 922 is_ld ? offsetof(CPUTLBEntry, addr_read) 923 : offsetof(CPUTLBEntry, addr_write)); 924 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 925 offsetof(CPUTLBEntry, addend)); 926 927 /* We don't support unaligned accesses. */ 928 if (a_bits < s_bits) { 929 a_bits = s_bits; 930 } 931 /* Clear the non-page, non-alignment bits from the address. */ 932 compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); 933 tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); 934 tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg); 935 936 /* Compare masked address with the TLB entry. */ 937 ldst->label_ptr[0] = s->code_ptr; 938 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 939 940 h->index = TCG_REG_TMP2; 941#else 942 if (a_bits) { 943 ldst = new_ldst_label(s); 944 945 ldst->is_ld = is_ld; 946 ldst->oi = oi; 947 ldst->addrlo_reg = addr_reg; 948 949 /* 950 * Without micro-architecture details, we don't know which of 951 * bstrpick or andi is faster, so use bstrpick as it's not 952 * constrained by imm field width. Not to say alignments >= 2^12 953 * are going to happen any time soon. 954 */ 955 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 956 957 ldst->label_ptr[0] = s->code_ptr; 958 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 959 } 960 961 h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 962#endif 963 964 if (TARGET_LONG_BITS == 32) { 965 h->base = TCG_REG_TMP0; 966 tcg_out_ext32u(s, h->base, addr_reg); 967 } else { 968 h->base = addr_reg; 969 } 970 971 return ldst; 972} 973 974static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 975 TCGReg rd, HostAddress h) 976{ 977 /* Byte swapping is left to middle-end expansion. */ 978 tcg_debug_assert((opc & MO_BSWAP) == 0); 979 980 switch (opc & MO_SSIZE) { 981 case MO_UB: 982 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 983 break; 984 case MO_SB: 985 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 986 break; 987 case MO_UW: 988 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 989 break; 990 case MO_SW: 991 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 992 break; 993 case MO_UL: 994 if (type == TCG_TYPE_I64) { 995 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 996 break; 997 } 998 /* fallthrough */ 999 case MO_SL: 1000 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1001 break; 1002 case MO_UQ: 1003 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1004 break; 1005 default: 1006 g_assert_not_reached(); 1007 } 1008} 1009 1010static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1011 MemOpIdx oi, TCGType data_type) 1012{ 1013 TCGLabelQemuLdst *ldst; 1014 HostAddress h; 1015 1016 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1017 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1018 1019 if (ldst) { 1020 ldst->type = data_type; 1021 ldst->datalo_reg = data_reg; 1022 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1023 } 1024} 1025 1026static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1027 TCGReg rd, HostAddress h) 1028{ 1029 /* Byte swapping is left to middle-end expansion. */ 1030 tcg_debug_assert((opc & MO_BSWAP) == 0); 1031 1032 switch (opc & MO_SIZE) { 1033 case MO_8: 1034 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1035 break; 1036 case MO_16: 1037 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1038 break; 1039 case MO_32: 1040 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1041 break; 1042 case MO_64: 1043 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1044 break; 1045 default: 1046 g_assert_not_reached(); 1047 } 1048} 1049 1050static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1051 MemOpIdx oi, TCGType data_type) 1052{ 1053 TCGLabelQemuLdst *ldst; 1054 HostAddress h; 1055 1056 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1057 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1058 1059 if (ldst) { 1060 ldst->type = data_type; 1061 ldst->datalo_reg = data_reg; 1062 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1063 } 1064} 1065 1066/* 1067 * Entry-points 1068 */ 1069 1070static const tcg_insn_unit *tb_ret_addr; 1071 1072static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1073{ 1074 /* Reuse the zeroing that exists for goto_ptr. */ 1075 if (a0 == 0) { 1076 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1077 } else { 1078 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1079 tcg_out_call_int(s, tb_ret_addr, true); 1080 } 1081} 1082 1083static void tcg_out_goto_tb(TCGContext *s, int which) 1084{ 1085 /* 1086 * Direct branch, or load indirect address, to be patched 1087 * by tb_target_set_jmp_target. Check indirect load offset 1088 * in range early, regardless of direct branch distance, 1089 * via assert within tcg_out_opc_pcaddu2i. 1090 */ 1091 uintptr_t i_addr = get_jmp_target_addr(s, which); 1092 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1093 1094 set_jmp_insn_offset(s, which); 1095 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1096 1097 /* Finish the load and indirect branch. */ 1098 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1099 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1100 set_jmp_reset_offset(s, which); 1101} 1102 1103void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1104 uintptr_t jmp_rx, uintptr_t jmp_rw) 1105{ 1106 uintptr_t d_addr = tb->jmp_target_addr[n]; 1107 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1108 tcg_insn_unit insn; 1109 1110 /* Either directly branch, or load slot address for indirect branch. */ 1111 if (d_disp == sextreg(d_disp, 0, 26)) { 1112 insn = encode_sd10k16_insn(OPC_B, d_disp); 1113 } else { 1114 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1115 intptr_t i_disp = i_addr - jmp_rx; 1116 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1117 } 1118 1119 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1120 flush_idcache_range(jmp_rx, jmp_rw, 4); 1121} 1122 1123static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1124 const TCGArg args[TCG_MAX_OP_ARGS], 1125 const int const_args[TCG_MAX_OP_ARGS]) 1126{ 1127 TCGArg a0 = args[0]; 1128 TCGArg a1 = args[1]; 1129 TCGArg a2 = args[2]; 1130 int c2 = const_args[2]; 1131 1132 switch (opc) { 1133 case INDEX_op_mb: 1134 tcg_out_mb(s, a0); 1135 break; 1136 1137 case INDEX_op_goto_ptr: 1138 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1139 break; 1140 1141 case INDEX_op_br: 1142 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1143 0); 1144 tcg_out_opc_b(s, 0); 1145 break; 1146 1147 case INDEX_op_brcond_i32: 1148 case INDEX_op_brcond_i64: 1149 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1150 break; 1151 1152 case INDEX_op_extrh_i64_i32: 1153 tcg_out_opc_srai_d(s, a0, a1, 32); 1154 break; 1155 1156 case INDEX_op_not_i32: 1157 case INDEX_op_not_i64: 1158 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1159 break; 1160 1161 case INDEX_op_nor_i32: 1162 case INDEX_op_nor_i64: 1163 if (c2) { 1164 tcg_out_opc_ori(s, a0, a1, a2); 1165 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1166 } else { 1167 tcg_out_opc_nor(s, a0, a1, a2); 1168 } 1169 break; 1170 1171 case INDEX_op_andc_i32: 1172 case INDEX_op_andc_i64: 1173 if (c2) { 1174 /* guaranteed to fit due to constraint */ 1175 tcg_out_opc_andi(s, a0, a1, ~a2); 1176 } else { 1177 tcg_out_opc_andn(s, a0, a1, a2); 1178 } 1179 break; 1180 1181 case INDEX_op_orc_i32: 1182 case INDEX_op_orc_i64: 1183 if (c2) { 1184 /* guaranteed to fit due to constraint */ 1185 tcg_out_opc_ori(s, a0, a1, ~a2); 1186 } else { 1187 tcg_out_opc_orn(s, a0, a1, a2); 1188 } 1189 break; 1190 1191 case INDEX_op_and_i32: 1192 case INDEX_op_and_i64: 1193 if (c2) { 1194 tcg_out_opc_andi(s, a0, a1, a2); 1195 } else { 1196 tcg_out_opc_and(s, a0, a1, a2); 1197 } 1198 break; 1199 1200 case INDEX_op_or_i32: 1201 case INDEX_op_or_i64: 1202 if (c2) { 1203 tcg_out_opc_ori(s, a0, a1, a2); 1204 } else { 1205 tcg_out_opc_or(s, a0, a1, a2); 1206 } 1207 break; 1208 1209 case INDEX_op_xor_i32: 1210 case INDEX_op_xor_i64: 1211 if (c2) { 1212 tcg_out_opc_xori(s, a0, a1, a2); 1213 } else { 1214 tcg_out_opc_xor(s, a0, a1, a2); 1215 } 1216 break; 1217 1218 case INDEX_op_extract_i32: 1219 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1220 break; 1221 case INDEX_op_extract_i64: 1222 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1223 break; 1224 1225 case INDEX_op_deposit_i32: 1226 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1227 break; 1228 case INDEX_op_deposit_i64: 1229 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1230 break; 1231 1232 case INDEX_op_bswap16_i32: 1233 case INDEX_op_bswap16_i64: 1234 tcg_out_opc_revb_2h(s, a0, a1); 1235 if (a2 & TCG_BSWAP_OS) { 1236 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1237 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1238 tcg_out_ext16u(s, a0, a0); 1239 } 1240 break; 1241 1242 case INDEX_op_bswap32_i32: 1243 /* All 32-bit values are computed sign-extended in the register. */ 1244 a2 = TCG_BSWAP_OS; 1245 /* fallthrough */ 1246 case INDEX_op_bswap32_i64: 1247 tcg_out_opc_revb_2w(s, a0, a1); 1248 if (a2 & TCG_BSWAP_OS) { 1249 tcg_out_ext32s(s, a0, a0); 1250 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1251 tcg_out_ext32u(s, a0, a0); 1252 } 1253 break; 1254 1255 case INDEX_op_bswap64_i64: 1256 tcg_out_opc_revb_d(s, a0, a1); 1257 break; 1258 1259 case INDEX_op_clz_i32: 1260 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1261 break; 1262 case INDEX_op_clz_i64: 1263 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1264 break; 1265 1266 case INDEX_op_ctz_i32: 1267 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1268 break; 1269 case INDEX_op_ctz_i64: 1270 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1271 break; 1272 1273 case INDEX_op_shl_i32: 1274 if (c2) { 1275 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1276 } else { 1277 tcg_out_opc_sll_w(s, a0, a1, a2); 1278 } 1279 break; 1280 case INDEX_op_shl_i64: 1281 if (c2) { 1282 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1283 } else { 1284 tcg_out_opc_sll_d(s, a0, a1, a2); 1285 } 1286 break; 1287 1288 case INDEX_op_shr_i32: 1289 if (c2) { 1290 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1291 } else { 1292 tcg_out_opc_srl_w(s, a0, a1, a2); 1293 } 1294 break; 1295 case INDEX_op_shr_i64: 1296 if (c2) { 1297 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1298 } else { 1299 tcg_out_opc_srl_d(s, a0, a1, a2); 1300 } 1301 break; 1302 1303 case INDEX_op_sar_i32: 1304 if (c2) { 1305 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1306 } else { 1307 tcg_out_opc_sra_w(s, a0, a1, a2); 1308 } 1309 break; 1310 case INDEX_op_sar_i64: 1311 if (c2) { 1312 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1313 } else { 1314 tcg_out_opc_sra_d(s, a0, a1, a2); 1315 } 1316 break; 1317 1318 case INDEX_op_rotl_i32: 1319 /* transform into equivalent rotr/rotri */ 1320 if (c2) { 1321 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1322 } else { 1323 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1324 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1325 } 1326 break; 1327 case INDEX_op_rotl_i64: 1328 /* transform into equivalent rotr/rotri */ 1329 if (c2) { 1330 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1331 } else { 1332 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1333 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1334 } 1335 break; 1336 1337 case INDEX_op_rotr_i32: 1338 if (c2) { 1339 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1340 } else { 1341 tcg_out_opc_rotr_w(s, a0, a1, a2); 1342 } 1343 break; 1344 case INDEX_op_rotr_i64: 1345 if (c2) { 1346 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1347 } else { 1348 tcg_out_opc_rotr_d(s, a0, a1, a2); 1349 } 1350 break; 1351 1352 case INDEX_op_add_i32: 1353 if (c2) { 1354 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1355 } else { 1356 tcg_out_opc_add_w(s, a0, a1, a2); 1357 } 1358 break; 1359 case INDEX_op_add_i64: 1360 if (c2) { 1361 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1362 } else { 1363 tcg_out_opc_add_d(s, a0, a1, a2); 1364 } 1365 break; 1366 1367 case INDEX_op_sub_i32: 1368 if (c2) { 1369 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1370 } else { 1371 tcg_out_opc_sub_w(s, a0, a1, a2); 1372 } 1373 break; 1374 case INDEX_op_sub_i64: 1375 if (c2) { 1376 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1377 } else { 1378 tcg_out_opc_sub_d(s, a0, a1, a2); 1379 } 1380 break; 1381 1382 case INDEX_op_mul_i32: 1383 tcg_out_opc_mul_w(s, a0, a1, a2); 1384 break; 1385 case INDEX_op_mul_i64: 1386 tcg_out_opc_mul_d(s, a0, a1, a2); 1387 break; 1388 1389 case INDEX_op_mulsh_i32: 1390 tcg_out_opc_mulh_w(s, a0, a1, a2); 1391 break; 1392 case INDEX_op_mulsh_i64: 1393 tcg_out_opc_mulh_d(s, a0, a1, a2); 1394 break; 1395 1396 case INDEX_op_muluh_i32: 1397 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1398 break; 1399 case INDEX_op_muluh_i64: 1400 tcg_out_opc_mulh_du(s, a0, a1, a2); 1401 break; 1402 1403 case INDEX_op_div_i32: 1404 tcg_out_opc_div_w(s, a0, a1, a2); 1405 break; 1406 case INDEX_op_div_i64: 1407 tcg_out_opc_div_d(s, a0, a1, a2); 1408 break; 1409 1410 case INDEX_op_divu_i32: 1411 tcg_out_opc_div_wu(s, a0, a1, a2); 1412 break; 1413 case INDEX_op_divu_i64: 1414 tcg_out_opc_div_du(s, a0, a1, a2); 1415 break; 1416 1417 case INDEX_op_rem_i32: 1418 tcg_out_opc_mod_w(s, a0, a1, a2); 1419 break; 1420 case INDEX_op_rem_i64: 1421 tcg_out_opc_mod_d(s, a0, a1, a2); 1422 break; 1423 1424 case INDEX_op_remu_i32: 1425 tcg_out_opc_mod_wu(s, a0, a1, a2); 1426 break; 1427 case INDEX_op_remu_i64: 1428 tcg_out_opc_mod_du(s, a0, a1, a2); 1429 break; 1430 1431 case INDEX_op_setcond_i32: 1432 case INDEX_op_setcond_i64: 1433 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1434 break; 1435 1436 case INDEX_op_movcond_i32: 1437 case INDEX_op_movcond_i64: 1438 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1439 break; 1440 1441 case INDEX_op_ld8s_i32: 1442 case INDEX_op_ld8s_i64: 1443 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1444 break; 1445 case INDEX_op_ld8u_i32: 1446 case INDEX_op_ld8u_i64: 1447 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1448 break; 1449 case INDEX_op_ld16s_i32: 1450 case INDEX_op_ld16s_i64: 1451 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1452 break; 1453 case INDEX_op_ld16u_i32: 1454 case INDEX_op_ld16u_i64: 1455 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1456 break; 1457 case INDEX_op_ld_i32: 1458 case INDEX_op_ld32s_i64: 1459 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1460 break; 1461 case INDEX_op_ld32u_i64: 1462 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1463 break; 1464 case INDEX_op_ld_i64: 1465 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1466 break; 1467 1468 case INDEX_op_st8_i32: 1469 case INDEX_op_st8_i64: 1470 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1471 break; 1472 case INDEX_op_st16_i32: 1473 case INDEX_op_st16_i64: 1474 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1475 break; 1476 case INDEX_op_st_i32: 1477 case INDEX_op_st32_i64: 1478 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1479 break; 1480 case INDEX_op_st_i64: 1481 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1482 break; 1483 1484 case INDEX_op_qemu_ld_i32: 1485 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1486 break; 1487 case INDEX_op_qemu_ld_i64: 1488 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1489 break; 1490 case INDEX_op_qemu_st_i32: 1491 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1492 break; 1493 case INDEX_op_qemu_st_i64: 1494 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1495 break; 1496 1497 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1498 case INDEX_op_mov_i64: 1499 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1500 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1501 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1502 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1503 case INDEX_op_ext8s_i64: 1504 case INDEX_op_ext8u_i32: 1505 case INDEX_op_ext8u_i64: 1506 case INDEX_op_ext16s_i32: 1507 case INDEX_op_ext16s_i64: 1508 case INDEX_op_ext16u_i32: 1509 case INDEX_op_ext16u_i64: 1510 case INDEX_op_ext32s_i64: 1511 case INDEX_op_ext32u_i64: 1512 case INDEX_op_ext_i32_i64: 1513 case INDEX_op_extu_i32_i64: 1514 case INDEX_op_extrl_i64_i32: 1515 default: 1516 g_assert_not_reached(); 1517 } 1518} 1519 1520static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1521{ 1522 switch (op) { 1523 case INDEX_op_goto_ptr: 1524 return C_O0_I1(r); 1525 1526 case INDEX_op_st8_i32: 1527 case INDEX_op_st8_i64: 1528 case INDEX_op_st16_i32: 1529 case INDEX_op_st16_i64: 1530 case INDEX_op_st32_i64: 1531 case INDEX_op_st_i32: 1532 case INDEX_op_st_i64: 1533 case INDEX_op_qemu_st_i32: 1534 case INDEX_op_qemu_st_i64: 1535 return C_O0_I2(rZ, r); 1536 1537 case INDEX_op_brcond_i32: 1538 case INDEX_op_brcond_i64: 1539 return C_O0_I2(rZ, rZ); 1540 1541 case INDEX_op_ext8s_i32: 1542 case INDEX_op_ext8s_i64: 1543 case INDEX_op_ext8u_i32: 1544 case INDEX_op_ext8u_i64: 1545 case INDEX_op_ext16s_i32: 1546 case INDEX_op_ext16s_i64: 1547 case INDEX_op_ext16u_i32: 1548 case INDEX_op_ext16u_i64: 1549 case INDEX_op_ext32s_i64: 1550 case INDEX_op_ext32u_i64: 1551 case INDEX_op_extu_i32_i64: 1552 case INDEX_op_extrl_i64_i32: 1553 case INDEX_op_extrh_i64_i32: 1554 case INDEX_op_ext_i32_i64: 1555 case INDEX_op_not_i32: 1556 case INDEX_op_not_i64: 1557 case INDEX_op_extract_i32: 1558 case INDEX_op_extract_i64: 1559 case INDEX_op_bswap16_i32: 1560 case INDEX_op_bswap16_i64: 1561 case INDEX_op_bswap32_i32: 1562 case INDEX_op_bswap32_i64: 1563 case INDEX_op_bswap64_i64: 1564 case INDEX_op_ld8s_i32: 1565 case INDEX_op_ld8s_i64: 1566 case INDEX_op_ld8u_i32: 1567 case INDEX_op_ld8u_i64: 1568 case INDEX_op_ld16s_i32: 1569 case INDEX_op_ld16s_i64: 1570 case INDEX_op_ld16u_i32: 1571 case INDEX_op_ld16u_i64: 1572 case INDEX_op_ld32s_i64: 1573 case INDEX_op_ld32u_i64: 1574 case INDEX_op_ld_i32: 1575 case INDEX_op_ld_i64: 1576 case INDEX_op_qemu_ld_i32: 1577 case INDEX_op_qemu_ld_i64: 1578 return C_O1_I1(r, r); 1579 1580 case INDEX_op_andc_i32: 1581 case INDEX_op_andc_i64: 1582 case INDEX_op_orc_i32: 1583 case INDEX_op_orc_i64: 1584 /* 1585 * LoongArch insns for these ops don't have reg-imm forms, but we 1586 * can express using andi/ori if ~constant satisfies 1587 * TCG_CT_CONST_U12. 1588 */ 1589 return C_O1_I2(r, r, rC); 1590 1591 case INDEX_op_shl_i32: 1592 case INDEX_op_shl_i64: 1593 case INDEX_op_shr_i32: 1594 case INDEX_op_shr_i64: 1595 case INDEX_op_sar_i32: 1596 case INDEX_op_sar_i64: 1597 case INDEX_op_rotl_i32: 1598 case INDEX_op_rotl_i64: 1599 case INDEX_op_rotr_i32: 1600 case INDEX_op_rotr_i64: 1601 return C_O1_I2(r, r, ri); 1602 1603 case INDEX_op_add_i32: 1604 return C_O1_I2(r, r, ri); 1605 case INDEX_op_add_i64: 1606 return C_O1_I2(r, r, rJ); 1607 1608 case INDEX_op_and_i32: 1609 case INDEX_op_and_i64: 1610 case INDEX_op_nor_i32: 1611 case INDEX_op_nor_i64: 1612 case INDEX_op_or_i32: 1613 case INDEX_op_or_i64: 1614 case INDEX_op_xor_i32: 1615 case INDEX_op_xor_i64: 1616 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 1617 return C_O1_I2(r, r, rU); 1618 1619 case INDEX_op_clz_i32: 1620 case INDEX_op_clz_i64: 1621 case INDEX_op_ctz_i32: 1622 case INDEX_op_ctz_i64: 1623 return C_O1_I2(r, r, rW); 1624 1625 case INDEX_op_deposit_i32: 1626 case INDEX_op_deposit_i64: 1627 /* Must deposit into the same register as input */ 1628 return C_O1_I2(r, 0, rZ); 1629 1630 case INDEX_op_sub_i32: 1631 case INDEX_op_setcond_i32: 1632 return C_O1_I2(r, rZ, ri); 1633 case INDEX_op_sub_i64: 1634 case INDEX_op_setcond_i64: 1635 return C_O1_I2(r, rZ, rJ); 1636 1637 case INDEX_op_mul_i32: 1638 case INDEX_op_mul_i64: 1639 case INDEX_op_mulsh_i32: 1640 case INDEX_op_mulsh_i64: 1641 case INDEX_op_muluh_i32: 1642 case INDEX_op_muluh_i64: 1643 case INDEX_op_div_i32: 1644 case INDEX_op_div_i64: 1645 case INDEX_op_divu_i32: 1646 case INDEX_op_divu_i64: 1647 case INDEX_op_rem_i32: 1648 case INDEX_op_rem_i64: 1649 case INDEX_op_remu_i32: 1650 case INDEX_op_remu_i64: 1651 return C_O1_I2(r, rZ, rZ); 1652 1653 case INDEX_op_movcond_i32: 1654 case INDEX_op_movcond_i64: 1655 return C_O1_I4(r, rZ, rJ, rZ, rZ); 1656 1657 default: 1658 g_assert_not_reached(); 1659 } 1660} 1661 1662static const int tcg_target_callee_save_regs[] = { 1663 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 1664 TCG_REG_S1, 1665 TCG_REG_S2, 1666 TCG_REG_S3, 1667 TCG_REG_S4, 1668 TCG_REG_S5, 1669 TCG_REG_S6, 1670 TCG_REG_S7, 1671 TCG_REG_S8, 1672 TCG_REG_S9, 1673 TCG_REG_RA, /* should be last for ABI compliance */ 1674}; 1675 1676/* Stack frame parameters. */ 1677#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 1678#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 1679#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 1680#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 1681 + TCG_TARGET_STACK_ALIGN - 1) \ 1682 & -TCG_TARGET_STACK_ALIGN) 1683#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 1684 1685/* We're expecting to be able to use an immediate for frame allocation. */ 1686QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 1687 1688/* Generate global QEMU prologue and epilogue code */ 1689static void tcg_target_qemu_prologue(TCGContext *s) 1690{ 1691 int i; 1692 1693 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 1694 1695 /* TB prologue */ 1696 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 1697 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1698 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1699 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1700 } 1701 1702#if !defined(CONFIG_SOFTMMU) 1703 if (USE_GUEST_BASE) { 1704 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 1705 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 1706 } 1707#endif 1708 1709 /* Call generated code */ 1710 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 1711 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 1712 1713 /* Return path for goto_ptr. Set return value to 0 */ 1714 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 1715 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 1716 1717 /* TB epilogue */ 1718 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 1719 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1720 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1721 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1722 } 1723 1724 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 1725 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 1726} 1727 1728static void tcg_target_init(TCGContext *s) 1729{ 1730 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 1731 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 1732 1733 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 1734 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 1735 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 1736 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 1737 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 1738 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 1739 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 1740 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 1741 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 1742 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 1743 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 1744 1745 s->reserved_regs = 0; 1746 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 1747 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 1748 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 1749 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 1750 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 1751 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 1752 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 1753} 1754 1755typedef struct { 1756 DebugFrameHeader h; 1757 uint8_t fde_def_cfa[4]; 1758 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 1759} DebugFrame; 1760 1761#define ELF_HOST_MACHINE EM_LOONGARCH 1762 1763static const DebugFrame debug_frame = { 1764 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 1765 .h.cie.id = -1, 1766 .h.cie.version = 1, 1767 .h.cie.code_align = 1, 1768 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 1769 .h.cie.return_column = TCG_REG_RA, 1770 1771 /* Total FDE size does not include the "len" member. */ 1772 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 1773 1774 .fde_def_cfa = { 1775 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 1776 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 1777 (FRAME_SIZE >> 7) 1778 }, 1779 .fde_reg_ofs = { 1780 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 1781 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 1782 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 1783 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 1784 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 1785 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 1786 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 1787 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 1788 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 1789 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 1790 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 1791 } 1792}; 1793 1794void tcg_register_jit(const void *buf, size_t buf_size) 1795{ 1796 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 1797} 1798