1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33 34#ifdef CONFIG_DEBUG_TCG 35static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 36 "zero", 37 "ra", 38 "tp", 39 "sp", 40 "a0", 41 "a1", 42 "a2", 43 "a3", 44 "a4", 45 "a5", 46 "a6", 47 "a7", 48 "t0", 49 "t1", 50 "t2", 51 "t3", 52 "t4", 53 "t5", 54 "t6", 55 "t7", 56 "t8", 57 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 58 "s9", 59 "s0", 60 "s1", 61 "s2", 62 "s3", 63 "s4", 64 "s5", 65 "s6", 66 "s7", 67 "s8" 68}; 69#endif 70 71static const int tcg_target_reg_alloc_order[] = { 72 /* Registers preserved across calls */ 73 /* TCG_REG_S0 reserved for TCG_AREG0 */ 74 TCG_REG_S1, 75 TCG_REG_S2, 76 TCG_REG_S3, 77 TCG_REG_S4, 78 TCG_REG_S5, 79 TCG_REG_S6, 80 TCG_REG_S7, 81 TCG_REG_S8, 82 TCG_REG_S9, 83 84 /* Registers (potentially) clobbered across calls */ 85 TCG_REG_T0, 86 TCG_REG_T1, 87 TCG_REG_T2, 88 TCG_REG_T3, 89 TCG_REG_T4, 90 TCG_REG_T5, 91 TCG_REG_T6, 92 TCG_REG_T7, 93 TCG_REG_T8, 94 95 /* Argument registers, opposite order of allocation. */ 96 TCG_REG_A7, 97 TCG_REG_A6, 98 TCG_REG_A5, 99 TCG_REG_A4, 100 TCG_REG_A3, 101 TCG_REG_A2, 102 TCG_REG_A1, 103 TCG_REG_A0, 104}; 105 106static const int tcg_target_call_iarg_regs[] = { 107 TCG_REG_A0, 108 TCG_REG_A1, 109 TCG_REG_A2, 110 TCG_REG_A3, 111 TCG_REG_A4, 112 TCG_REG_A5, 113 TCG_REG_A6, 114 TCG_REG_A7, 115}; 116 117static const int tcg_target_call_oarg_regs[] = { 118 TCG_REG_A0, 119 TCG_REG_A1, 120}; 121 122#ifndef CONFIG_SOFTMMU 123#define USE_GUEST_BASE (guest_base != 0) 124#define TCG_GUEST_BASE_REG TCG_REG_S1 125#endif 126 127#define TCG_CT_CONST_ZERO 0x100 128#define TCG_CT_CONST_S12 0x200 129#define TCG_CT_CONST_N12 0x400 130#define TCG_CT_CONST_U12 0x800 131#define TCG_CT_CONST_C12 0x1000 132#define TCG_CT_CONST_WSZ 0x2000 133 134#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 135/* 136 * For softmmu, we need to avoid conflicts with the first 5 137 * argument registers to call the helper. Some of these are 138 * also used for the tlb lookup. 139 */ 140#ifdef CONFIG_SOFTMMU 141#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5) 142#else 143#define SOFTMMU_RESERVE_REGS 0 144#endif 145 146 147static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 148{ 149 return sextract64(val, pos, len); 150} 151 152/* test if a constant matches the constraint */ 153static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 154{ 155 if (ct & TCG_CT_CONST) { 156 return true; 157 } 158 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 159 return true; 160 } 161 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 162 return true; 163 } 164 if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) { 165 return true; 166 } 167 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 168 return true; 169 } 170 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 171 return true; 172 } 173 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 174 return true; 175 } 176 return false; 177} 178 179/* 180 * Relocations 181 */ 182 183/* 184 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 185 * complicated; a whopping stack machine is needed to stuff the fields, at 186 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 187 * needed. 188 * 189 * Hence, define our own simpler relocation types. Numbers are chosen as to 190 * not collide with potential future additions to the true ELF relocation 191 * type enum. 192 */ 193 194/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 195#define R_LOONGARCH_BR_SK16 256 196/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 197#define R_LOONGARCH_BR_SD10K16 257 198 199static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 200{ 201 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 202 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 203 204 tcg_debug_assert((offset & 3) == 0); 205 offset >>= 2; 206 if (offset == sextreg(offset, 0, 16)) { 207 *src_rw = deposit64(*src_rw, 10, 16, offset); 208 return true; 209 } 210 211 return false; 212} 213 214static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 215 const tcg_insn_unit *target) 216{ 217 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 218 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 219 220 tcg_debug_assert((offset & 3) == 0); 221 offset >>= 2; 222 if (offset == sextreg(offset, 0, 26)) { 223 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 224 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 225 return true; 226 } 227 228 return false; 229} 230 231static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 232 intptr_t value, intptr_t addend) 233{ 234 tcg_debug_assert(addend == 0); 235 switch (type) { 236 case R_LOONGARCH_BR_SK16: 237 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 238 case R_LOONGARCH_BR_SD10K16: 239 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 240 default: 241 g_assert_not_reached(); 242 } 243} 244 245#include "tcg-insn-defs.c.inc" 246 247/* 248 * TCG intrinsics 249 */ 250 251static void tcg_out_mb(TCGContext *s, TCGArg a0) 252{ 253 /* Baseline LoongArch only has the full barrier, unfortunately. */ 254 tcg_out_opc_dbar(s, 0); 255} 256 257static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 258{ 259 if (ret == arg) { 260 return true; 261 } 262 switch (type) { 263 case TCG_TYPE_I32: 264 case TCG_TYPE_I64: 265 /* 266 * Conventional register-register move used in LoongArch is 267 * `or dst, src, zero`. 268 */ 269 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 270 break; 271 default: 272 g_assert_not_reached(); 273 } 274 return true; 275} 276 277static bool imm_part_needs_loading(bool high_bits_are_ones, 278 tcg_target_long part) 279{ 280 if (high_bits_are_ones) { 281 return part != -1; 282 } else { 283 return part != 0; 284 } 285} 286 287/* Loads a 32-bit immediate into rd, sign-extended. */ 288static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 289{ 290 tcg_target_long lo = sextreg(val, 0, 12); 291 tcg_target_long hi12 = sextreg(val, 12, 20); 292 293 /* Single-instruction cases. */ 294 if (lo == val) { 295 /* val fits in simm12: addi.w rd, zero, val */ 296 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 297 return; 298 } 299 if (0x800 <= val && val <= 0xfff) { 300 /* val fits in uimm12: ori rd, zero, val */ 301 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 302 return; 303 } 304 305 /* High bits must be set; load with lu12i.w + optional ori. */ 306 tcg_out_opc_lu12i_w(s, rd, hi12); 307 if (lo != 0) { 308 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 309 } 310} 311 312static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 313 tcg_target_long val) 314{ 315 /* 316 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 317 * with dedicated instructions for filling the respective bitfields 318 * below: 319 * 320 * 6 5 4 3 321 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 322 * +-----------------------+---------------------------------------+... 323 * | hi52 | hi32 | 324 * +-----------------------+---------------------------------------+... 325 * 3 2 1 326 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 327 * ...+-------------------------------------+-------------------------+ 328 * | hi12 | lo | 329 * ...+-------------------------------------+-------------------------+ 330 * 331 * Check if val belong to one of the several fast cases, before falling 332 * back to the slow path. 333 */ 334 335 intptr_t pc_offset; 336 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 337 tcg_target_long hi32, hi52; 338 bool rd_high_bits_are_ones; 339 340 /* Value fits in signed i32. */ 341 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 342 tcg_out_movi_i32(s, rd, val); 343 return; 344 } 345 346 /* PC-relative cases. */ 347 pc_offset = tcg_pcrel_diff(s, (void *)val); 348 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 349 /* Single pcaddu2i. */ 350 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 351 return; 352 } 353 354 if (pc_offset == (int32_t)pc_offset) { 355 /* Offset within 32 bits; load with pcalau12i + ori. */ 356 val_lo = sextreg(val, 0, 12); 357 val_hi = val >> 12; 358 pc_hi = (val - pc_offset) >> 12; 359 offset_hi = val_hi - pc_hi; 360 361 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 362 tcg_out_opc_pcalau12i(s, rd, offset_hi); 363 if (val_lo != 0) { 364 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 365 } 366 return; 367 } 368 369 hi32 = sextreg(val, 32, 20); 370 hi52 = sextreg(val, 52, 12); 371 372 /* Single cu52i.d case. */ 373 if (ctz64(val) >= 52) { 374 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 375 return; 376 } 377 378 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 379 tcg_out_movi_i32(s, rd, val); 380 rd_high_bits_are_ones = (int32_t)val < 0; 381 382 if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) { 383 tcg_out_opc_cu32i_d(s, rd, hi32); 384 rd_high_bits_are_ones = hi32 < 0; 385 } 386 387 if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) { 388 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 389 } 390} 391 392static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 393{ 394 tcg_out_opc_andi(s, ret, arg, 0xff); 395} 396 397static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 398{ 399 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 400} 401 402static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 403{ 404 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 405} 406 407static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) 408{ 409 tcg_out_opc_sext_b(s, ret, arg); 410} 411 412static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) 413{ 414 tcg_out_opc_sext_h(s, ret, arg); 415} 416 417static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 418{ 419 tcg_out_opc_addi_w(s, ret, arg, 0); 420} 421 422static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 423 TCGReg a0, TCGReg a1, TCGReg a2, 424 bool c2, bool is_32bit) 425{ 426 if (c2) { 427 /* 428 * Fast path: semantics already satisfied due to constraint and 429 * insn behavior, single instruction is enough. 430 */ 431 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 432 /* all clz/ctz insns belong to DJ-format */ 433 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 434 return; 435 } 436 437 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 438 /* a0 = a1 ? REG_TMP0 : a2 */ 439 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 440 tcg_out_opc_masknez(s, a0, a2, a1); 441 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 442} 443 444static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 445 TCGReg arg1, TCGReg arg2, bool c2) 446{ 447 TCGReg tmp; 448 449 if (c2) { 450 tcg_debug_assert(arg2 == 0); 451 } 452 453 switch (cond) { 454 case TCG_COND_EQ: 455 if (c2) { 456 tmp = arg1; 457 } else { 458 tcg_out_opc_sub_d(s, ret, arg1, arg2); 459 tmp = ret; 460 } 461 tcg_out_opc_sltui(s, ret, tmp, 1); 462 break; 463 case TCG_COND_NE: 464 if (c2) { 465 tmp = arg1; 466 } else { 467 tcg_out_opc_sub_d(s, ret, arg1, arg2); 468 tmp = ret; 469 } 470 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 471 break; 472 case TCG_COND_LT: 473 tcg_out_opc_slt(s, ret, arg1, arg2); 474 break; 475 case TCG_COND_GE: 476 tcg_out_opc_slt(s, ret, arg1, arg2); 477 tcg_out_opc_xori(s, ret, ret, 1); 478 break; 479 case TCG_COND_LE: 480 tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false); 481 break; 482 case TCG_COND_GT: 483 tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false); 484 break; 485 case TCG_COND_LTU: 486 tcg_out_opc_sltu(s, ret, arg1, arg2); 487 break; 488 case TCG_COND_GEU: 489 tcg_out_opc_sltu(s, ret, arg1, arg2); 490 tcg_out_opc_xori(s, ret, ret, 1); 491 break; 492 case TCG_COND_LEU: 493 tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false); 494 break; 495 case TCG_COND_GTU: 496 tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false); 497 break; 498 default: 499 g_assert_not_reached(); 500 break; 501 } 502} 503 504/* 505 * Branch helpers 506 */ 507 508static const struct { 509 LoongArchInsn op; 510 bool swap; 511} tcg_brcond_to_loongarch[] = { 512 [TCG_COND_EQ] = { OPC_BEQ, false }, 513 [TCG_COND_NE] = { OPC_BNE, false }, 514 [TCG_COND_LT] = { OPC_BGT, true }, 515 [TCG_COND_GE] = { OPC_BLE, true }, 516 [TCG_COND_LE] = { OPC_BLE, false }, 517 [TCG_COND_GT] = { OPC_BGT, false }, 518 [TCG_COND_LTU] = { OPC_BGTU, true }, 519 [TCG_COND_GEU] = { OPC_BLEU, true }, 520 [TCG_COND_LEU] = { OPC_BLEU, false }, 521 [TCG_COND_GTU] = { OPC_BGTU, false } 522}; 523 524static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 525 TCGReg arg2, TCGLabel *l) 526{ 527 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 528 529 tcg_debug_assert(op != 0); 530 531 if (tcg_brcond_to_loongarch[cond].swap) { 532 TCGReg t = arg1; 533 arg1 = arg2; 534 arg2 = t; 535 } 536 537 /* all conditional branch insns belong to DJSk16-format */ 538 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 539 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 540} 541 542static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 543{ 544 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 545 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 546 547 tcg_debug_assert((offset & 3) == 0); 548 if (offset == sextreg(offset, 0, 28)) { 549 /* short jump: +/- 256MiB */ 550 if (tail) { 551 tcg_out_opc_b(s, offset >> 2); 552 } else { 553 tcg_out_opc_bl(s, offset >> 2); 554 } 555 } else if (offset == sextreg(offset, 0, 38)) { 556 /* long jump: +/- 256GiB */ 557 tcg_target_long lo = sextreg(offset, 0, 18); 558 tcg_target_long hi = offset - lo; 559 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 560 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 561 } else { 562 /* far jump: 64-bit */ 563 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 564 tcg_target_long hi = (tcg_target_long)arg - lo; 565 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 566 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 567 } 568} 569 570static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 571 const TCGHelperInfo *info) 572{ 573 tcg_out_call_int(s, arg, false); 574} 575 576/* 577 * Load/store helpers 578 */ 579 580static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 581 TCGReg addr, intptr_t offset) 582{ 583 intptr_t imm12 = sextreg(offset, 0, 12); 584 585 if (offset != imm12) { 586 intptr_t diff = offset - (uintptr_t)s->code_ptr; 587 588 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 589 imm12 = sextreg(diff, 0, 12); 590 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 591 } else { 592 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 593 if (addr != TCG_REG_ZERO) { 594 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 595 } 596 } 597 addr = TCG_REG_TMP2; 598 } 599 600 switch (opc) { 601 case OPC_LD_B: 602 case OPC_LD_BU: 603 case OPC_LD_H: 604 case OPC_LD_HU: 605 case OPC_LD_W: 606 case OPC_LD_WU: 607 case OPC_LD_D: 608 case OPC_ST_B: 609 case OPC_ST_H: 610 case OPC_ST_W: 611 case OPC_ST_D: 612 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 613 break; 614 default: 615 g_assert_not_reached(); 616 } 617} 618 619static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 620 TCGReg arg1, intptr_t arg2) 621{ 622 bool is_32bit = type == TCG_TYPE_I32; 623 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 624} 625 626static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 627 TCGReg arg1, intptr_t arg2) 628{ 629 bool is_32bit = type == TCG_TYPE_I32; 630 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 631} 632 633static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 634 TCGReg base, intptr_t ofs) 635{ 636 if (val == 0) { 637 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 638 return true; 639 } 640 return false; 641} 642 643/* 644 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 645 */ 646 647#if defined(CONFIG_SOFTMMU) 648/* 649 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, 650 * MemOpIdx oi, uintptr_t ra) 651 */ 652static void * const qemu_ld_helpers[4] = { 653 [MO_8] = helper_ret_ldub_mmu, 654 [MO_16] = helper_le_lduw_mmu, 655 [MO_32] = helper_le_ldul_mmu, 656 [MO_64] = helper_le_ldq_mmu, 657}; 658 659/* 660 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, 661 * uintxx_t val, MemOpIdx oi, 662 * uintptr_t ra) 663 */ 664static void * const qemu_st_helpers[4] = { 665 [MO_8] = helper_ret_stb_mmu, 666 [MO_16] = helper_le_stw_mmu, 667 [MO_32] = helper_le_stl_mmu, 668 [MO_64] = helper_le_stq_mmu, 669}; 670 671/* We expect to use a 12-bit negative offset from ENV. */ 672QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 673QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); 674 675static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 676{ 677 tcg_out_opc_b(s, 0); 678 return reloc_br_sd10k16(s->code_ptr - 1, target); 679} 680 681/* 682 * Emits common code for TLB addend lookup, that eventually loads the 683 * addend in TCG_REG_TMP2. 684 */ 685static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi, 686 tcg_insn_unit **label_ptr, bool is_load) 687{ 688 MemOp opc = get_memop(oi); 689 unsigned s_bits = opc & MO_SIZE; 690 unsigned a_bits = get_alignment_bits(opc); 691 tcg_target_long compare_mask; 692 int mem_index = get_mmuidx(oi); 693 int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); 694 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 695 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 696 697 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 698 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 699 700 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl, 701 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 702 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 703 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 704 705 /* Load the tlb comparator and the addend. */ 706 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, 707 is_load ? offsetof(CPUTLBEntry, addr_read) 708 : offsetof(CPUTLBEntry, addr_write)); 709 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 710 offsetof(CPUTLBEntry, addend)); 711 712 /* We don't support unaligned accesses. */ 713 if (a_bits < s_bits) { 714 a_bits = s_bits; 715 } 716 /* Clear the non-page, non-alignment bits from the address. */ 717 compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); 718 tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); 719 tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl); 720 721 /* Compare masked address with the TLB entry. */ 722 label_ptr[0] = s->code_ptr; 723 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 724 725 /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */ 726} 727 728static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, 729 TCGType type, 730 TCGReg datalo, TCGReg addrlo, 731 void *raddr, tcg_insn_unit **label_ptr) 732{ 733 TCGLabelQemuLdst *label = new_ldst_label(s); 734 735 label->is_ld = is_ld; 736 label->oi = oi; 737 label->type = type; 738 label->datalo_reg = datalo; 739 label->datahi_reg = 0; /* unused */ 740 label->addrlo_reg = addrlo; 741 label->addrhi_reg = 0; /* unused */ 742 label->raddr = tcg_splitwx_to_rx(raddr); 743 label->label_ptr[0] = label_ptr[0]; 744} 745 746static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 747{ 748 MemOpIdx oi = l->oi; 749 MemOp opc = get_memop(oi); 750 MemOp size = opc & MO_SIZE; 751 TCGType type = l->type; 752 753 /* resolve label address */ 754 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 755 return false; 756 } 757 758 /* call load helper */ 759 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); 760 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); 761 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi); 762 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr); 763 764 tcg_out_call_int(s, qemu_ld_helpers[size], false); 765 766 switch (opc & MO_SSIZE) { 767 case MO_SB: 768 tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0); 769 break; 770 case MO_SW: 771 tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0); 772 break; 773 case MO_SL: 774 tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); 775 break; 776 case MO_UL: 777 if (type == TCG_TYPE_I32) { 778 /* MO_UL loads of i32 should be sign-extended too */ 779 tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); 780 break; 781 } 782 /* fallthrough */ 783 default: 784 tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0); 785 break; 786 } 787 788 return tcg_out_goto(s, l->raddr); 789} 790 791static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 792{ 793 MemOpIdx oi = l->oi; 794 MemOp opc = get_memop(oi); 795 MemOp size = opc & MO_SIZE; 796 797 /* resolve label address */ 798 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 799 return false; 800 } 801 802 /* call store helper */ 803 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); 804 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); 805 switch (size) { 806 case MO_8: 807 tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg); 808 break; 809 case MO_16: 810 tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg); 811 break; 812 case MO_32: 813 tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg); 814 break; 815 case MO_64: 816 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg); 817 break; 818 default: 819 g_assert_not_reached(); 820 break; 821 } 822 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi); 823 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr); 824 825 tcg_out_call_int(s, qemu_st_helpers[size], false); 826 827 return tcg_out_goto(s, l->raddr); 828} 829#else 830 831/* 832 * Alignment helpers for user-mode emulation 833 */ 834 835static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, 836 unsigned a_bits) 837{ 838 TCGLabelQemuLdst *l = new_ldst_label(s); 839 840 l->is_ld = is_ld; 841 l->addrlo_reg = addr_reg; 842 843 /* 844 * Without micro-architecture details, we don't know which of bstrpick or 845 * andi is faster, so use bstrpick as it's not constrained by imm field 846 * width. (Not to say alignments >= 2^12 are going to happen any time 847 * soon, though) 848 */ 849 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 850 851 l->label_ptr[0] = s->code_ptr; 852 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 853 854 l->raddr = tcg_splitwx_to_rx(s->code_ptr); 855} 856 857static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) 858{ 859 /* resolve label address */ 860 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 861 return false; 862 } 863 864 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); 865 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); 866 867 /* tail call, with the return address back inline. */ 868 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); 869 tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld 870 : helper_unaligned_st), true); 871 return true; 872} 873 874static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 875{ 876 return tcg_out_fail_alignment(s, l); 877} 878 879static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 880{ 881 return tcg_out_fail_alignment(s, l); 882} 883 884#endif /* CONFIG_SOFTMMU */ 885 886/* 887 * `ext32u` the address register into the temp register given, 888 * if target is 32-bit, no-op otherwise. 889 * 890 * Returns the address register ready for use with TLB addend. 891 */ 892static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s, 893 TCGReg addr, TCGReg tmp) 894{ 895 if (TARGET_LONG_BITS == 32) { 896 tcg_out_ext32u(s, tmp, addr); 897 return tmp; 898 } 899 return addr; 900} 901 902static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, 903 TCGReg rk, MemOp opc, TCGType type) 904{ 905 /* Byte swapping is left to middle-end expansion. */ 906 tcg_debug_assert((opc & MO_BSWAP) == 0); 907 908 switch (opc & MO_SSIZE) { 909 case MO_UB: 910 tcg_out_opc_ldx_bu(s, rd, rj, rk); 911 break; 912 case MO_SB: 913 tcg_out_opc_ldx_b(s, rd, rj, rk); 914 break; 915 case MO_UW: 916 tcg_out_opc_ldx_hu(s, rd, rj, rk); 917 break; 918 case MO_SW: 919 tcg_out_opc_ldx_h(s, rd, rj, rk); 920 break; 921 case MO_UL: 922 if (type == TCG_TYPE_I64) { 923 tcg_out_opc_ldx_wu(s, rd, rj, rk); 924 break; 925 } 926 /* fallthrough */ 927 case MO_SL: 928 tcg_out_opc_ldx_w(s, rd, rj, rk); 929 break; 930 case MO_UQ: 931 tcg_out_opc_ldx_d(s, rd, rj, rk); 932 break; 933 default: 934 g_assert_not_reached(); 935 } 936} 937 938static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) 939{ 940 TCGReg addr_regl; 941 TCGReg data_regl; 942 MemOpIdx oi; 943 MemOp opc; 944#if defined(CONFIG_SOFTMMU) 945 tcg_insn_unit *label_ptr[1]; 946#else 947 unsigned a_bits; 948#endif 949 TCGReg base; 950 951 data_regl = *args++; 952 addr_regl = *args++; 953 oi = *args++; 954 opc = get_memop(oi); 955 956#if defined(CONFIG_SOFTMMU) 957 tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1); 958 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); 959 tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type); 960 add_qemu_ldst_label(s, 1, oi, type, 961 data_regl, addr_regl, 962 s->code_ptr, label_ptr); 963#else 964 a_bits = get_alignment_bits(opc); 965 if (a_bits) { 966 tcg_out_test_alignment(s, true, addr_regl, a_bits); 967 } 968 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); 969 TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 970 tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); 971#endif 972} 973 974static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, 975 TCGReg rj, TCGReg rk, MemOp opc) 976{ 977 /* Byte swapping is left to middle-end expansion. */ 978 tcg_debug_assert((opc & MO_BSWAP) == 0); 979 980 switch (opc & MO_SIZE) { 981 case MO_8: 982 tcg_out_opc_stx_b(s, data, rj, rk); 983 break; 984 case MO_16: 985 tcg_out_opc_stx_h(s, data, rj, rk); 986 break; 987 case MO_32: 988 tcg_out_opc_stx_w(s, data, rj, rk); 989 break; 990 case MO_64: 991 tcg_out_opc_stx_d(s, data, rj, rk); 992 break; 993 default: 994 g_assert_not_reached(); 995 } 996} 997 998static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) 999{ 1000 TCGReg addr_regl; 1001 TCGReg data_regl; 1002 MemOpIdx oi; 1003 MemOp opc; 1004#if defined(CONFIG_SOFTMMU) 1005 tcg_insn_unit *label_ptr[1]; 1006#else 1007 unsigned a_bits; 1008#endif 1009 TCGReg base; 1010 1011 data_regl = *args++; 1012 addr_regl = *args++; 1013 oi = *args++; 1014 opc = get_memop(oi); 1015 1016#if defined(CONFIG_SOFTMMU) 1017 tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0); 1018 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); 1019 tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc); 1020 add_qemu_ldst_label(s, 0, oi, 1021 0, /* type param is unused for stores */ 1022 data_regl, addr_regl, 1023 s->code_ptr, label_ptr); 1024#else 1025 a_bits = get_alignment_bits(opc); 1026 if (a_bits) { 1027 tcg_out_test_alignment(s, false, addr_regl, a_bits); 1028 } 1029 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); 1030 TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 1031 tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); 1032#endif 1033} 1034 1035/* LoongArch uses `andi zero, zero, 0` as NOP. */ 1036#define NOP OPC_ANDI 1037static void tcg_out_nop(TCGContext *s) 1038{ 1039 tcg_out32(s, NOP); 1040} 1041 1042void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1043 uintptr_t jmp_rx, uintptr_t jmp_rw) 1044{ 1045 tcg_insn_unit i1, i2; 1046 ptrdiff_t upper, lower; 1047 uintptr_t addr = tb->jmp_target_addr[n]; 1048 ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2; 1049 1050 if (offset == sextreg(offset, 0, 26)) { 1051 i1 = encode_sd10k16_insn(OPC_B, offset); 1052 i2 = NOP; 1053 } else { 1054 tcg_debug_assert(offset == sextreg(offset, 0, 36)); 1055 lower = (int16_t)offset; 1056 upper = (offset - lower) >> 16; 1057 1058 i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper); 1059 i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower); 1060 } 1061 uint64_t pair = ((uint64_t)i2 << 32) | i1; 1062 qatomic_set((uint64_t *)jmp_rw, pair); 1063 flush_idcache_range(jmp_rx, jmp_rw, 8); 1064} 1065 1066/* 1067 * Entry-points 1068 */ 1069 1070static const tcg_insn_unit *tb_ret_addr; 1071 1072static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1073{ 1074 /* Reuse the zeroing that exists for goto_ptr. */ 1075 if (a0 == 0) { 1076 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1077 } else { 1078 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1079 tcg_out_call_int(s, tb_ret_addr, true); 1080 } 1081} 1082 1083static void tcg_out_goto_tb(TCGContext *s, int which) 1084{ 1085 /* 1086 * Ensure that patch area is 8-byte aligned so that an 1087 * atomic write can be used to patch the target address. 1088 */ 1089 if ((uintptr_t)s->code_ptr & 7) { 1090 tcg_out_nop(s); 1091 } 1092 set_jmp_insn_offset(s, which); 1093 /* 1094 * actual branch destination will be patched by 1095 * tb_target_set_jmp_target later 1096 */ 1097 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0); 1098 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1099 set_jmp_reset_offset(s, which); 1100} 1101 1102static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1103 const TCGArg args[TCG_MAX_OP_ARGS], 1104 const int const_args[TCG_MAX_OP_ARGS]) 1105{ 1106 TCGArg a0 = args[0]; 1107 TCGArg a1 = args[1]; 1108 TCGArg a2 = args[2]; 1109 int c2 = const_args[2]; 1110 1111 switch (opc) { 1112 case INDEX_op_mb: 1113 tcg_out_mb(s, a0); 1114 break; 1115 1116 case INDEX_op_goto_ptr: 1117 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1118 break; 1119 1120 case INDEX_op_br: 1121 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1122 0); 1123 tcg_out_opc_b(s, 0); 1124 break; 1125 1126 case INDEX_op_brcond_i32: 1127 case INDEX_op_brcond_i64: 1128 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1129 break; 1130 1131 case INDEX_op_ext8s_i32: 1132 case INDEX_op_ext8s_i64: 1133 tcg_out_ext8s(s, a0, a1); 1134 break; 1135 1136 case INDEX_op_ext8u_i32: 1137 case INDEX_op_ext8u_i64: 1138 tcg_out_ext8u(s, a0, a1); 1139 break; 1140 1141 case INDEX_op_ext16s_i32: 1142 case INDEX_op_ext16s_i64: 1143 tcg_out_ext16s(s, a0, a1); 1144 break; 1145 1146 case INDEX_op_ext16u_i32: 1147 case INDEX_op_ext16u_i64: 1148 tcg_out_ext16u(s, a0, a1); 1149 break; 1150 1151 case INDEX_op_ext32u_i64: 1152 case INDEX_op_extu_i32_i64: 1153 tcg_out_ext32u(s, a0, a1); 1154 break; 1155 1156 case INDEX_op_ext32s_i64: 1157 case INDEX_op_extrl_i64_i32: 1158 case INDEX_op_ext_i32_i64: 1159 tcg_out_ext32s(s, a0, a1); 1160 break; 1161 1162 case INDEX_op_extrh_i64_i32: 1163 tcg_out_opc_srai_d(s, a0, a1, 32); 1164 break; 1165 1166 case INDEX_op_not_i32: 1167 case INDEX_op_not_i64: 1168 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1169 break; 1170 1171 case INDEX_op_nor_i32: 1172 case INDEX_op_nor_i64: 1173 if (c2) { 1174 tcg_out_opc_ori(s, a0, a1, a2); 1175 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1176 } else { 1177 tcg_out_opc_nor(s, a0, a1, a2); 1178 } 1179 break; 1180 1181 case INDEX_op_andc_i32: 1182 case INDEX_op_andc_i64: 1183 if (c2) { 1184 /* guaranteed to fit due to constraint */ 1185 tcg_out_opc_andi(s, a0, a1, ~a2); 1186 } else { 1187 tcg_out_opc_andn(s, a0, a1, a2); 1188 } 1189 break; 1190 1191 case INDEX_op_orc_i32: 1192 case INDEX_op_orc_i64: 1193 if (c2) { 1194 /* guaranteed to fit due to constraint */ 1195 tcg_out_opc_ori(s, a0, a1, ~a2); 1196 } else { 1197 tcg_out_opc_orn(s, a0, a1, a2); 1198 } 1199 break; 1200 1201 case INDEX_op_and_i32: 1202 case INDEX_op_and_i64: 1203 if (c2) { 1204 tcg_out_opc_andi(s, a0, a1, a2); 1205 } else { 1206 tcg_out_opc_and(s, a0, a1, a2); 1207 } 1208 break; 1209 1210 case INDEX_op_or_i32: 1211 case INDEX_op_or_i64: 1212 if (c2) { 1213 tcg_out_opc_ori(s, a0, a1, a2); 1214 } else { 1215 tcg_out_opc_or(s, a0, a1, a2); 1216 } 1217 break; 1218 1219 case INDEX_op_xor_i32: 1220 case INDEX_op_xor_i64: 1221 if (c2) { 1222 tcg_out_opc_xori(s, a0, a1, a2); 1223 } else { 1224 tcg_out_opc_xor(s, a0, a1, a2); 1225 } 1226 break; 1227 1228 case INDEX_op_extract_i32: 1229 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1230 break; 1231 case INDEX_op_extract_i64: 1232 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1233 break; 1234 1235 case INDEX_op_deposit_i32: 1236 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1237 break; 1238 case INDEX_op_deposit_i64: 1239 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1240 break; 1241 1242 case INDEX_op_bswap16_i32: 1243 case INDEX_op_bswap16_i64: 1244 tcg_out_opc_revb_2h(s, a0, a1); 1245 if (a2 & TCG_BSWAP_OS) { 1246 tcg_out_ext16s(s, a0, a0); 1247 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1248 tcg_out_ext16u(s, a0, a0); 1249 } 1250 break; 1251 1252 case INDEX_op_bswap32_i32: 1253 /* All 32-bit values are computed sign-extended in the register. */ 1254 a2 = TCG_BSWAP_OS; 1255 /* fallthrough */ 1256 case INDEX_op_bswap32_i64: 1257 tcg_out_opc_revb_2w(s, a0, a1); 1258 if (a2 & TCG_BSWAP_OS) { 1259 tcg_out_ext32s(s, a0, a0); 1260 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1261 tcg_out_ext32u(s, a0, a0); 1262 } 1263 break; 1264 1265 case INDEX_op_bswap64_i64: 1266 tcg_out_opc_revb_d(s, a0, a1); 1267 break; 1268 1269 case INDEX_op_clz_i32: 1270 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1271 break; 1272 case INDEX_op_clz_i64: 1273 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1274 break; 1275 1276 case INDEX_op_ctz_i32: 1277 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1278 break; 1279 case INDEX_op_ctz_i64: 1280 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1281 break; 1282 1283 case INDEX_op_shl_i32: 1284 if (c2) { 1285 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1286 } else { 1287 tcg_out_opc_sll_w(s, a0, a1, a2); 1288 } 1289 break; 1290 case INDEX_op_shl_i64: 1291 if (c2) { 1292 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1293 } else { 1294 tcg_out_opc_sll_d(s, a0, a1, a2); 1295 } 1296 break; 1297 1298 case INDEX_op_shr_i32: 1299 if (c2) { 1300 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1301 } else { 1302 tcg_out_opc_srl_w(s, a0, a1, a2); 1303 } 1304 break; 1305 case INDEX_op_shr_i64: 1306 if (c2) { 1307 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1308 } else { 1309 tcg_out_opc_srl_d(s, a0, a1, a2); 1310 } 1311 break; 1312 1313 case INDEX_op_sar_i32: 1314 if (c2) { 1315 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1316 } else { 1317 tcg_out_opc_sra_w(s, a0, a1, a2); 1318 } 1319 break; 1320 case INDEX_op_sar_i64: 1321 if (c2) { 1322 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1323 } else { 1324 tcg_out_opc_sra_d(s, a0, a1, a2); 1325 } 1326 break; 1327 1328 case INDEX_op_rotl_i32: 1329 /* transform into equivalent rotr/rotri */ 1330 if (c2) { 1331 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1332 } else { 1333 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1334 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1335 } 1336 break; 1337 case INDEX_op_rotl_i64: 1338 /* transform into equivalent rotr/rotri */ 1339 if (c2) { 1340 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1341 } else { 1342 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1343 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1344 } 1345 break; 1346 1347 case INDEX_op_rotr_i32: 1348 if (c2) { 1349 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1350 } else { 1351 tcg_out_opc_rotr_w(s, a0, a1, a2); 1352 } 1353 break; 1354 case INDEX_op_rotr_i64: 1355 if (c2) { 1356 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1357 } else { 1358 tcg_out_opc_rotr_d(s, a0, a1, a2); 1359 } 1360 break; 1361 1362 case INDEX_op_add_i32: 1363 if (c2) { 1364 tcg_out_opc_addi_w(s, a0, a1, a2); 1365 } else { 1366 tcg_out_opc_add_w(s, a0, a1, a2); 1367 } 1368 break; 1369 case INDEX_op_add_i64: 1370 if (c2) { 1371 tcg_out_opc_addi_d(s, a0, a1, a2); 1372 } else { 1373 tcg_out_opc_add_d(s, a0, a1, a2); 1374 } 1375 break; 1376 1377 case INDEX_op_sub_i32: 1378 if (c2) { 1379 tcg_out_opc_addi_w(s, a0, a1, -a2); 1380 } else { 1381 tcg_out_opc_sub_w(s, a0, a1, a2); 1382 } 1383 break; 1384 case INDEX_op_sub_i64: 1385 if (c2) { 1386 tcg_out_opc_addi_d(s, a0, a1, -a2); 1387 } else { 1388 tcg_out_opc_sub_d(s, a0, a1, a2); 1389 } 1390 break; 1391 1392 case INDEX_op_mul_i32: 1393 tcg_out_opc_mul_w(s, a0, a1, a2); 1394 break; 1395 case INDEX_op_mul_i64: 1396 tcg_out_opc_mul_d(s, a0, a1, a2); 1397 break; 1398 1399 case INDEX_op_mulsh_i32: 1400 tcg_out_opc_mulh_w(s, a0, a1, a2); 1401 break; 1402 case INDEX_op_mulsh_i64: 1403 tcg_out_opc_mulh_d(s, a0, a1, a2); 1404 break; 1405 1406 case INDEX_op_muluh_i32: 1407 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1408 break; 1409 case INDEX_op_muluh_i64: 1410 tcg_out_opc_mulh_du(s, a0, a1, a2); 1411 break; 1412 1413 case INDEX_op_div_i32: 1414 tcg_out_opc_div_w(s, a0, a1, a2); 1415 break; 1416 case INDEX_op_div_i64: 1417 tcg_out_opc_div_d(s, a0, a1, a2); 1418 break; 1419 1420 case INDEX_op_divu_i32: 1421 tcg_out_opc_div_wu(s, a0, a1, a2); 1422 break; 1423 case INDEX_op_divu_i64: 1424 tcg_out_opc_div_du(s, a0, a1, a2); 1425 break; 1426 1427 case INDEX_op_rem_i32: 1428 tcg_out_opc_mod_w(s, a0, a1, a2); 1429 break; 1430 case INDEX_op_rem_i64: 1431 tcg_out_opc_mod_d(s, a0, a1, a2); 1432 break; 1433 1434 case INDEX_op_remu_i32: 1435 tcg_out_opc_mod_wu(s, a0, a1, a2); 1436 break; 1437 case INDEX_op_remu_i64: 1438 tcg_out_opc_mod_du(s, a0, a1, a2); 1439 break; 1440 1441 case INDEX_op_setcond_i32: 1442 case INDEX_op_setcond_i64: 1443 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1444 break; 1445 1446 case INDEX_op_ld8s_i32: 1447 case INDEX_op_ld8s_i64: 1448 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1449 break; 1450 case INDEX_op_ld8u_i32: 1451 case INDEX_op_ld8u_i64: 1452 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1453 break; 1454 case INDEX_op_ld16s_i32: 1455 case INDEX_op_ld16s_i64: 1456 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1457 break; 1458 case INDEX_op_ld16u_i32: 1459 case INDEX_op_ld16u_i64: 1460 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1461 break; 1462 case INDEX_op_ld_i32: 1463 case INDEX_op_ld32s_i64: 1464 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1465 break; 1466 case INDEX_op_ld32u_i64: 1467 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1468 break; 1469 case INDEX_op_ld_i64: 1470 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1471 break; 1472 1473 case INDEX_op_st8_i32: 1474 case INDEX_op_st8_i64: 1475 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1476 break; 1477 case INDEX_op_st16_i32: 1478 case INDEX_op_st16_i64: 1479 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1480 break; 1481 case INDEX_op_st_i32: 1482 case INDEX_op_st32_i64: 1483 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1484 break; 1485 case INDEX_op_st_i64: 1486 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1487 break; 1488 1489 case INDEX_op_qemu_ld_i32: 1490 tcg_out_qemu_ld(s, args, TCG_TYPE_I32); 1491 break; 1492 case INDEX_op_qemu_ld_i64: 1493 tcg_out_qemu_ld(s, args, TCG_TYPE_I64); 1494 break; 1495 case INDEX_op_qemu_st_i32: 1496 tcg_out_qemu_st(s, args); 1497 break; 1498 case INDEX_op_qemu_st_i64: 1499 tcg_out_qemu_st(s, args); 1500 break; 1501 1502 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1503 case INDEX_op_mov_i64: 1504 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1505 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1506 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1507 default: 1508 g_assert_not_reached(); 1509 } 1510} 1511 1512static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1513{ 1514 switch (op) { 1515 case INDEX_op_goto_ptr: 1516 return C_O0_I1(r); 1517 1518 case INDEX_op_st8_i32: 1519 case INDEX_op_st8_i64: 1520 case INDEX_op_st16_i32: 1521 case INDEX_op_st16_i64: 1522 case INDEX_op_st32_i64: 1523 case INDEX_op_st_i32: 1524 case INDEX_op_st_i64: 1525 return C_O0_I2(rZ, r); 1526 1527 case INDEX_op_brcond_i32: 1528 case INDEX_op_brcond_i64: 1529 return C_O0_I2(rZ, rZ); 1530 1531 case INDEX_op_qemu_st_i32: 1532 case INDEX_op_qemu_st_i64: 1533 return C_O0_I2(LZ, L); 1534 1535 case INDEX_op_ext8s_i32: 1536 case INDEX_op_ext8s_i64: 1537 case INDEX_op_ext8u_i32: 1538 case INDEX_op_ext8u_i64: 1539 case INDEX_op_ext16s_i32: 1540 case INDEX_op_ext16s_i64: 1541 case INDEX_op_ext16u_i32: 1542 case INDEX_op_ext16u_i64: 1543 case INDEX_op_ext32s_i64: 1544 case INDEX_op_ext32u_i64: 1545 case INDEX_op_extu_i32_i64: 1546 case INDEX_op_extrl_i64_i32: 1547 case INDEX_op_extrh_i64_i32: 1548 case INDEX_op_ext_i32_i64: 1549 case INDEX_op_not_i32: 1550 case INDEX_op_not_i64: 1551 case INDEX_op_extract_i32: 1552 case INDEX_op_extract_i64: 1553 case INDEX_op_bswap16_i32: 1554 case INDEX_op_bswap16_i64: 1555 case INDEX_op_bswap32_i32: 1556 case INDEX_op_bswap32_i64: 1557 case INDEX_op_bswap64_i64: 1558 case INDEX_op_ld8s_i32: 1559 case INDEX_op_ld8s_i64: 1560 case INDEX_op_ld8u_i32: 1561 case INDEX_op_ld8u_i64: 1562 case INDEX_op_ld16s_i32: 1563 case INDEX_op_ld16s_i64: 1564 case INDEX_op_ld16u_i32: 1565 case INDEX_op_ld16u_i64: 1566 case INDEX_op_ld32s_i64: 1567 case INDEX_op_ld32u_i64: 1568 case INDEX_op_ld_i32: 1569 case INDEX_op_ld_i64: 1570 return C_O1_I1(r, r); 1571 1572 case INDEX_op_qemu_ld_i32: 1573 case INDEX_op_qemu_ld_i64: 1574 return C_O1_I1(r, L); 1575 1576 case INDEX_op_andc_i32: 1577 case INDEX_op_andc_i64: 1578 case INDEX_op_orc_i32: 1579 case INDEX_op_orc_i64: 1580 /* 1581 * LoongArch insns for these ops don't have reg-imm forms, but we 1582 * can express using andi/ori if ~constant satisfies 1583 * TCG_CT_CONST_U12. 1584 */ 1585 return C_O1_I2(r, r, rC); 1586 1587 case INDEX_op_shl_i32: 1588 case INDEX_op_shl_i64: 1589 case INDEX_op_shr_i32: 1590 case INDEX_op_shr_i64: 1591 case INDEX_op_sar_i32: 1592 case INDEX_op_sar_i64: 1593 case INDEX_op_rotl_i32: 1594 case INDEX_op_rotl_i64: 1595 case INDEX_op_rotr_i32: 1596 case INDEX_op_rotr_i64: 1597 return C_O1_I2(r, r, ri); 1598 1599 case INDEX_op_add_i32: 1600 case INDEX_op_add_i64: 1601 return C_O1_I2(r, r, rI); 1602 1603 case INDEX_op_and_i32: 1604 case INDEX_op_and_i64: 1605 case INDEX_op_nor_i32: 1606 case INDEX_op_nor_i64: 1607 case INDEX_op_or_i32: 1608 case INDEX_op_or_i64: 1609 case INDEX_op_xor_i32: 1610 case INDEX_op_xor_i64: 1611 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 1612 return C_O1_I2(r, r, rU); 1613 1614 case INDEX_op_clz_i32: 1615 case INDEX_op_clz_i64: 1616 case INDEX_op_ctz_i32: 1617 case INDEX_op_ctz_i64: 1618 return C_O1_I2(r, r, rW); 1619 1620 case INDEX_op_setcond_i32: 1621 case INDEX_op_setcond_i64: 1622 return C_O1_I2(r, r, rZ); 1623 1624 case INDEX_op_deposit_i32: 1625 case INDEX_op_deposit_i64: 1626 /* Must deposit into the same register as input */ 1627 return C_O1_I2(r, 0, rZ); 1628 1629 case INDEX_op_sub_i32: 1630 case INDEX_op_sub_i64: 1631 return C_O1_I2(r, rZ, rN); 1632 1633 case INDEX_op_mul_i32: 1634 case INDEX_op_mul_i64: 1635 case INDEX_op_mulsh_i32: 1636 case INDEX_op_mulsh_i64: 1637 case INDEX_op_muluh_i32: 1638 case INDEX_op_muluh_i64: 1639 case INDEX_op_div_i32: 1640 case INDEX_op_div_i64: 1641 case INDEX_op_divu_i32: 1642 case INDEX_op_divu_i64: 1643 case INDEX_op_rem_i32: 1644 case INDEX_op_rem_i64: 1645 case INDEX_op_remu_i32: 1646 case INDEX_op_remu_i64: 1647 return C_O1_I2(r, rZ, rZ); 1648 1649 default: 1650 g_assert_not_reached(); 1651 } 1652} 1653 1654static const int tcg_target_callee_save_regs[] = { 1655 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 1656 TCG_REG_S1, 1657 TCG_REG_S2, 1658 TCG_REG_S3, 1659 TCG_REG_S4, 1660 TCG_REG_S5, 1661 TCG_REG_S6, 1662 TCG_REG_S7, 1663 TCG_REG_S8, 1664 TCG_REG_S9, 1665 TCG_REG_RA, /* should be last for ABI compliance */ 1666}; 1667 1668/* Stack frame parameters. */ 1669#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 1670#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 1671#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 1672#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 1673 + TCG_TARGET_STACK_ALIGN - 1) \ 1674 & -TCG_TARGET_STACK_ALIGN) 1675#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 1676 1677/* We're expecting to be able to use an immediate for frame allocation. */ 1678QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 1679 1680/* Generate global QEMU prologue and epilogue code */ 1681static void tcg_target_qemu_prologue(TCGContext *s) 1682{ 1683 int i; 1684 1685 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 1686 1687 /* TB prologue */ 1688 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 1689 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1690 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1691 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1692 } 1693 1694#if !defined(CONFIG_SOFTMMU) 1695 if (USE_GUEST_BASE) { 1696 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 1697 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 1698 } 1699#endif 1700 1701 /* Call generated code */ 1702 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 1703 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 1704 1705 /* Return path for goto_ptr. Set return value to 0 */ 1706 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 1707 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 1708 1709 /* TB epilogue */ 1710 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 1711 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1712 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1713 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1714 } 1715 1716 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 1717 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 1718} 1719 1720static void tcg_target_init(TCGContext *s) 1721{ 1722 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 1723 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 1724 1725 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 1726 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 1727 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 1728 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 1729 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 1730 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 1731 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 1732 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 1733 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 1734 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 1735 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 1736 1737 s->reserved_regs = 0; 1738 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 1739 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 1740 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 1741 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 1742 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 1743 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 1744 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 1745} 1746 1747typedef struct { 1748 DebugFrameHeader h; 1749 uint8_t fde_def_cfa[4]; 1750 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 1751} DebugFrame; 1752 1753#define ELF_HOST_MACHINE EM_LOONGARCH 1754 1755static const DebugFrame debug_frame = { 1756 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 1757 .h.cie.id = -1, 1758 .h.cie.version = 1, 1759 .h.cie.code_align = 1, 1760 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 1761 .h.cie.return_column = TCG_REG_RA, 1762 1763 /* Total FDE size does not include the "len" member. */ 1764 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 1765 1766 .fde_def_cfa = { 1767 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 1768 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 1769 (FRAME_SIZE >> 7) 1770 }, 1771 .fde_reg_ofs = { 1772 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 1773 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 1774 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 1775 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 1776 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 1777 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 1778 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 1779 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 1780 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 1781 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 1782 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 1783 } 1784}; 1785 1786void tcg_register_jit(const void *buf, size_t buf_size) 1787{ 1788 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 1789} 1790