1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33#include <asm/hwcap.h> 34 35#ifdef CONFIG_DEBUG_TCG 36static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 37 "zero", 38 "ra", 39 "tp", 40 "sp", 41 "a0", 42 "a1", 43 "a2", 44 "a3", 45 "a4", 46 "a5", 47 "a6", 48 "a7", 49 "t0", 50 "t1", 51 "t2", 52 "t3", 53 "t4", 54 "t5", 55 "t6", 56 "t7", 57 "t8", 58 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 59 "s9", 60 "s0", 61 "s1", 62 "s2", 63 "s3", 64 "s4", 65 "s5", 66 "s6", 67 "s7", 68 "s8", 69 "vr0", 70 "vr1", 71 "vr2", 72 "vr3", 73 "vr4", 74 "vr5", 75 "vr6", 76 "vr7", 77 "vr8", 78 "vr9", 79 "vr10", 80 "vr11", 81 "vr12", 82 "vr13", 83 "vr14", 84 "vr15", 85 "vr16", 86 "vr17", 87 "vr18", 88 "vr19", 89 "vr20", 90 "vr21", 91 "vr22", 92 "vr23", 93 "vr24", 94 "vr25", 95 "vr26", 96 "vr27", 97 "vr28", 98 "vr29", 99 "vr30", 100 "vr31", 101}; 102#endif 103 104static const int tcg_target_reg_alloc_order[] = { 105 /* Registers preserved across calls */ 106 /* TCG_REG_S0 reserved for TCG_AREG0 */ 107 TCG_REG_S1, 108 TCG_REG_S2, 109 TCG_REG_S3, 110 TCG_REG_S4, 111 TCG_REG_S5, 112 TCG_REG_S6, 113 TCG_REG_S7, 114 TCG_REG_S8, 115 TCG_REG_S9, 116 117 /* Registers (potentially) clobbered across calls */ 118 TCG_REG_T0, 119 TCG_REG_T1, 120 TCG_REG_T2, 121 TCG_REG_T3, 122 TCG_REG_T4, 123 TCG_REG_T5, 124 TCG_REG_T6, 125 TCG_REG_T7, 126 TCG_REG_T8, 127 128 /* Argument registers, opposite order of allocation. */ 129 TCG_REG_A7, 130 TCG_REG_A6, 131 TCG_REG_A5, 132 TCG_REG_A4, 133 TCG_REG_A3, 134 TCG_REG_A2, 135 TCG_REG_A1, 136 TCG_REG_A0, 137 138 /* Vector registers */ 139 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, 140 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, 141 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, 142 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, 143 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, 144 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, 145 /* V24 - V31 are caller-saved, and skipped. */ 146}; 147 148static const int tcg_target_call_iarg_regs[] = { 149 TCG_REG_A0, 150 TCG_REG_A1, 151 TCG_REG_A2, 152 TCG_REG_A3, 153 TCG_REG_A4, 154 TCG_REG_A5, 155 TCG_REG_A6, 156 TCG_REG_A7, 157}; 158 159static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 160{ 161 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 162 tcg_debug_assert(slot >= 0 && slot <= 1); 163 return TCG_REG_A0 + slot; 164} 165 166#define TCG_GUEST_BASE_REG TCG_REG_S1 167 168#define TCG_CT_CONST_ZERO 0x100 169#define TCG_CT_CONST_S12 0x200 170#define TCG_CT_CONST_S32 0x400 171#define TCG_CT_CONST_U12 0x800 172#define TCG_CT_CONST_C12 0x1000 173#define TCG_CT_CONST_WSZ 0x2000 174#define TCG_CT_CONST_VCMP 0x4000 175#define TCG_CT_CONST_VADD 0x8000 176 177#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 178#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) 179 180static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 181{ 182 return sextract64(val, pos, len); 183} 184 185/* test if a constant matches the constraint */ 186static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) 187{ 188 if (ct & TCG_CT_CONST) { 189 return true; 190 } 191 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 192 return true; 193 } 194 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 195 return true; 196 } 197 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 198 return true; 199 } 200 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 201 return true; 202 } 203 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 204 return true; 205 } 206 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 207 return true; 208 } 209 int64_t vec_val = sextract64(val, 0, 8 << vece); 210 if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) { 211 return true; 212 } 213 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { 214 return true; 215 } 216 return false; 217} 218 219/* 220 * Relocations 221 */ 222 223/* 224 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 225 * complicated; a whopping stack machine is needed to stuff the fields, at 226 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 227 * needed. 228 * 229 * Hence, define our own simpler relocation types. Numbers are chosen as to 230 * not collide with potential future additions to the true ELF relocation 231 * type enum. 232 */ 233 234/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 235#define R_LOONGARCH_BR_SK16 256 236/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 237#define R_LOONGARCH_BR_SD10K16 257 238 239static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 240{ 241 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 242 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 243 244 tcg_debug_assert((offset & 3) == 0); 245 offset >>= 2; 246 if (offset == sextreg(offset, 0, 16)) { 247 *src_rw = deposit64(*src_rw, 10, 16, offset); 248 return true; 249 } 250 251 return false; 252} 253 254static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 255 const tcg_insn_unit *target) 256{ 257 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 258 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 259 260 tcg_debug_assert((offset & 3) == 0); 261 offset >>= 2; 262 if (offset == sextreg(offset, 0, 26)) { 263 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 264 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 265 return true; 266 } 267 268 return false; 269} 270 271static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 272 intptr_t value, intptr_t addend) 273{ 274 tcg_debug_assert(addend == 0); 275 switch (type) { 276 case R_LOONGARCH_BR_SK16: 277 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 278 case R_LOONGARCH_BR_SD10K16: 279 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 280 default: 281 g_assert_not_reached(); 282 } 283} 284 285#include "tcg-insn-defs.c.inc" 286 287/* 288 * TCG intrinsics 289 */ 290 291static void tcg_out_mb(TCGContext *s, TCGArg a0) 292{ 293 /* Baseline LoongArch only has the full barrier, unfortunately. */ 294 tcg_out_opc_dbar(s, 0); 295} 296 297static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 298{ 299 if (ret == arg) { 300 return true; 301 } 302 switch (type) { 303 case TCG_TYPE_I32: 304 case TCG_TYPE_I64: 305 /* 306 * Conventional register-register move used in LoongArch is 307 * `or dst, src, zero`. 308 */ 309 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 310 break; 311 default: 312 g_assert_not_reached(); 313 } 314 return true; 315} 316 317/* Loads a 32-bit immediate into rd, sign-extended. */ 318static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 319{ 320 tcg_target_long lo = sextreg(val, 0, 12); 321 tcg_target_long hi12 = sextreg(val, 12, 20); 322 323 /* Single-instruction cases. */ 324 if (hi12 == 0) { 325 /* val fits in uimm12: ori rd, zero, val */ 326 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 327 return; 328 } 329 if (hi12 == sextreg(lo, 12, 20)) { 330 /* val fits in simm12: addi.w rd, zero, val */ 331 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 332 return; 333 } 334 335 /* High bits must be set; load with lu12i.w + optional ori. */ 336 tcg_out_opc_lu12i_w(s, rd, hi12); 337 if (lo != 0) { 338 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 339 } 340} 341 342static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 343 tcg_target_long val) 344{ 345 /* 346 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 347 * with dedicated instructions for filling the respective bitfields 348 * below: 349 * 350 * 6 5 4 3 351 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 352 * +-----------------------+---------------------------------------+... 353 * | hi52 | hi32 | 354 * +-----------------------+---------------------------------------+... 355 * 3 2 1 356 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 357 * ...+-------------------------------------+-------------------------+ 358 * | hi12 | lo | 359 * ...+-------------------------------------+-------------------------+ 360 * 361 * Check if val belong to one of the several fast cases, before falling 362 * back to the slow path. 363 */ 364 365 intptr_t pc_offset; 366 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 367 tcg_target_long hi12, hi32, hi52; 368 369 /* Value fits in signed i32. */ 370 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 371 tcg_out_movi_i32(s, rd, val); 372 return; 373 } 374 375 /* PC-relative cases. */ 376 pc_offset = tcg_pcrel_diff(s, (void *)val); 377 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 378 /* Single pcaddu2i. */ 379 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 380 return; 381 } 382 383 if (pc_offset == (int32_t)pc_offset) { 384 /* Offset within 32 bits; load with pcalau12i + ori. */ 385 val_lo = sextreg(val, 0, 12); 386 val_hi = val >> 12; 387 pc_hi = (val - pc_offset) >> 12; 388 offset_hi = val_hi - pc_hi; 389 390 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 391 tcg_out_opc_pcalau12i(s, rd, offset_hi); 392 if (val_lo != 0) { 393 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 394 } 395 return; 396 } 397 398 hi12 = sextreg(val, 12, 20); 399 hi32 = sextreg(val, 32, 20); 400 hi52 = sextreg(val, 52, 12); 401 402 /* Single cu52i.d case. */ 403 if ((hi52 != 0) && (ctz64(val) >= 52)) { 404 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 405 return; 406 } 407 408 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 409 tcg_out_movi_i32(s, rd, val); 410 411 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 412 if (hi32 != sextreg(hi12, 20, 20)) { 413 tcg_out_opc_cu32i_d(s, rd, hi32); 414 } 415 416 if (hi52 != sextreg(hi32, 20, 12)) { 417 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 418 } 419} 420 421static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 422 TCGReg rs, tcg_target_long imm) 423{ 424 tcg_target_long lo12 = sextreg(imm, 0, 12); 425 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 426 427 /* 428 * Note that there's a hole in between hi16 and lo12: 429 * 430 * 3 2 1 0 431 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 432 * ...+-------------------------------+-------+-----------------------+ 433 * | hi16 | | lo12 | 434 * ...+-------------------------------+-------+-----------------------+ 435 * 436 * For bits within that hole, it's more efficient to use LU12I and ADD. 437 */ 438 if (imm == (hi16 << 16) + lo12) { 439 if (hi16) { 440 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 441 rs = rd; 442 } 443 if (type == TCG_TYPE_I32) { 444 tcg_out_opc_addi_w(s, rd, rs, lo12); 445 } else if (lo12) { 446 tcg_out_opc_addi_d(s, rd, rs, lo12); 447 } else { 448 tcg_out_mov(s, type, rd, rs); 449 } 450 } else { 451 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 452 if (type == TCG_TYPE_I32) { 453 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 454 } else { 455 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 456 } 457 } 458} 459 460static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 461{ 462 return false; 463} 464 465static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 466 tcg_target_long imm) 467{ 468 /* This function is only used for passing structs by reference. */ 469 g_assert_not_reached(); 470} 471 472static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 473{ 474 tcg_out_opc_andi(s, ret, arg, 0xff); 475} 476 477static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 478{ 479 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 480} 481 482static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 483{ 484 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 485} 486 487static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 488{ 489 tcg_out_opc_sext_b(s, ret, arg); 490} 491 492static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 493{ 494 tcg_out_opc_sext_h(s, ret, arg); 495} 496 497static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 498{ 499 tcg_out_opc_addi_w(s, ret, arg, 0); 500} 501 502static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 503{ 504 if (ret != arg) { 505 tcg_out_ext32s(s, ret, arg); 506 } 507} 508 509static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 510{ 511 tcg_out_ext32u(s, ret, arg); 512} 513 514static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 515{ 516 tcg_out_ext32s(s, ret, arg); 517} 518 519static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 520 TCGReg a0, TCGReg a1, TCGReg a2, 521 bool c2, bool is_32bit) 522{ 523 if (c2) { 524 /* 525 * Fast path: semantics already satisfied due to constraint and 526 * insn behavior, single instruction is enough. 527 */ 528 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 529 /* all clz/ctz insns belong to DJ-format */ 530 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 531 return; 532 } 533 534 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 535 /* a0 = a1 ? REG_TMP0 : a2 */ 536 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 537 tcg_out_opc_masknez(s, a0, a2, a1); 538 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 539} 540 541#define SETCOND_INV TCG_TARGET_NB_REGS 542#define SETCOND_NEZ (SETCOND_INV << 1) 543#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 544 545static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 546 TCGReg arg1, tcg_target_long arg2, bool c2) 547{ 548 int flags = 0; 549 550 switch (cond) { 551 case TCG_COND_EQ: /* -> NE */ 552 case TCG_COND_GE: /* -> LT */ 553 case TCG_COND_GEU: /* -> LTU */ 554 case TCG_COND_GT: /* -> LE */ 555 case TCG_COND_GTU: /* -> LEU */ 556 cond = tcg_invert_cond(cond); 557 flags ^= SETCOND_INV; 558 break; 559 default: 560 break; 561 } 562 563 switch (cond) { 564 case TCG_COND_LE: 565 case TCG_COND_LEU: 566 /* 567 * If we have a constant input, the most efficient way to implement 568 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 569 * We don't need to care for this for LE because the constant input 570 * is still constrained to int32_t, and INT32_MAX+1 is representable 571 * in the 64-bit temporary register. 572 */ 573 if (c2) { 574 if (cond == TCG_COND_LEU) { 575 /* unsigned <= -1 is true */ 576 if (arg2 == -1) { 577 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 578 return ret; 579 } 580 cond = TCG_COND_LTU; 581 } else { 582 cond = TCG_COND_LT; 583 } 584 arg2 += 1; 585 } else { 586 TCGReg tmp = arg2; 587 arg2 = arg1; 588 arg1 = tmp; 589 cond = tcg_swap_cond(cond); /* LE -> GE */ 590 cond = tcg_invert_cond(cond); /* GE -> LT */ 591 flags ^= SETCOND_INV; 592 } 593 break; 594 default: 595 break; 596 } 597 598 switch (cond) { 599 case TCG_COND_NE: 600 flags |= SETCOND_NEZ; 601 if (!c2) { 602 tcg_out_opc_xor(s, ret, arg1, arg2); 603 } else if (arg2 == 0) { 604 ret = arg1; 605 } else if (arg2 >= 0 && arg2 <= 0xfff) { 606 tcg_out_opc_xori(s, ret, arg1, arg2); 607 } else { 608 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 609 } 610 break; 611 612 case TCG_COND_LT: 613 case TCG_COND_LTU: 614 if (c2) { 615 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 616 if (cond == TCG_COND_LT) { 617 tcg_out_opc_slti(s, ret, arg1, arg2); 618 } else { 619 tcg_out_opc_sltui(s, ret, arg1, arg2); 620 } 621 break; 622 } 623 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 624 arg2 = TCG_REG_TMP0; 625 } 626 if (cond == TCG_COND_LT) { 627 tcg_out_opc_slt(s, ret, arg1, arg2); 628 } else { 629 tcg_out_opc_sltu(s, ret, arg1, arg2); 630 } 631 break; 632 633 default: 634 g_assert_not_reached(); 635 break; 636 } 637 638 return ret | flags; 639} 640 641static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 642 TCGReg arg1, tcg_target_long arg2, bool c2) 643{ 644 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 645 646 if (tmpflags != ret) { 647 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 648 649 switch (tmpflags & SETCOND_FLAGS) { 650 case SETCOND_INV: 651 /* Intermediate result is boolean: simply invert. */ 652 tcg_out_opc_xori(s, ret, tmp, 1); 653 break; 654 case SETCOND_NEZ: 655 /* Intermediate result is zero/non-zero: test != 0. */ 656 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 657 break; 658 case SETCOND_NEZ | SETCOND_INV: 659 /* Intermediate result is zero/non-zero: test == 0. */ 660 tcg_out_opc_sltui(s, ret, tmp, 1); 661 break; 662 default: 663 g_assert_not_reached(); 664 } 665 } 666} 667 668static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 669 TCGReg c1, tcg_target_long c2, bool const2, 670 TCGReg v1, TCGReg v2) 671{ 672 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 673 TCGReg t; 674 675 /* Standardize the test below to t != 0. */ 676 if (tmpflags & SETCOND_INV) { 677 t = v1, v1 = v2, v2 = t; 678 } 679 680 t = tmpflags & ~SETCOND_FLAGS; 681 if (v1 == TCG_REG_ZERO) { 682 tcg_out_opc_masknez(s, ret, v2, t); 683 } else if (v2 == TCG_REG_ZERO) { 684 tcg_out_opc_maskeqz(s, ret, v1, t); 685 } else { 686 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 687 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 688 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 689 } 690} 691 692/* 693 * Branch helpers 694 */ 695 696static const struct { 697 LoongArchInsn op; 698 bool swap; 699} tcg_brcond_to_loongarch[] = { 700 [TCG_COND_EQ] = { OPC_BEQ, false }, 701 [TCG_COND_NE] = { OPC_BNE, false }, 702 [TCG_COND_LT] = { OPC_BGT, true }, 703 [TCG_COND_GE] = { OPC_BLE, true }, 704 [TCG_COND_LE] = { OPC_BLE, false }, 705 [TCG_COND_GT] = { OPC_BGT, false }, 706 [TCG_COND_LTU] = { OPC_BGTU, true }, 707 [TCG_COND_GEU] = { OPC_BLEU, true }, 708 [TCG_COND_LEU] = { OPC_BLEU, false }, 709 [TCG_COND_GTU] = { OPC_BGTU, false } 710}; 711 712static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 713 TCGReg arg2, TCGLabel *l) 714{ 715 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 716 717 tcg_debug_assert(op != 0); 718 719 if (tcg_brcond_to_loongarch[cond].swap) { 720 TCGReg t = arg1; 721 arg1 = arg2; 722 arg2 = t; 723 } 724 725 /* all conditional branch insns belong to DJSk16-format */ 726 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 727 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 728} 729 730static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 731{ 732 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 733 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 734 735 tcg_debug_assert((offset & 3) == 0); 736 if (offset == sextreg(offset, 0, 28)) { 737 /* short jump: +/- 256MiB */ 738 if (tail) { 739 tcg_out_opc_b(s, offset >> 2); 740 } else { 741 tcg_out_opc_bl(s, offset >> 2); 742 } 743 } else if (offset == sextreg(offset, 0, 38)) { 744 /* long jump: +/- 256GiB */ 745 tcg_target_long lo = sextreg(offset, 0, 18); 746 tcg_target_long hi = offset - lo; 747 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 748 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 749 } else { 750 /* far jump: 64-bit */ 751 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 752 tcg_target_long hi = (tcg_target_long)arg - lo; 753 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 754 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 755 } 756} 757 758static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 759 const TCGHelperInfo *info) 760{ 761 tcg_out_call_int(s, arg, false); 762} 763 764/* 765 * Load/store helpers 766 */ 767 768static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 769 TCGReg addr, intptr_t offset) 770{ 771 intptr_t imm12 = sextreg(offset, 0, 12); 772 773 if (offset != imm12) { 774 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 775 776 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 777 imm12 = sextreg(diff, 0, 12); 778 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 779 } else { 780 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 781 if (addr != TCG_REG_ZERO) { 782 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 783 } 784 } 785 addr = TCG_REG_TMP2; 786 } 787 788 switch (opc) { 789 case OPC_LD_B: 790 case OPC_LD_BU: 791 case OPC_LD_H: 792 case OPC_LD_HU: 793 case OPC_LD_W: 794 case OPC_LD_WU: 795 case OPC_LD_D: 796 case OPC_ST_B: 797 case OPC_ST_H: 798 case OPC_ST_W: 799 case OPC_ST_D: 800 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 801 break; 802 default: 803 g_assert_not_reached(); 804 } 805} 806 807static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 808 TCGReg arg1, intptr_t arg2) 809{ 810 bool is_32bit = type == TCG_TYPE_I32; 811 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 812} 813 814static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 815 TCGReg arg1, intptr_t arg2) 816{ 817 bool is_32bit = type == TCG_TYPE_I32; 818 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 819} 820 821static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 822 TCGReg base, intptr_t ofs) 823{ 824 if (val == 0) { 825 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 826 return true; 827 } 828 return false; 829} 830 831/* 832 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 833 */ 834 835static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 836{ 837 tcg_out_opc_b(s, 0); 838 return reloc_br_sd10k16(s->code_ptr - 1, target); 839} 840 841static const TCGLdstHelperParam ldst_helper_param = { 842 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 843}; 844 845static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 846{ 847 MemOp opc = get_memop(l->oi); 848 849 /* resolve label address */ 850 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 851 return false; 852 } 853 854 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 855 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 856 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 857 return tcg_out_goto(s, l->raddr); 858} 859 860static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 861{ 862 MemOp opc = get_memop(l->oi); 863 864 /* resolve label address */ 865 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 866 return false; 867 } 868 869 tcg_out_st_helper_args(s, l, &ldst_helper_param); 870 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 871 return tcg_out_goto(s, l->raddr); 872} 873 874typedef struct { 875 TCGReg base; 876 TCGReg index; 877 TCGAtomAlign aa; 878} HostAddress; 879 880bool tcg_target_has_memory_bswap(MemOp memop) 881{ 882 return false; 883} 884 885/* We expect to use a 12-bit negative offset from ENV. */ 886#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 887 888/* 889 * For system-mode, perform the TLB load and compare. 890 * For user-mode, perform any required alignment tests. 891 * In both cases, return a TCGLabelQemuLdst structure if the slow path 892 * is required and fill in @h with the host address for the fast path. 893 */ 894static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 895 TCGReg addr_reg, MemOpIdx oi, 896 bool is_ld) 897{ 898 TCGType addr_type = s->addr_type; 899 TCGLabelQemuLdst *ldst = NULL; 900 MemOp opc = get_memop(oi); 901 MemOp a_bits; 902 903 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 904 a_bits = h->aa.align; 905 906 if (tcg_use_softmmu) { 907 unsigned s_bits = opc & MO_SIZE; 908 int mem_index = get_mmuidx(oi); 909 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 910 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 911 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 912 913 ldst = new_ldst_label(s); 914 ldst->is_ld = is_ld; 915 ldst->oi = oi; 916 ldst->addrlo_reg = addr_reg; 917 918 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 919 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 920 921 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 922 s->page_bits - CPU_TLB_ENTRY_BITS); 923 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 924 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 925 926 /* Load the tlb comparator and the addend. */ 927 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 928 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 929 is_ld ? offsetof(CPUTLBEntry, addr_read) 930 : offsetof(CPUTLBEntry, addr_write)); 931 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 932 offsetof(CPUTLBEntry, addend)); 933 934 /* 935 * For aligned accesses, we check the first byte and include the 936 * alignment bits within the address. For unaligned access, we 937 * check that we don't cross pages using the address of the last 938 * byte of the access. 939 */ 940 if (a_bits < s_bits) { 941 unsigned a_mask = (1u << a_bits) - 1; 942 unsigned s_mask = (1u << s_bits) - 1; 943 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); 944 } else { 945 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); 946 } 947 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, 948 a_bits, s->page_bits - 1); 949 950 /* Compare masked address with the TLB entry. */ 951 ldst->label_ptr[0] = s->code_ptr; 952 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 953 954 h->index = TCG_REG_TMP2; 955 } else { 956 if (a_bits) { 957 ldst = new_ldst_label(s); 958 959 ldst->is_ld = is_ld; 960 ldst->oi = oi; 961 ldst->addrlo_reg = addr_reg; 962 963 /* 964 * Without micro-architecture details, we don't know which of 965 * bstrpick or andi is faster, so use bstrpick as it's not 966 * constrained by imm field width. Not to say alignments >= 2^12 967 * are going to happen any time soon. 968 */ 969 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 970 971 ldst->label_ptr[0] = s->code_ptr; 972 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 973 } 974 975 h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 976 } 977 978 if (addr_type == TCG_TYPE_I32) { 979 h->base = TCG_REG_TMP0; 980 tcg_out_ext32u(s, h->base, addr_reg); 981 } else { 982 h->base = addr_reg; 983 } 984 985 return ldst; 986} 987 988static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 989 TCGReg rd, HostAddress h) 990{ 991 /* Byte swapping is left to middle-end expansion. */ 992 tcg_debug_assert((opc & MO_BSWAP) == 0); 993 994 switch (opc & MO_SSIZE) { 995 case MO_UB: 996 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 997 break; 998 case MO_SB: 999 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 1000 break; 1001 case MO_UW: 1002 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 1003 break; 1004 case MO_SW: 1005 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 1006 break; 1007 case MO_UL: 1008 if (type == TCG_TYPE_I64) { 1009 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 1010 break; 1011 } 1012 /* fallthrough */ 1013 case MO_SL: 1014 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1015 break; 1016 case MO_UQ: 1017 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1018 break; 1019 default: 1020 g_assert_not_reached(); 1021 } 1022} 1023 1024static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1025 MemOpIdx oi, TCGType data_type) 1026{ 1027 TCGLabelQemuLdst *ldst; 1028 HostAddress h; 1029 1030 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1031 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1032 1033 if (ldst) { 1034 ldst->type = data_type; 1035 ldst->datalo_reg = data_reg; 1036 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1037 } 1038} 1039 1040static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1041 TCGReg rd, HostAddress h) 1042{ 1043 /* Byte swapping is left to middle-end expansion. */ 1044 tcg_debug_assert((opc & MO_BSWAP) == 0); 1045 1046 switch (opc & MO_SIZE) { 1047 case MO_8: 1048 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1049 break; 1050 case MO_16: 1051 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1052 break; 1053 case MO_32: 1054 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1055 break; 1056 case MO_64: 1057 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1058 break; 1059 default: 1060 g_assert_not_reached(); 1061 } 1062} 1063 1064static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1065 MemOpIdx oi, TCGType data_type) 1066{ 1067 TCGLabelQemuLdst *ldst; 1068 HostAddress h; 1069 1070 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1071 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1072 1073 if (ldst) { 1074 ldst->type = data_type; 1075 ldst->datalo_reg = data_reg; 1076 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1077 } 1078} 1079 1080static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi, 1081 TCGReg addr_reg, MemOpIdx oi, bool is_ld) 1082{ 1083 TCGLabelQemuLdst *ldst; 1084 HostAddress h; 1085 1086 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); 1087 1088 if (h.aa.atom == MO_128) { 1089 /* 1090 * Use VLDX/VSTX when 128-bit atomicity is required. 1091 * If address is aligned to 16-bytes, the 128-bit load/store is atomic. 1092 */ 1093 if (is_ld) { 1094 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index); 1095 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0); 1096 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1); 1097 } else { 1098 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0); 1099 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1); 1100 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index); 1101 } 1102 } else { 1103 /* Otherwise use a pair of LD/ST. */ 1104 TCGReg base = h.base; 1105 if (h.index != TCG_REG_ZERO) { 1106 base = TCG_REG_TMP0; 1107 tcg_out_opc_add_d(s, base, h.base, h.index); 1108 } 1109 if (is_ld) { 1110 tcg_debug_assert(base != data_lo); 1111 tcg_out_opc_ld_d(s, data_lo, base, 0); 1112 tcg_out_opc_ld_d(s, data_hi, base, 8); 1113 } else { 1114 tcg_out_opc_st_d(s, data_lo, base, 0); 1115 tcg_out_opc_st_d(s, data_hi, base, 8); 1116 } 1117 } 1118 1119 if (ldst) { 1120 ldst->type = TCG_TYPE_I128; 1121 ldst->datalo_reg = data_lo; 1122 ldst->datahi_reg = data_hi; 1123 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1124 } 1125} 1126 1127/* 1128 * Entry-points 1129 */ 1130 1131static const tcg_insn_unit *tb_ret_addr; 1132 1133static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1134{ 1135 /* Reuse the zeroing that exists for goto_ptr. */ 1136 if (a0 == 0) { 1137 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1138 } else { 1139 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1140 tcg_out_call_int(s, tb_ret_addr, true); 1141 } 1142} 1143 1144static void tcg_out_goto_tb(TCGContext *s, int which) 1145{ 1146 /* 1147 * Direct branch, or load indirect address, to be patched 1148 * by tb_target_set_jmp_target. Check indirect load offset 1149 * in range early, regardless of direct branch distance, 1150 * via assert within tcg_out_opc_pcaddu2i. 1151 */ 1152 uintptr_t i_addr = get_jmp_target_addr(s, which); 1153 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1154 1155 set_jmp_insn_offset(s, which); 1156 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1157 1158 /* Finish the load and indirect branch. */ 1159 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1160 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1161 set_jmp_reset_offset(s, which); 1162} 1163 1164void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1165 uintptr_t jmp_rx, uintptr_t jmp_rw) 1166{ 1167 uintptr_t d_addr = tb->jmp_target_addr[n]; 1168 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1169 tcg_insn_unit insn; 1170 1171 /* Either directly branch, or load slot address for indirect branch. */ 1172 if (d_disp == sextreg(d_disp, 0, 26)) { 1173 insn = encode_sd10k16_insn(OPC_B, d_disp); 1174 } else { 1175 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1176 intptr_t i_disp = i_addr - jmp_rx; 1177 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1178 } 1179 1180 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1181 flush_idcache_range(jmp_rx, jmp_rw, 4); 1182} 1183 1184static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1185 const TCGArg args[TCG_MAX_OP_ARGS], 1186 const int const_args[TCG_MAX_OP_ARGS]) 1187{ 1188 TCGArg a0 = args[0]; 1189 TCGArg a1 = args[1]; 1190 TCGArg a2 = args[2]; 1191 TCGArg a3 = args[3]; 1192 int c2 = const_args[2]; 1193 1194 switch (opc) { 1195 case INDEX_op_mb: 1196 tcg_out_mb(s, a0); 1197 break; 1198 1199 case INDEX_op_goto_ptr: 1200 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1201 break; 1202 1203 case INDEX_op_br: 1204 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1205 0); 1206 tcg_out_opc_b(s, 0); 1207 break; 1208 1209 case INDEX_op_brcond_i32: 1210 case INDEX_op_brcond_i64: 1211 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1212 break; 1213 1214 case INDEX_op_extrh_i64_i32: 1215 tcg_out_opc_srai_d(s, a0, a1, 32); 1216 break; 1217 1218 case INDEX_op_not_i32: 1219 case INDEX_op_not_i64: 1220 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1221 break; 1222 1223 case INDEX_op_nor_i32: 1224 case INDEX_op_nor_i64: 1225 if (c2) { 1226 tcg_out_opc_ori(s, a0, a1, a2); 1227 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1228 } else { 1229 tcg_out_opc_nor(s, a0, a1, a2); 1230 } 1231 break; 1232 1233 case INDEX_op_andc_i32: 1234 case INDEX_op_andc_i64: 1235 if (c2) { 1236 /* guaranteed to fit due to constraint */ 1237 tcg_out_opc_andi(s, a0, a1, ~a2); 1238 } else { 1239 tcg_out_opc_andn(s, a0, a1, a2); 1240 } 1241 break; 1242 1243 case INDEX_op_orc_i32: 1244 case INDEX_op_orc_i64: 1245 if (c2) { 1246 /* guaranteed to fit due to constraint */ 1247 tcg_out_opc_ori(s, a0, a1, ~a2); 1248 } else { 1249 tcg_out_opc_orn(s, a0, a1, a2); 1250 } 1251 break; 1252 1253 case INDEX_op_and_i32: 1254 case INDEX_op_and_i64: 1255 if (c2) { 1256 tcg_out_opc_andi(s, a0, a1, a2); 1257 } else { 1258 tcg_out_opc_and(s, a0, a1, a2); 1259 } 1260 break; 1261 1262 case INDEX_op_or_i32: 1263 case INDEX_op_or_i64: 1264 if (c2) { 1265 tcg_out_opc_ori(s, a0, a1, a2); 1266 } else { 1267 tcg_out_opc_or(s, a0, a1, a2); 1268 } 1269 break; 1270 1271 case INDEX_op_xor_i32: 1272 case INDEX_op_xor_i64: 1273 if (c2) { 1274 tcg_out_opc_xori(s, a0, a1, a2); 1275 } else { 1276 tcg_out_opc_xor(s, a0, a1, a2); 1277 } 1278 break; 1279 1280 case INDEX_op_extract_i32: 1281 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1282 break; 1283 case INDEX_op_extract_i64: 1284 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1285 break; 1286 1287 case INDEX_op_deposit_i32: 1288 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1289 break; 1290 case INDEX_op_deposit_i64: 1291 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1292 break; 1293 1294 case INDEX_op_bswap16_i32: 1295 case INDEX_op_bswap16_i64: 1296 tcg_out_opc_revb_2h(s, a0, a1); 1297 if (a2 & TCG_BSWAP_OS) { 1298 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1299 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1300 tcg_out_ext16u(s, a0, a0); 1301 } 1302 break; 1303 1304 case INDEX_op_bswap32_i32: 1305 /* All 32-bit values are computed sign-extended in the register. */ 1306 a2 = TCG_BSWAP_OS; 1307 /* fallthrough */ 1308 case INDEX_op_bswap32_i64: 1309 tcg_out_opc_revb_2w(s, a0, a1); 1310 if (a2 & TCG_BSWAP_OS) { 1311 tcg_out_ext32s(s, a0, a0); 1312 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1313 tcg_out_ext32u(s, a0, a0); 1314 } 1315 break; 1316 1317 case INDEX_op_bswap64_i64: 1318 tcg_out_opc_revb_d(s, a0, a1); 1319 break; 1320 1321 case INDEX_op_clz_i32: 1322 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1323 break; 1324 case INDEX_op_clz_i64: 1325 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1326 break; 1327 1328 case INDEX_op_ctz_i32: 1329 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1330 break; 1331 case INDEX_op_ctz_i64: 1332 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1333 break; 1334 1335 case INDEX_op_shl_i32: 1336 if (c2) { 1337 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1338 } else { 1339 tcg_out_opc_sll_w(s, a0, a1, a2); 1340 } 1341 break; 1342 case INDEX_op_shl_i64: 1343 if (c2) { 1344 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1345 } else { 1346 tcg_out_opc_sll_d(s, a0, a1, a2); 1347 } 1348 break; 1349 1350 case INDEX_op_shr_i32: 1351 if (c2) { 1352 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1353 } else { 1354 tcg_out_opc_srl_w(s, a0, a1, a2); 1355 } 1356 break; 1357 case INDEX_op_shr_i64: 1358 if (c2) { 1359 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1360 } else { 1361 tcg_out_opc_srl_d(s, a0, a1, a2); 1362 } 1363 break; 1364 1365 case INDEX_op_sar_i32: 1366 if (c2) { 1367 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1368 } else { 1369 tcg_out_opc_sra_w(s, a0, a1, a2); 1370 } 1371 break; 1372 case INDEX_op_sar_i64: 1373 if (c2) { 1374 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1375 } else { 1376 tcg_out_opc_sra_d(s, a0, a1, a2); 1377 } 1378 break; 1379 1380 case INDEX_op_rotl_i32: 1381 /* transform into equivalent rotr/rotri */ 1382 if (c2) { 1383 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1384 } else { 1385 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1386 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1387 } 1388 break; 1389 case INDEX_op_rotl_i64: 1390 /* transform into equivalent rotr/rotri */ 1391 if (c2) { 1392 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1393 } else { 1394 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1395 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1396 } 1397 break; 1398 1399 case INDEX_op_rotr_i32: 1400 if (c2) { 1401 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1402 } else { 1403 tcg_out_opc_rotr_w(s, a0, a1, a2); 1404 } 1405 break; 1406 case INDEX_op_rotr_i64: 1407 if (c2) { 1408 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1409 } else { 1410 tcg_out_opc_rotr_d(s, a0, a1, a2); 1411 } 1412 break; 1413 1414 case INDEX_op_add_i32: 1415 if (c2) { 1416 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1417 } else { 1418 tcg_out_opc_add_w(s, a0, a1, a2); 1419 } 1420 break; 1421 case INDEX_op_add_i64: 1422 if (c2) { 1423 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1424 } else { 1425 tcg_out_opc_add_d(s, a0, a1, a2); 1426 } 1427 break; 1428 1429 case INDEX_op_sub_i32: 1430 if (c2) { 1431 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1432 } else { 1433 tcg_out_opc_sub_w(s, a0, a1, a2); 1434 } 1435 break; 1436 case INDEX_op_sub_i64: 1437 if (c2) { 1438 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1439 } else { 1440 tcg_out_opc_sub_d(s, a0, a1, a2); 1441 } 1442 break; 1443 1444 case INDEX_op_mul_i32: 1445 tcg_out_opc_mul_w(s, a0, a1, a2); 1446 break; 1447 case INDEX_op_mul_i64: 1448 tcg_out_opc_mul_d(s, a0, a1, a2); 1449 break; 1450 1451 case INDEX_op_mulsh_i32: 1452 tcg_out_opc_mulh_w(s, a0, a1, a2); 1453 break; 1454 case INDEX_op_mulsh_i64: 1455 tcg_out_opc_mulh_d(s, a0, a1, a2); 1456 break; 1457 1458 case INDEX_op_muluh_i32: 1459 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1460 break; 1461 case INDEX_op_muluh_i64: 1462 tcg_out_opc_mulh_du(s, a0, a1, a2); 1463 break; 1464 1465 case INDEX_op_div_i32: 1466 tcg_out_opc_div_w(s, a0, a1, a2); 1467 break; 1468 case INDEX_op_div_i64: 1469 tcg_out_opc_div_d(s, a0, a1, a2); 1470 break; 1471 1472 case INDEX_op_divu_i32: 1473 tcg_out_opc_div_wu(s, a0, a1, a2); 1474 break; 1475 case INDEX_op_divu_i64: 1476 tcg_out_opc_div_du(s, a0, a1, a2); 1477 break; 1478 1479 case INDEX_op_rem_i32: 1480 tcg_out_opc_mod_w(s, a0, a1, a2); 1481 break; 1482 case INDEX_op_rem_i64: 1483 tcg_out_opc_mod_d(s, a0, a1, a2); 1484 break; 1485 1486 case INDEX_op_remu_i32: 1487 tcg_out_opc_mod_wu(s, a0, a1, a2); 1488 break; 1489 case INDEX_op_remu_i64: 1490 tcg_out_opc_mod_du(s, a0, a1, a2); 1491 break; 1492 1493 case INDEX_op_setcond_i32: 1494 case INDEX_op_setcond_i64: 1495 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1496 break; 1497 1498 case INDEX_op_movcond_i32: 1499 case INDEX_op_movcond_i64: 1500 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1501 break; 1502 1503 case INDEX_op_ld8s_i32: 1504 case INDEX_op_ld8s_i64: 1505 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1506 break; 1507 case INDEX_op_ld8u_i32: 1508 case INDEX_op_ld8u_i64: 1509 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1510 break; 1511 case INDEX_op_ld16s_i32: 1512 case INDEX_op_ld16s_i64: 1513 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1514 break; 1515 case INDEX_op_ld16u_i32: 1516 case INDEX_op_ld16u_i64: 1517 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1518 break; 1519 case INDEX_op_ld_i32: 1520 case INDEX_op_ld32s_i64: 1521 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1522 break; 1523 case INDEX_op_ld32u_i64: 1524 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1525 break; 1526 case INDEX_op_ld_i64: 1527 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1528 break; 1529 1530 case INDEX_op_st8_i32: 1531 case INDEX_op_st8_i64: 1532 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1533 break; 1534 case INDEX_op_st16_i32: 1535 case INDEX_op_st16_i64: 1536 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1537 break; 1538 case INDEX_op_st_i32: 1539 case INDEX_op_st32_i64: 1540 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1541 break; 1542 case INDEX_op_st_i64: 1543 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1544 break; 1545 1546 case INDEX_op_qemu_ld_a32_i32: 1547 case INDEX_op_qemu_ld_a64_i32: 1548 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1549 break; 1550 case INDEX_op_qemu_ld_a32_i64: 1551 case INDEX_op_qemu_ld_a64_i64: 1552 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1553 break; 1554 case INDEX_op_qemu_ld_a32_i128: 1555 case INDEX_op_qemu_ld_a64_i128: 1556 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true); 1557 break; 1558 case INDEX_op_qemu_st_a32_i32: 1559 case INDEX_op_qemu_st_a64_i32: 1560 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1561 break; 1562 case INDEX_op_qemu_st_a32_i64: 1563 case INDEX_op_qemu_st_a64_i64: 1564 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1565 break; 1566 case INDEX_op_qemu_st_a32_i128: 1567 case INDEX_op_qemu_st_a64_i128: 1568 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false); 1569 break; 1570 1571 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1572 case INDEX_op_mov_i64: 1573 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1574 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1575 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1576 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1577 case INDEX_op_ext8s_i64: 1578 case INDEX_op_ext8u_i32: 1579 case INDEX_op_ext8u_i64: 1580 case INDEX_op_ext16s_i32: 1581 case INDEX_op_ext16s_i64: 1582 case INDEX_op_ext16u_i32: 1583 case INDEX_op_ext16u_i64: 1584 case INDEX_op_ext32s_i64: 1585 case INDEX_op_ext32u_i64: 1586 case INDEX_op_ext_i32_i64: 1587 case INDEX_op_extu_i32_i64: 1588 case INDEX_op_extrl_i64_i32: 1589 default: 1590 g_assert_not_reached(); 1591 } 1592} 1593 1594static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 1595 TCGReg rd, TCGReg rs) 1596{ 1597 switch (vece) { 1598 case MO_8: 1599 tcg_out_opc_vreplgr2vr_b(s, rd, rs); 1600 break; 1601 case MO_16: 1602 tcg_out_opc_vreplgr2vr_h(s, rd, rs); 1603 break; 1604 case MO_32: 1605 tcg_out_opc_vreplgr2vr_w(s, rd, rs); 1606 break; 1607 case MO_64: 1608 tcg_out_opc_vreplgr2vr_d(s, rd, rs); 1609 break; 1610 default: 1611 g_assert_not_reached(); 1612 } 1613 return true; 1614} 1615 1616static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 1617 TCGReg r, TCGReg base, intptr_t offset) 1618{ 1619 /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */ 1620 if (offset < -0x800 || offset > 0x7ff || \ 1621 (offset & ((1 << vece) - 1)) != 0) { 1622 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset); 1623 base = TCG_REG_TMP0; 1624 offset = 0; 1625 } 1626 offset >>= vece; 1627 1628 switch (vece) { 1629 case MO_8: 1630 tcg_out_opc_vldrepl_b(s, r, base, offset); 1631 break; 1632 case MO_16: 1633 tcg_out_opc_vldrepl_h(s, r, base, offset); 1634 break; 1635 case MO_32: 1636 tcg_out_opc_vldrepl_w(s, r, base, offset); 1637 break; 1638 case MO_64: 1639 tcg_out_opc_vldrepl_d(s, r, base, offset); 1640 break; 1641 default: 1642 g_assert_not_reached(); 1643 } 1644 return true; 1645} 1646 1647static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 1648 TCGReg rd, int64_t v64) 1649{ 1650 /* Try vldi if imm can fit */ 1651 int64_t value = sextract64(v64, 0, 8 << vece); 1652 if (-0x200 <= value && value <= 0x1FF) { 1653 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF); 1654 tcg_out_opc_vldi(s, rd, imm); 1655 return; 1656 } 1657 1658 /* TODO: vldi patterns when imm 12 is set */ 1659 1660 /* Fallback to vreplgr2vr */ 1661 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value); 1662 switch (vece) { 1663 case MO_8: 1664 tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0); 1665 break; 1666 case MO_16: 1667 tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0); 1668 break; 1669 case MO_32: 1670 tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0); 1671 break; 1672 case MO_64: 1673 tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0); 1674 break; 1675 default: 1676 g_assert_not_reached(); 1677 } 1678} 1679 1680static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0, 1681 const TCGArg a1, const TCGArg a2, 1682 bool a2_is_const, bool is_add) 1683{ 1684 static const LoongArchInsn add_vec_insn[4] = { 1685 OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D 1686 }; 1687 static const LoongArchInsn add_vec_imm_insn[4] = { 1688 OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU 1689 }; 1690 static const LoongArchInsn sub_vec_insn[4] = { 1691 OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D 1692 }; 1693 static const LoongArchInsn sub_vec_imm_insn[4] = { 1694 OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU 1695 }; 1696 1697 if (a2_is_const) { 1698 int64_t value = sextract64(a2, 0, 8 << vece); 1699 if (!is_add) { 1700 value = -value; 1701 } 1702 1703 /* Try vaddi/vsubi */ 1704 if (0 <= value && value <= 0x1f) { 1705 tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \ 1706 a1, value)); 1707 return; 1708 } else if (-0x1f <= value && value < 0) { 1709 tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \ 1710 a1, -value)); 1711 return; 1712 } 1713 1714 /* constraint TCG_CT_CONST_VADD ensures unreachable */ 1715 g_assert_not_reached(); 1716 } 1717 1718 if (is_add) { 1719 tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2)); 1720 } else { 1721 tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2)); 1722 } 1723} 1724 1725static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 1726 unsigned vecl, unsigned vece, 1727 const TCGArg args[TCG_MAX_OP_ARGS], 1728 const int const_args[TCG_MAX_OP_ARGS]) 1729{ 1730 TCGType type = vecl + TCG_TYPE_V64; 1731 TCGArg a0, a1, a2, a3; 1732 TCGReg temp = TCG_REG_TMP0; 1733 TCGReg temp_vec = TCG_VEC_TMP0; 1734 1735 static const LoongArchInsn cmp_vec_insn[16][4] = { 1736 [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D}, 1737 [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D}, 1738 [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU}, 1739 [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D}, 1740 [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU}, 1741 }; 1742 static const LoongArchInsn cmp_vec_imm_insn[16][4] = { 1743 [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D}, 1744 [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D}, 1745 [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU}, 1746 [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D}, 1747 [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU}, 1748 }; 1749 LoongArchInsn insn; 1750 static const LoongArchInsn neg_vec_insn[4] = { 1751 OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D 1752 }; 1753 static const LoongArchInsn mul_vec_insn[4] = { 1754 OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D 1755 }; 1756 static const LoongArchInsn smin_vec_insn[4] = { 1757 OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D 1758 }; 1759 static const LoongArchInsn umin_vec_insn[4] = { 1760 OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU 1761 }; 1762 static const LoongArchInsn smax_vec_insn[4] = { 1763 OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D 1764 }; 1765 static const LoongArchInsn umax_vec_insn[4] = { 1766 OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU 1767 }; 1768 static const LoongArchInsn ssadd_vec_insn[4] = { 1769 OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D 1770 }; 1771 static const LoongArchInsn usadd_vec_insn[4] = { 1772 OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU 1773 }; 1774 static const LoongArchInsn sssub_vec_insn[4] = { 1775 OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D 1776 }; 1777 static const LoongArchInsn ussub_vec_insn[4] = { 1778 OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU 1779 }; 1780 static const LoongArchInsn shlv_vec_insn[4] = { 1781 OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D 1782 }; 1783 static const LoongArchInsn shrv_vec_insn[4] = { 1784 OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D 1785 }; 1786 static const LoongArchInsn sarv_vec_insn[4] = { 1787 OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D 1788 }; 1789 static const LoongArchInsn shli_vec_insn[4] = { 1790 OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D 1791 }; 1792 static const LoongArchInsn shri_vec_insn[4] = { 1793 OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D 1794 }; 1795 static const LoongArchInsn sari_vec_insn[4] = { 1796 OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D 1797 }; 1798 static const LoongArchInsn rotrv_vec_insn[4] = { 1799 OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D 1800 }; 1801 1802 a0 = args[0]; 1803 a1 = args[1]; 1804 a2 = args[2]; 1805 a3 = args[3]; 1806 1807 /* Currently only supports V128 */ 1808 tcg_debug_assert(type == TCG_TYPE_V128); 1809 1810 switch (opc) { 1811 case INDEX_op_st_vec: 1812 /* Try to fit vst imm */ 1813 if (-0x800 <= a2 && a2 <= 0x7ff) { 1814 tcg_out_opc_vst(s, a0, a1, a2); 1815 } else { 1816 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1817 tcg_out_opc_vstx(s, a0, a1, temp); 1818 } 1819 break; 1820 case INDEX_op_ld_vec: 1821 /* Try to fit vld imm */ 1822 if (-0x800 <= a2 && a2 <= 0x7ff) { 1823 tcg_out_opc_vld(s, a0, a1, a2); 1824 } else { 1825 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1826 tcg_out_opc_vldx(s, a0, a1, temp); 1827 } 1828 break; 1829 case INDEX_op_and_vec: 1830 tcg_out_opc_vand_v(s, a0, a1, a2); 1831 break; 1832 case INDEX_op_andc_vec: 1833 /* 1834 * vandn vd, vj, vk: vd = vk & ~vj 1835 * andc_vec vd, vj, vk: vd = vj & ~vk 1836 * vk and vk are swapped 1837 */ 1838 tcg_out_opc_vandn_v(s, a0, a2, a1); 1839 break; 1840 case INDEX_op_or_vec: 1841 tcg_out_opc_vor_v(s, a0, a1, a2); 1842 break; 1843 case INDEX_op_orc_vec: 1844 tcg_out_opc_vorn_v(s, a0, a1, a2); 1845 break; 1846 case INDEX_op_xor_vec: 1847 tcg_out_opc_vxor_v(s, a0, a1, a2); 1848 break; 1849 case INDEX_op_nor_vec: 1850 tcg_out_opc_vnor_v(s, a0, a1, a2); 1851 break; 1852 case INDEX_op_not_vec: 1853 tcg_out_opc_vnor_v(s, a0, a1, a1); 1854 break; 1855 case INDEX_op_cmp_vec: 1856 { 1857 TCGCond cond = args[3]; 1858 if (const_args[2]) { 1859 /* 1860 * cmp_vec dest, src, value 1861 * Try vseqi/vslei/vslti 1862 */ 1863 int64_t value = sextract64(a2, 0, 8 << vece); 1864 if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \ 1865 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) { 1866 tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \ 1867 a0, a1, value)); 1868 break; 1869 } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) && 1870 (0x00 <= value && value <= 0x1f)) { 1871 tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \ 1872 a0, a1, value)); 1873 break; 1874 } 1875 1876 /* 1877 * Fallback to: 1878 * dupi_vec temp, a2 1879 * cmp_vec a0, a1, temp, cond 1880 */ 1881 tcg_out_dupi_vec(s, type, vece, temp_vec, a2); 1882 a2 = temp_vec; 1883 } 1884 1885 insn = cmp_vec_insn[cond][vece]; 1886 if (insn == 0) { 1887 TCGArg t; 1888 t = a1, a1 = a2, a2 = t; 1889 cond = tcg_swap_cond(cond); 1890 insn = cmp_vec_insn[cond][vece]; 1891 tcg_debug_assert(insn != 0); 1892 } 1893 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2)); 1894 } 1895 break; 1896 case INDEX_op_add_vec: 1897 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true); 1898 break; 1899 case INDEX_op_sub_vec: 1900 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false); 1901 break; 1902 case INDEX_op_neg_vec: 1903 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1)); 1904 break; 1905 case INDEX_op_mul_vec: 1906 tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2)); 1907 break; 1908 case INDEX_op_smin_vec: 1909 tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2)); 1910 break; 1911 case INDEX_op_smax_vec: 1912 tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2)); 1913 break; 1914 case INDEX_op_umin_vec: 1915 tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2)); 1916 break; 1917 case INDEX_op_umax_vec: 1918 tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2)); 1919 break; 1920 case INDEX_op_ssadd_vec: 1921 tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2)); 1922 break; 1923 case INDEX_op_usadd_vec: 1924 tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2)); 1925 break; 1926 case INDEX_op_sssub_vec: 1927 tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2)); 1928 break; 1929 case INDEX_op_ussub_vec: 1930 tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2)); 1931 break; 1932 case INDEX_op_shlv_vec: 1933 tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2)); 1934 break; 1935 case INDEX_op_shrv_vec: 1936 tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2)); 1937 break; 1938 case INDEX_op_sarv_vec: 1939 tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2)); 1940 break; 1941 case INDEX_op_shli_vec: 1942 tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2)); 1943 break; 1944 case INDEX_op_shri_vec: 1945 tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2)); 1946 break; 1947 case INDEX_op_sari_vec: 1948 tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2)); 1949 break; 1950 case INDEX_op_rotrv_vec: 1951 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2)); 1952 break; 1953 case INDEX_op_rotlv_vec: 1954 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */ 1955 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2)); 1956 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, 1957 temp_vec)); 1958 break; 1959 case INDEX_op_rotli_vec: 1960 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */ 1961 a2 = extract32(-a2, 0, 3 + vece); 1962 switch (vece) { 1963 case MO_8: 1964 tcg_out_opc_vrotri_b(s, a0, a1, a2); 1965 break; 1966 case MO_16: 1967 tcg_out_opc_vrotri_h(s, a0, a1, a2); 1968 break; 1969 case MO_32: 1970 tcg_out_opc_vrotri_w(s, a0, a1, a2); 1971 break; 1972 case MO_64: 1973 tcg_out_opc_vrotri_d(s, a0, a1, a2); 1974 break; 1975 default: 1976 g_assert_not_reached(); 1977 } 1978 break; 1979 case INDEX_op_bitsel_vec: 1980 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */ 1981 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1); 1982 break; 1983 case INDEX_op_dupm_vec: 1984 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 1985 break; 1986 default: 1987 g_assert_not_reached(); 1988 } 1989} 1990 1991int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 1992{ 1993 switch (opc) { 1994 case INDEX_op_ld_vec: 1995 case INDEX_op_st_vec: 1996 case INDEX_op_dup_vec: 1997 case INDEX_op_dupm_vec: 1998 case INDEX_op_cmp_vec: 1999 case INDEX_op_add_vec: 2000 case INDEX_op_sub_vec: 2001 case INDEX_op_and_vec: 2002 case INDEX_op_andc_vec: 2003 case INDEX_op_or_vec: 2004 case INDEX_op_orc_vec: 2005 case INDEX_op_xor_vec: 2006 case INDEX_op_nor_vec: 2007 case INDEX_op_not_vec: 2008 case INDEX_op_neg_vec: 2009 case INDEX_op_mul_vec: 2010 case INDEX_op_smin_vec: 2011 case INDEX_op_smax_vec: 2012 case INDEX_op_umin_vec: 2013 case INDEX_op_umax_vec: 2014 case INDEX_op_ssadd_vec: 2015 case INDEX_op_usadd_vec: 2016 case INDEX_op_sssub_vec: 2017 case INDEX_op_ussub_vec: 2018 case INDEX_op_shlv_vec: 2019 case INDEX_op_shrv_vec: 2020 case INDEX_op_sarv_vec: 2021 case INDEX_op_bitsel_vec: 2022 return 1; 2023 default: 2024 return 0; 2025 } 2026} 2027 2028void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2029 TCGArg a0, ...) 2030{ 2031 g_assert_not_reached(); 2032} 2033 2034static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2035{ 2036 switch (op) { 2037 case INDEX_op_goto_ptr: 2038 return C_O0_I1(r); 2039 2040 case INDEX_op_st8_i32: 2041 case INDEX_op_st8_i64: 2042 case INDEX_op_st16_i32: 2043 case INDEX_op_st16_i64: 2044 case INDEX_op_st32_i64: 2045 case INDEX_op_st_i32: 2046 case INDEX_op_st_i64: 2047 case INDEX_op_qemu_st_a32_i32: 2048 case INDEX_op_qemu_st_a64_i32: 2049 case INDEX_op_qemu_st_a32_i64: 2050 case INDEX_op_qemu_st_a64_i64: 2051 return C_O0_I2(rZ, r); 2052 2053 case INDEX_op_qemu_ld_a32_i128: 2054 case INDEX_op_qemu_ld_a64_i128: 2055 return C_N2_I1(r, r, r); 2056 2057 case INDEX_op_qemu_st_a32_i128: 2058 case INDEX_op_qemu_st_a64_i128: 2059 return C_O0_I3(r, r, r); 2060 2061 case INDEX_op_brcond_i32: 2062 case INDEX_op_brcond_i64: 2063 return C_O0_I2(rZ, rZ); 2064 2065 case INDEX_op_ext8s_i32: 2066 case INDEX_op_ext8s_i64: 2067 case INDEX_op_ext8u_i32: 2068 case INDEX_op_ext8u_i64: 2069 case INDEX_op_ext16s_i32: 2070 case INDEX_op_ext16s_i64: 2071 case INDEX_op_ext16u_i32: 2072 case INDEX_op_ext16u_i64: 2073 case INDEX_op_ext32s_i64: 2074 case INDEX_op_ext32u_i64: 2075 case INDEX_op_extu_i32_i64: 2076 case INDEX_op_extrl_i64_i32: 2077 case INDEX_op_extrh_i64_i32: 2078 case INDEX_op_ext_i32_i64: 2079 case INDEX_op_not_i32: 2080 case INDEX_op_not_i64: 2081 case INDEX_op_extract_i32: 2082 case INDEX_op_extract_i64: 2083 case INDEX_op_bswap16_i32: 2084 case INDEX_op_bswap16_i64: 2085 case INDEX_op_bswap32_i32: 2086 case INDEX_op_bswap32_i64: 2087 case INDEX_op_bswap64_i64: 2088 case INDEX_op_ld8s_i32: 2089 case INDEX_op_ld8s_i64: 2090 case INDEX_op_ld8u_i32: 2091 case INDEX_op_ld8u_i64: 2092 case INDEX_op_ld16s_i32: 2093 case INDEX_op_ld16s_i64: 2094 case INDEX_op_ld16u_i32: 2095 case INDEX_op_ld16u_i64: 2096 case INDEX_op_ld32s_i64: 2097 case INDEX_op_ld32u_i64: 2098 case INDEX_op_ld_i32: 2099 case INDEX_op_ld_i64: 2100 case INDEX_op_qemu_ld_a32_i32: 2101 case INDEX_op_qemu_ld_a64_i32: 2102 case INDEX_op_qemu_ld_a32_i64: 2103 case INDEX_op_qemu_ld_a64_i64: 2104 return C_O1_I1(r, r); 2105 2106 case INDEX_op_andc_i32: 2107 case INDEX_op_andc_i64: 2108 case INDEX_op_orc_i32: 2109 case INDEX_op_orc_i64: 2110 /* 2111 * LoongArch insns for these ops don't have reg-imm forms, but we 2112 * can express using andi/ori if ~constant satisfies 2113 * TCG_CT_CONST_U12. 2114 */ 2115 return C_O1_I2(r, r, rC); 2116 2117 case INDEX_op_shl_i32: 2118 case INDEX_op_shl_i64: 2119 case INDEX_op_shr_i32: 2120 case INDEX_op_shr_i64: 2121 case INDEX_op_sar_i32: 2122 case INDEX_op_sar_i64: 2123 case INDEX_op_rotl_i32: 2124 case INDEX_op_rotl_i64: 2125 case INDEX_op_rotr_i32: 2126 case INDEX_op_rotr_i64: 2127 return C_O1_I2(r, r, ri); 2128 2129 case INDEX_op_add_i32: 2130 return C_O1_I2(r, r, ri); 2131 case INDEX_op_add_i64: 2132 return C_O1_I2(r, r, rJ); 2133 2134 case INDEX_op_and_i32: 2135 case INDEX_op_and_i64: 2136 case INDEX_op_nor_i32: 2137 case INDEX_op_nor_i64: 2138 case INDEX_op_or_i32: 2139 case INDEX_op_or_i64: 2140 case INDEX_op_xor_i32: 2141 case INDEX_op_xor_i64: 2142 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 2143 return C_O1_I2(r, r, rU); 2144 2145 case INDEX_op_clz_i32: 2146 case INDEX_op_clz_i64: 2147 case INDEX_op_ctz_i32: 2148 case INDEX_op_ctz_i64: 2149 return C_O1_I2(r, r, rW); 2150 2151 case INDEX_op_deposit_i32: 2152 case INDEX_op_deposit_i64: 2153 /* Must deposit into the same register as input */ 2154 return C_O1_I2(r, 0, rZ); 2155 2156 case INDEX_op_sub_i32: 2157 case INDEX_op_setcond_i32: 2158 return C_O1_I2(r, rZ, ri); 2159 case INDEX_op_sub_i64: 2160 case INDEX_op_setcond_i64: 2161 return C_O1_I2(r, rZ, rJ); 2162 2163 case INDEX_op_mul_i32: 2164 case INDEX_op_mul_i64: 2165 case INDEX_op_mulsh_i32: 2166 case INDEX_op_mulsh_i64: 2167 case INDEX_op_muluh_i32: 2168 case INDEX_op_muluh_i64: 2169 case INDEX_op_div_i32: 2170 case INDEX_op_div_i64: 2171 case INDEX_op_divu_i32: 2172 case INDEX_op_divu_i64: 2173 case INDEX_op_rem_i32: 2174 case INDEX_op_rem_i64: 2175 case INDEX_op_remu_i32: 2176 case INDEX_op_remu_i64: 2177 return C_O1_I2(r, rZ, rZ); 2178 2179 case INDEX_op_movcond_i32: 2180 case INDEX_op_movcond_i64: 2181 return C_O1_I4(r, rZ, rJ, rZ, rZ); 2182 2183 case INDEX_op_ld_vec: 2184 case INDEX_op_dupm_vec: 2185 case INDEX_op_dup_vec: 2186 return C_O1_I1(w, r); 2187 2188 case INDEX_op_st_vec: 2189 return C_O0_I2(w, r); 2190 2191 case INDEX_op_cmp_vec: 2192 return C_O1_I2(w, w, wM); 2193 2194 case INDEX_op_add_vec: 2195 case INDEX_op_sub_vec: 2196 return C_O1_I2(w, w, wA); 2197 2198 case INDEX_op_and_vec: 2199 case INDEX_op_andc_vec: 2200 case INDEX_op_or_vec: 2201 case INDEX_op_orc_vec: 2202 case INDEX_op_xor_vec: 2203 case INDEX_op_nor_vec: 2204 case INDEX_op_mul_vec: 2205 case INDEX_op_smin_vec: 2206 case INDEX_op_smax_vec: 2207 case INDEX_op_umin_vec: 2208 case INDEX_op_umax_vec: 2209 case INDEX_op_ssadd_vec: 2210 case INDEX_op_usadd_vec: 2211 case INDEX_op_sssub_vec: 2212 case INDEX_op_ussub_vec: 2213 case INDEX_op_shlv_vec: 2214 case INDEX_op_shrv_vec: 2215 case INDEX_op_sarv_vec: 2216 case INDEX_op_rotrv_vec: 2217 case INDEX_op_rotlv_vec: 2218 return C_O1_I2(w, w, w); 2219 2220 case INDEX_op_not_vec: 2221 case INDEX_op_neg_vec: 2222 case INDEX_op_shli_vec: 2223 case INDEX_op_shri_vec: 2224 case INDEX_op_sari_vec: 2225 case INDEX_op_rotli_vec: 2226 return C_O1_I1(w, w); 2227 2228 case INDEX_op_bitsel_vec: 2229 return C_O1_I3(w, w, w, w); 2230 2231 default: 2232 g_assert_not_reached(); 2233 } 2234} 2235 2236static const int tcg_target_callee_save_regs[] = { 2237 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2238 TCG_REG_S1, 2239 TCG_REG_S2, 2240 TCG_REG_S3, 2241 TCG_REG_S4, 2242 TCG_REG_S5, 2243 TCG_REG_S6, 2244 TCG_REG_S7, 2245 TCG_REG_S8, 2246 TCG_REG_S9, 2247 TCG_REG_RA, /* should be last for ABI compliance */ 2248}; 2249 2250/* Stack frame parameters. */ 2251#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 2252#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 2253#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 2254#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 2255 + TCG_TARGET_STACK_ALIGN - 1) \ 2256 & -TCG_TARGET_STACK_ALIGN) 2257#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 2258 2259/* We're expecting to be able to use an immediate for frame allocation. */ 2260QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 2261 2262/* Generate global QEMU prologue and epilogue code */ 2263static void tcg_target_qemu_prologue(TCGContext *s) 2264{ 2265 int i; 2266 2267 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 2268 2269 /* TB prologue */ 2270 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 2271 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2272 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2273 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2274 } 2275 2276 if (!tcg_use_softmmu && guest_base) { 2277 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 2278 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2279 } 2280 2281 /* Call generated code */ 2282 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2283 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 2284 2285 /* Return path for goto_ptr. Set return value to 0 */ 2286 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2287 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 2288 2289 /* TB epilogue */ 2290 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 2291 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2292 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2293 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2294 } 2295 2296 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 2297 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 2298} 2299 2300static void tcg_out_tb_start(TCGContext *s) 2301{ 2302 /* nothing to do */ 2303} 2304 2305static void tcg_target_init(TCGContext *s) 2306{ 2307 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2308 2309 /* Server and desktop class cpus have UAL; embedded cpus do not. */ 2310 if (!(hwcap & HWCAP_LOONGARCH_UAL)) { 2311 error_report("TCG: unaligned access support required; exiting"); 2312 exit(EXIT_FAILURE); 2313 } 2314 2315 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2316 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 2317 2318 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 2319 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 2320 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 2321 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 2322 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 2323 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 2324 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 2325 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 2326 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 2327 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 2328 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 2329 2330 if (cpuinfo & CPUINFO_LSX) { 2331 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2332 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24); 2333 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25); 2334 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26); 2335 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27); 2336 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28); 2337 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29); 2338 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30); 2339 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31); 2340 } 2341 2342 s->reserved_regs = 0; 2343 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 2344 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 2345 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 2346 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 2347 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 2348 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 2349 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 2350 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); 2351} 2352 2353typedef struct { 2354 DebugFrameHeader h; 2355 uint8_t fde_def_cfa[4]; 2356 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 2357} DebugFrame; 2358 2359#define ELF_HOST_MACHINE EM_LOONGARCH 2360 2361static const DebugFrame debug_frame = { 2362 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 2363 .h.cie.id = -1, 2364 .h.cie.version = 1, 2365 .h.cie.code_align = 1, 2366 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 2367 .h.cie.return_column = TCG_REG_RA, 2368 2369 /* Total FDE size does not include the "len" member. */ 2370 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2371 2372 .fde_def_cfa = { 2373 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 2374 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2375 (FRAME_SIZE >> 7) 2376 }, 2377 .fde_reg_ofs = { 2378 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 2379 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 2380 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 2381 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 2382 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 2383 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 2384 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 2385 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 2386 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 2387 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 2388 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 2389 } 2390}; 2391 2392void tcg_register_jit(const void *buf, size_t buf_size) 2393{ 2394 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2395} 2396