1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33#include <asm/hwcap.h> 34 35bool use_lsx_instructions; 36 37#ifdef CONFIG_DEBUG_TCG 38static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 39 "zero", 40 "ra", 41 "tp", 42 "sp", 43 "a0", 44 "a1", 45 "a2", 46 "a3", 47 "a4", 48 "a5", 49 "a6", 50 "a7", 51 "t0", 52 "t1", 53 "t2", 54 "t3", 55 "t4", 56 "t5", 57 "t6", 58 "t7", 59 "t8", 60 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 61 "s9", 62 "s0", 63 "s1", 64 "s2", 65 "s3", 66 "s4", 67 "s5", 68 "s6", 69 "s7", 70 "s8", 71 "vr0", 72 "vr1", 73 "vr2", 74 "vr3", 75 "vr4", 76 "vr5", 77 "vr6", 78 "vr7", 79 "vr8", 80 "vr9", 81 "vr10", 82 "vr11", 83 "vr12", 84 "vr13", 85 "vr14", 86 "vr15", 87 "vr16", 88 "vr17", 89 "vr18", 90 "vr19", 91 "vr20", 92 "vr21", 93 "vr22", 94 "vr23", 95 "vr24", 96 "vr25", 97 "vr26", 98 "vr27", 99 "vr28", 100 "vr29", 101 "vr30", 102 "vr31", 103}; 104#endif 105 106static const int tcg_target_reg_alloc_order[] = { 107 /* Registers preserved across calls */ 108 /* TCG_REG_S0 reserved for TCG_AREG0 */ 109 TCG_REG_S1, 110 TCG_REG_S2, 111 TCG_REG_S3, 112 TCG_REG_S4, 113 TCG_REG_S5, 114 TCG_REG_S6, 115 TCG_REG_S7, 116 TCG_REG_S8, 117 TCG_REG_S9, 118 119 /* Registers (potentially) clobbered across calls */ 120 TCG_REG_T0, 121 TCG_REG_T1, 122 TCG_REG_T2, 123 TCG_REG_T3, 124 TCG_REG_T4, 125 TCG_REG_T5, 126 TCG_REG_T6, 127 TCG_REG_T7, 128 TCG_REG_T8, 129 130 /* Argument registers, opposite order of allocation. */ 131 TCG_REG_A7, 132 TCG_REG_A6, 133 TCG_REG_A5, 134 TCG_REG_A4, 135 TCG_REG_A3, 136 TCG_REG_A2, 137 TCG_REG_A1, 138 TCG_REG_A0, 139 140 /* Vector registers */ 141 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, 142 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, 143 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, 144 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, 145 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, 146 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, 147 /* V24 - V31 are caller-saved, and skipped. */ 148}; 149 150static const int tcg_target_call_iarg_regs[] = { 151 TCG_REG_A0, 152 TCG_REG_A1, 153 TCG_REG_A2, 154 TCG_REG_A3, 155 TCG_REG_A4, 156 TCG_REG_A5, 157 TCG_REG_A6, 158 TCG_REG_A7, 159}; 160 161static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 162{ 163 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 164 tcg_debug_assert(slot >= 0 && slot <= 1); 165 return TCG_REG_A0 + slot; 166} 167 168#define TCG_GUEST_BASE_REG TCG_REG_S1 169 170#define TCG_CT_CONST_ZERO 0x100 171#define TCG_CT_CONST_S12 0x200 172#define TCG_CT_CONST_S32 0x400 173#define TCG_CT_CONST_U12 0x800 174#define TCG_CT_CONST_C12 0x1000 175#define TCG_CT_CONST_WSZ 0x2000 176#define TCG_CT_CONST_VCMP 0x4000 177#define TCG_CT_CONST_VADD 0x8000 178 179#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 180#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) 181 182static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 183{ 184 return sextract64(val, pos, len); 185} 186 187/* test if a constant matches the constraint */ 188static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) 189{ 190 if (ct & TCG_CT_CONST) { 191 return true; 192 } 193 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 194 return true; 195 } 196 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 197 return true; 198 } 199 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 200 return true; 201 } 202 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 203 return true; 204 } 205 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 206 return true; 207 } 208 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 209 return true; 210 } 211 int64_t vec_val = sextract64(val, 0, 8 << vece); 212 if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) { 213 return true; 214 } 215 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { 216 return true; 217 } 218 return false; 219} 220 221/* 222 * Relocations 223 */ 224 225/* 226 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 227 * complicated; a whopping stack machine is needed to stuff the fields, at 228 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 229 * needed. 230 * 231 * Hence, define our own simpler relocation types. Numbers are chosen as to 232 * not collide with potential future additions to the true ELF relocation 233 * type enum. 234 */ 235 236/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 237#define R_LOONGARCH_BR_SK16 256 238/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 239#define R_LOONGARCH_BR_SD10K16 257 240 241static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 242{ 243 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 244 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 245 246 tcg_debug_assert((offset & 3) == 0); 247 offset >>= 2; 248 if (offset == sextreg(offset, 0, 16)) { 249 *src_rw = deposit64(*src_rw, 10, 16, offset); 250 return true; 251 } 252 253 return false; 254} 255 256static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 257 const tcg_insn_unit *target) 258{ 259 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 260 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 261 262 tcg_debug_assert((offset & 3) == 0); 263 offset >>= 2; 264 if (offset == sextreg(offset, 0, 26)) { 265 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 266 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 267 return true; 268 } 269 270 return false; 271} 272 273static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 274 intptr_t value, intptr_t addend) 275{ 276 tcg_debug_assert(addend == 0); 277 switch (type) { 278 case R_LOONGARCH_BR_SK16: 279 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 280 case R_LOONGARCH_BR_SD10K16: 281 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 282 default: 283 g_assert_not_reached(); 284 } 285} 286 287#include "tcg-insn-defs.c.inc" 288 289/* 290 * TCG intrinsics 291 */ 292 293static void tcg_out_mb(TCGContext *s, TCGArg a0) 294{ 295 /* Baseline LoongArch only has the full barrier, unfortunately. */ 296 tcg_out_opc_dbar(s, 0); 297} 298 299static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 300{ 301 if (ret == arg) { 302 return true; 303 } 304 switch (type) { 305 case TCG_TYPE_I32: 306 case TCG_TYPE_I64: 307 /* 308 * Conventional register-register move used in LoongArch is 309 * `or dst, src, zero`. 310 */ 311 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 312 break; 313 default: 314 g_assert_not_reached(); 315 } 316 return true; 317} 318 319/* Loads a 32-bit immediate into rd, sign-extended. */ 320static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 321{ 322 tcg_target_long lo = sextreg(val, 0, 12); 323 tcg_target_long hi12 = sextreg(val, 12, 20); 324 325 /* Single-instruction cases. */ 326 if (hi12 == 0) { 327 /* val fits in uimm12: ori rd, zero, val */ 328 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 329 return; 330 } 331 if (hi12 == sextreg(lo, 12, 20)) { 332 /* val fits in simm12: addi.w rd, zero, val */ 333 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 334 return; 335 } 336 337 /* High bits must be set; load with lu12i.w + optional ori. */ 338 tcg_out_opc_lu12i_w(s, rd, hi12); 339 if (lo != 0) { 340 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 341 } 342} 343 344static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 345 tcg_target_long val) 346{ 347 /* 348 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 349 * with dedicated instructions for filling the respective bitfields 350 * below: 351 * 352 * 6 5 4 3 353 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 354 * +-----------------------+---------------------------------------+... 355 * | hi52 | hi32 | 356 * +-----------------------+---------------------------------------+... 357 * 3 2 1 358 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 359 * ...+-------------------------------------+-------------------------+ 360 * | hi12 | lo | 361 * ...+-------------------------------------+-------------------------+ 362 * 363 * Check if val belong to one of the several fast cases, before falling 364 * back to the slow path. 365 */ 366 367 intptr_t pc_offset; 368 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 369 tcg_target_long hi12, hi32, hi52; 370 371 /* Value fits in signed i32. */ 372 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 373 tcg_out_movi_i32(s, rd, val); 374 return; 375 } 376 377 /* PC-relative cases. */ 378 pc_offset = tcg_pcrel_diff(s, (void *)val); 379 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 380 /* Single pcaddu2i. */ 381 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 382 return; 383 } 384 385 if (pc_offset == (int32_t)pc_offset) { 386 /* Offset within 32 bits; load with pcalau12i + ori. */ 387 val_lo = sextreg(val, 0, 12); 388 val_hi = val >> 12; 389 pc_hi = (val - pc_offset) >> 12; 390 offset_hi = val_hi - pc_hi; 391 392 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 393 tcg_out_opc_pcalau12i(s, rd, offset_hi); 394 if (val_lo != 0) { 395 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 396 } 397 return; 398 } 399 400 hi12 = sextreg(val, 12, 20); 401 hi32 = sextreg(val, 32, 20); 402 hi52 = sextreg(val, 52, 12); 403 404 /* Single cu52i.d case. */ 405 if ((hi52 != 0) && (ctz64(val) >= 52)) { 406 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 407 return; 408 } 409 410 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 411 tcg_out_movi_i32(s, rd, val); 412 413 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 414 if (hi32 != sextreg(hi12, 20, 20)) { 415 tcg_out_opc_cu32i_d(s, rd, hi32); 416 } 417 418 if (hi52 != sextreg(hi32, 20, 12)) { 419 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 420 } 421} 422 423static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 424 TCGReg rs, tcg_target_long imm) 425{ 426 tcg_target_long lo12 = sextreg(imm, 0, 12); 427 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 428 429 /* 430 * Note that there's a hole in between hi16 and lo12: 431 * 432 * 3 2 1 0 433 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 434 * ...+-------------------------------+-------+-----------------------+ 435 * | hi16 | | lo12 | 436 * ...+-------------------------------+-------+-----------------------+ 437 * 438 * For bits within that hole, it's more efficient to use LU12I and ADD. 439 */ 440 if (imm == (hi16 << 16) + lo12) { 441 if (hi16) { 442 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 443 rs = rd; 444 } 445 if (type == TCG_TYPE_I32) { 446 tcg_out_opc_addi_w(s, rd, rs, lo12); 447 } else if (lo12) { 448 tcg_out_opc_addi_d(s, rd, rs, lo12); 449 } else { 450 tcg_out_mov(s, type, rd, rs); 451 } 452 } else { 453 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 454 if (type == TCG_TYPE_I32) { 455 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 456 } else { 457 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 458 } 459 } 460} 461 462static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 463{ 464 return false; 465} 466 467static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 468 tcg_target_long imm) 469{ 470 /* This function is only used for passing structs by reference. */ 471 g_assert_not_reached(); 472} 473 474static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 475{ 476 tcg_out_opc_andi(s, ret, arg, 0xff); 477} 478 479static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 480{ 481 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 482} 483 484static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 485{ 486 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 487} 488 489static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 490{ 491 tcg_out_opc_sext_b(s, ret, arg); 492} 493 494static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 495{ 496 tcg_out_opc_sext_h(s, ret, arg); 497} 498 499static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 500{ 501 tcg_out_opc_addi_w(s, ret, arg, 0); 502} 503 504static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 505{ 506 if (ret != arg) { 507 tcg_out_ext32s(s, ret, arg); 508 } 509} 510 511static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 512{ 513 tcg_out_ext32u(s, ret, arg); 514} 515 516static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 517{ 518 tcg_out_ext32s(s, ret, arg); 519} 520 521static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 522 TCGReg a0, TCGReg a1, TCGReg a2, 523 bool c2, bool is_32bit) 524{ 525 if (c2) { 526 /* 527 * Fast path: semantics already satisfied due to constraint and 528 * insn behavior, single instruction is enough. 529 */ 530 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 531 /* all clz/ctz insns belong to DJ-format */ 532 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 533 return; 534 } 535 536 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 537 /* a0 = a1 ? REG_TMP0 : a2 */ 538 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 539 tcg_out_opc_masknez(s, a0, a2, a1); 540 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 541} 542 543#define SETCOND_INV TCG_TARGET_NB_REGS 544#define SETCOND_NEZ (SETCOND_INV << 1) 545#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 546 547static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 548 TCGReg arg1, tcg_target_long arg2, bool c2) 549{ 550 int flags = 0; 551 552 switch (cond) { 553 case TCG_COND_EQ: /* -> NE */ 554 case TCG_COND_GE: /* -> LT */ 555 case TCG_COND_GEU: /* -> LTU */ 556 case TCG_COND_GT: /* -> LE */ 557 case TCG_COND_GTU: /* -> LEU */ 558 cond = tcg_invert_cond(cond); 559 flags ^= SETCOND_INV; 560 break; 561 default: 562 break; 563 } 564 565 switch (cond) { 566 case TCG_COND_LE: 567 case TCG_COND_LEU: 568 /* 569 * If we have a constant input, the most efficient way to implement 570 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 571 * We don't need to care for this for LE because the constant input 572 * is still constrained to int32_t, and INT32_MAX+1 is representable 573 * in the 64-bit temporary register. 574 */ 575 if (c2) { 576 if (cond == TCG_COND_LEU) { 577 /* unsigned <= -1 is true */ 578 if (arg2 == -1) { 579 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 580 return ret; 581 } 582 cond = TCG_COND_LTU; 583 } else { 584 cond = TCG_COND_LT; 585 } 586 arg2 += 1; 587 } else { 588 TCGReg tmp = arg2; 589 arg2 = arg1; 590 arg1 = tmp; 591 cond = tcg_swap_cond(cond); /* LE -> GE */ 592 cond = tcg_invert_cond(cond); /* GE -> LT */ 593 flags ^= SETCOND_INV; 594 } 595 break; 596 default: 597 break; 598 } 599 600 switch (cond) { 601 case TCG_COND_NE: 602 flags |= SETCOND_NEZ; 603 if (!c2) { 604 tcg_out_opc_xor(s, ret, arg1, arg2); 605 } else if (arg2 == 0) { 606 ret = arg1; 607 } else if (arg2 >= 0 && arg2 <= 0xfff) { 608 tcg_out_opc_xori(s, ret, arg1, arg2); 609 } else { 610 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 611 } 612 break; 613 614 case TCG_COND_LT: 615 case TCG_COND_LTU: 616 if (c2) { 617 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 618 if (cond == TCG_COND_LT) { 619 tcg_out_opc_slti(s, ret, arg1, arg2); 620 } else { 621 tcg_out_opc_sltui(s, ret, arg1, arg2); 622 } 623 break; 624 } 625 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 626 arg2 = TCG_REG_TMP0; 627 } 628 if (cond == TCG_COND_LT) { 629 tcg_out_opc_slt(s, ret, arg1, arg2); 630 } else { 631 tcg_out_opc_sltu(s, ret, arg1, arg2); 632 } 633 break; 634 635 default: 636 g_assert_not_reached(); 637 break; 638 } 639 640 return ret | flags; 641} 642 643static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 644 TCGReg arg1, tcg_target_long arg2, bool c2) 645{ 646 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 647 648 if (tmpflags != ret) { 649 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 650 651 switch (tmpflags & SETCOND_FLAGS) { 652 case SETCOND_INV: 653 /* Intermediate result is boolean: simply invert. */ 654 tcg_out_opc_xori(s, ret, tmp, 1); 655 break; 656 case SETCOND_NEZ: 657 /* Intermediate result is zero/non-zero: test != 0. */ 658 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 659 break; 660 case SETCOND_NEZ | SETCOND_INV: 661 /* Intermediate result is zero/non-zero: test == 0. */ 662 tcg_out_opc_sltui(s, ret, tmp, 1); 663 break; 664 default: 665 g_assert_not_reached(); 666 } 667 } 668} 669 670static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 671 TCGReg c1, tcg_target_long c2, bool const2, 672 TCGReg v1, TCGReg v2) 673{ 674 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 675 TCGReg t; 676 677 /* Standardize the test below to t != 0. */ 678 if (tmpflags & SETCOND_INV) { 679 t = v1, v1 = v2, v2 = t; 680 } 681 682 t = tmpflags & ~SETCOND_FLAGS; 683 if (v1 == TCG_REG_ZERO) { 684 tcg_out_opc_masknez(s, ret, v2, t); 685 } else if (v2 == TCG_REG_ZERO) { 686 tcg_out_opc_maskeqz(s, ret, v1, t); 687 } else { 688 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 689 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 690 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 691 } 692} 693 694/* 695 * Branch helpers 696 */ 697 698static const struct { 699 LoongArchInsn op; 700 bool swap; 701} tcg_brcond_to_loongarch[] = { 702 [TCG_COND_EQ] = { OPC_BEQ, false }, 703 [TCG_COND_NE] = { OPC_BNE, false }, 704 [TCG_COND_LT] = { OPC_BGT, true }, 705 [TCG_COND_GE] = { OPC_BLE, true }, 706 [TCG_COND_LE] = { OPC_BLE, false }, 707 [TCG_COND_GT] = { OPC_BGT, false }, 708 [TCG_COND_LTU] = { OPC_BGTU, true }, 709 [TCG_COND_GEU] = { OPC_BLEU, true }, 710 [TCG_COND_LEU] = { OPC_BLEU, false }, 711 [TCG_COND_GTU] = { OPC_BGTU, false } 712}; 713 714static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 715 TCGReg arg2, TCGLabel *l) 716{ 717 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 718 719 tcg_debug_assert(op != 0); 720 721 if (tcg_brcond_to_loongarch[cond].swap) { 722 TCGReg t = arg1; 723 arg1 = arg2; 724 arg2 = t; 725 } 726 727 /* all conditional branch insns belong to DJSk16-format */ 728 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 729 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 730} 731 732static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 733{ 734 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 735 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 736 737 tcg_debug_assert((offset & 3) == 0); 738 if (offset == sextreg(offset, 0, 28)) { 739 /* short jump: +/- 256MiB */ 740 if (tail) { 741 tcg_out_opc_b(s, offset >> 2); 742 } else { 743 tcg_out_opc_bl(s, offset >> 2); 744 } 745 } else if (offset == sextreg(offset, 0, 38)) { 746 /* long jump: +/- 256GiB */ 747 tcg_target_long lo = sextreg(offset, 0, 18); 748 tcg_target_long hi = offset - lo; 749 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 750 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 751 } else { 752 /* far jump: 64-bit */ 753 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 754 tcg_target_long hi = (tcg_target_long)arg - lo; 755 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 756 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 757 } 758} 759 760static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 761 const TCGHelperInfo *info) 762{ 763 tcg_out_call_int(s, arg, false); 764} 765 766/* 767 * Load/store helpers 768 */ 769 770static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 771 TCGReg addr, intptr_t offset) 772{ 773 intptr_t imm12 = sextreg(offset, 0, 12); 774 775 if (offset != imm12) { 776 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 777 778 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 779 imm12 = sextreg(diff, 0, 12); 780 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 781 } else { 782 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 783 if (addr != TCG_REG_ZERO) { 784 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 785 } 786 } 787 addr = TCG_REG_TMP2; 788 } 789 790 switch (opc) { 791 case OPC_LD_B: 792 case OPC_LD_BU: 793 case OPC_LD_H: 794 case OPC_LD_HU: 795 case OPC_LD_W: 796 case OPC_LD_WU: 797 case OPC_LD_D: 798 case OPC_ST_B: 799 case OPC_ST_H: 800 case OPC_ST_W: 801 case OPC_ST_D: 802 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 803 break; 804 default: 805 g_assert_not_reached(); 806 } 807} 808 809static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 810 TCGReg arg1, intptr_t arg2) 811{ 812 bool is_32bit = type == TCG_TYPE_I32; 813 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 814} 815 816static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 817 TCGReg arg1, intptr_t arg2) 818{ 819 bool is_32bit = type == TCG_TYPE_I32; 820 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 821} 822 823static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 824 TCGReg base, intptr_t ofs) 825{ 826 if (val == 0) { 827 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 828 return true; 829 } 830 return false; 831} 832 833/* 834 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 835 */ 836 837static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 838{ 839 tcg_out_opc_b(s, 0); 840 return reloc_br_sd10k16(s->code_ptr - 1, target); 841} 842 843static const TCGLdstHelperParam ldst_helper_param = { 844 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 845}; 846 847static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 848{ 849 MemOp opc = get_memop(l->oi); 850 851 /* resolve label address */ 852 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 853 return false; 854 } 855 856 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 857 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 858 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 859 return tcg_out_goto(s, l->raddr); 860} 861 862static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 863{ 864 MemOp opc = get_memop(l->oi); 865 866 /* resolve label address */ 867 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 868 return false; 869 } 870 871 tcg_out_st_helper_args(s, l, &ldst_helper_param); 872 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 873 return tcg_out_goto(s, l->raddr); 874} 875 876typedef struct { 877 TCGReg base; 878 TCGReg index; 879 TCGAtomAlign aa; 880} HostAddress; 881 882bool tcg_target_has_memory_bswap(MemOp memop) 883{ 884 return false; 885} 886 887/* We expect to use a 12-bit negative offset from ENV. */ 888#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 889 890/* 891 * For system-mode, perform the TLB load and compare. 892 * For user-mode, perform any required alignment tests. 893 * In both cases, return a TCGLabelQemuLdst structure if the slow path 894 * is required and fill in @h with the host address for the fast path. 895 */ 896static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 897 TCGReg addr_reg, MemOpIdx oi, 898 bool is_ld) 899{ 900 TCGType addr_type = s->addr_type; 901 TCGLabelQemuLdst *ldst = NULL; 902 MemOp opc = get_memop(oi); 903 MemOp a_bits; 904 905 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 906 a_bits = h->aa.align; 907 908 if (tcg_use_softmmu) { 909 unsigned s_bits = opc & MO_SIZE; 910 int mem_index = get_mmuidx(oi); 911 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 912 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 913 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 914 915 ldst = new_ldst_label(s); 916 ldst->is_ld = is_ld; 917 ldst->oi = oi; 918 ldst->addrlo_reg = addr_reg; 919 920 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 921 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 922 923 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 924 s->page_bits - CPU_TLB_ENTRY_BITS); 925 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 926 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 927 928 /* Load the tlb comparator and the addend. */ 929 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 930 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 931 is_ld ? offsetof(CPUTLBEntry, addr_read) 932 : offsetof(CPUTLBEntry, addr_write)); 933 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 934 offsetof(CPUTLBEntry, addend)); 935 936 /* 937 * For aligned accesses, we check the first byte and include the 938 * alignment bits within the address. For unaligned access, we 939 * check that we don't cross pages using the address of the last 940 * byte of the access. 941 */ 942 if (a_bits < s_bits) { 943 unsigned a_mask = (1u << a_bits) - 1; 944 unsigned s_mask = (1u << s_bits) - 1; 945 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); 946 } else { 947 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); 948 } 949 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, 950 a_bits, s->page_bits - 1); 951 952 /* Compare masked address with the TLB entry. */ 953 ldst->label_ptr[0] = s->code_ptr; 954 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 955 956 h->index = TCG_REG_TMP2; 957 } else { 958 if (a_bits) { 959 ldst = new_ldst_label(s); 960 961 ldst->is_ld = is_ld; 962 ldst->oi = oi; 963 ldst->addrlo_reg = addr_reg; 964 965 /* 966 * Without micro-architecture details, we don't know which of 967 * bstrpick or andi is faster, so use bstrpick as it's not 968 * constrained by imm field width. Not to say alignments >= 2^12 969 * are going to happen any time soon. 970 */ 971 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 972 973 ldst->label_ptr[0] = s->code_ptr; 974 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 975 } 976 977 h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 978 } 979 980 if (addr_type == TCG_TYPE_I32) { 981 h->base = TCG_REG_TMP0; 982 tcg_out_ext32u(s, h->base, addr_reg); 983 } else { 984 h->base = addr_reg; 985 } 986 987 return ldst; 988} 989 990static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 991 TCGReg rd, HostAddress h) 992{ 993 /* Byte swapping is left to middle-end expansion. */ 994 tcg_debug_assert((opc & MO_BSWAP) == 0); 995 996 switch (opc & MO_SSIZE) { 997 case MO_UB: 998 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 999 break; 1000 case MO_SB: 1001 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 1002 break; 1003 case MO_UW: 1004 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 1005 break; 1006 case MO_SW: 1007 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 1008 break; 1009 case MO_UL: 1010 if (type == TCG_TYPE_I64) { 1011 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 1012 break; 1013 } 1014 /* fallthrough */ 1015 case MO_SL: 1016 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1017 break; 1018 case MO_UQ: 1019 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1020 break; 1021 default: 1022 g_assert_not_reached(); 1023 } 1024} 1025 1026static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1027 MemOpIdx oi, TCGType data_type) 1028{ 1029 TCGLabelQemuLdst *ldst; 1030 HostAddress h; 1031 1032 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1033 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1034 1035 if (ldst) { 1036 ldst->type = data_type; 1037 ldst->datalo_reg = data_reg; 1038 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1039 } 1040} 1041 1042static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1043 TCGReg rd, HostAddress h) 1044{ 1045 /* Byte swapping is left to middle-end expansion. */ 1046 tcg_debug_assert((opc & MO_BSWAP) == 0); 1047 1048 switch (opc & MO_SIZE) { 1049 case MO_8: 1050 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1051 break; 1052 case MO_16: 1053 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1054 break; 1055 case MO_32: 1056 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1057 break; 1058 case MO_64: 1059 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1060 break; 1061 default: 1062 g_assert_not_reached(); 1063 } 1064} 1065 1066static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1067 MemOpIdx oi, TCGType data_type) 1068{ 1069 TCGLabelQemuLdst *ldst; 1070 HostAddress h; 1071 1072 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1073 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1074 1075 if (ldst) { 1076 ldst->type = data_type; 1077 ldst->datalo_reg = data_reg; 1078 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1079 } 1080} 1081 1082static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi, 1083 TCGReg addr_reg, MemOpIdx oi, bool is_ld) 1084{ 1085 TCGLabelQemuLdst *ldst; 1086 HostAddress h; 1087 1088 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); 1089 1090 if (h.aa.atom == MO_128) { 1091 /* 1092 * Use VLDX/VSTX when 128-bit atomicity is required. 1093 * If address is aligned to 16-bytes, the 128-bit load/store is atomic. 1094 */ 1095 if (is_ld) { 1096 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index); 1097 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0); 1098 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1); 1099 } else { 1100 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0); 1101 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1); 1102 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index); 1103 } 1104 } else { 1105 /* Otherwise use a pair of LD/ST. */ 1106 TCGReg base = h.base; 1107 if (h.index != TCG_REG_ZERO) { 1108 base = TCG_REG_TMP0; 1109 tcg_out_opc_add_d(s, base, h.base, h.index); 1110 } 1111 if (is_ld) { 1112 tcg_debug_assert(base != data_lo); 1113 tcg_out_opc_ld_d(s, data_lo, base, 0); 1114 tcg_out_opc_ld_d(s, data_hi, base, 8); 1115 } else { 1116 tcg_out_opc_st_d(s, data_lo, base, 0); 1117 tcg_out_opc_st_d(s, data_hi, base, 8); 1118 } 1119 } 1120 1121 if (ldst) { 1122 ldst->type = TCG_TYPE_I128; 1123 ldst->datalo_reg = data_lo; 1124 ldst->datahi_reg = data_hi; 1125 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1126 } 1127} 1128 1129/* 1130 * Entry-points 1131 */ 1132 1133static const tcg_insn_unit *tb_ret_addr; 1134 1135static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1136{ 1137 /* Reuse the zeroing that exists for goto_ptr. */ 1138 if (a0 == 0) { 1139 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1140 } else { 1141 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1142 tcg_out_call_int(s, tb_ret_addr, true); 1143 } 1144} 1145 1146static void tcg_out_goto_tb(TCGContext *s, int which) 1147{ 1148 /* 1149 * Direct branch, or load indirect address, to be patched 1150 * by tb_target_set_jmp_target. Check indirect load offset 1151 * in range early, regardless of direct branch distance, 1152 * via assert within tcg_out_opc_pcaddu2i. 1153 */ 1154 uintptr_t i_addr = get_jmp_target_addr(s, which); 1155 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1156 1157 set_jmp_insn_offset(s, which); 1158 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1159 1160 /* Finish the load and indirect branch. */ 1161 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1162 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1163 set_jmp_reset_offset(s, which); 1164} 1165 1166void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1167 uintptr_t jmp_rx, uintptr_t jmp_rw) 1168{ 1169 uintptr_t d_addr = tb->jmp_target_addr[n]; 1170 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1171 tcg_insn_unit insn; 1172 1173 /* Either directly branch, or load slot address for indirect branch. */ 1174 if (d_disp == sextreg(d_disp, 0, 26)) { 1175 insn = encode_sd10k16_insn(OPC_B, d_disp); 1176 } else { 1177 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1178 intptr_t i_disp = i_addr - jmp_rx; 1179 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1180 } 1181 1182 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1183 flush_idcache_range(jmp_rx, jmp_rw, 4); 1184} 1185 1186static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1187 const TCGArg args[TCG_MAX_OP_ARGS], 1188 const int const_args[TCG_MAX_OP_ARGS]) 1189{ 1190 TCGArg a0 = args[0]; 1191 TCGArg a1 = args[1]; 1192 TCGArg a2 = args[2]; 1193 TCGArg a3 = args[3]; 1194 int c2 = const_args[2]; 1195 1196 switch (opc) { 1197 case INDEX_op_mb: 1198 tcg_out_mb(s, a0); 1199 break; 1200 1201 case INDEX_op_goto_ptr: 1202 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1203 break; 1204 1205 case INDEX_op_br: 1206 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1207 0); 1208 tcg_out_opc_b(s, 0); 1209 break; 1210 1211 case INDEX_op_brcond_i32: 1212 case INDEX_op_brcond_i64: 1213 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1214 break; 1215 1216 case INDEX_op_extrh_i64_i32: 1217 tcg_out_opc_srai_d(s, a0, a1, 32); 1218 break; 1219 1220 case INDEX_op_not_i32: 1221 case INDEX_op_not_i64: 1222 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1223 break; 1224 1225 case INDEX_op_nor_i32: 1226 case INDEX_op_nor_i64: 1227 if (c2) { 1228 tcg_out_opc_ori(s, a0, a1, a2); 1229 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1230 } else { 1231 tcg_out_opc_nor(s, a0, a1, a2); 1232 } 1233 break; 1234 1235 case INDEX_op_andc_i32: 1236 case INDEX_op_andc_i64: 1237 if (c2) { 1238 /* guaranteed to fit due to constraint */ 1239 tcg_out_opc_andi(s, a0, a1, ~a2); 1240 } else { 1241 tcg_out_opc_andn(s, a0, a1, a2); 1242 } 1243 break; 1244 1245 case INDEX_op_orc_i32: 1246 case INDEX_op_orc_i64: 1247 if (c2) { 1248 /* guaranteed to fit due to constraint */ 1249 tcg_out_opc_ori(s, a0, a1, ~a2); 1250 } else { 1251 tcg_out_opc_orn(s, a0, a1, a2); 1252 } 1253 break; 1254 1255 case INDEX_op_and_i32: 1256 case INDEX_op_and_i64: 1257 if (c2) { 1258 tcg_out_opc_andi(s, a0, a1, a2); 1259 } else { 1260 tcg_out_opc_and(s, a0, a1, a2); 1261 } 1262 break; 1263 1264 case INDEX_op_or_i32: 1265 case INDEX_op_or_i64: 1266 if (c2) { 1267 tcg_out_opc_ori(s, a0, a1, a2); 1268 } else { 1269 tcg_out_opc_or(s, a0, a1, a2); 1270 } 1271 break; 1272 1273 case INDEX_op_xor_i32: 1274 case INDEX_op_xor_i64: 1275 if (c2) { 1276 tcg_out_opc_xori(s, a0, a1, a2); 1277 } else { 1278 tcg_out_opc_xor(s, a0, a1, a2); 1279 } 1280 break; 1281 1282 case INDEX_op_extract_i32: 1283 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1284 break; 1285 case INDEX_op_extract_i64: 1286 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1287 break; 1288 1289 case INDEX_op_deposit_i32: 1290 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1291 break; 1292 case INDEX_op_deposit_i64: 1293 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1294 break; 1295 1296 case INDEX_op_bswap16_i32: 1297 case INDEX_op_bswap16_i64: 1298 tcg_out_opc_revb_2h(s, a0, a1); 1299 if (a2 & TCG_BSWAP_OS) { 1300 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1301 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1302 tcg_out_ext16u(s, a0, a0); 1303 } 1304 break; 1305 1306 case INDEX_op_bswap32_i32: 1307 /* All 32-bit values are computed sign-extended in the register. */ 1308 a2 = TCG_BSWAP_OS; 1309 /* fallthrough */ 1310 case INDEX_op_bswap32_i64: 1311 tcg_out_opc_revb_2w(s, a0, a1); 1312 if (a2 & TCG_BSWAP_OS) { 1313 tcg_out_ext32s(s, a0, a0); 1314 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1315 tcg_out_ext32u(s, a0, a0); 1316 } 1317 break; 1318 1319 case INDEX_op_bswap64_i64: 1320 tcg_out_opc_revb_d(s, a0, a1); 1321 break; 1322 1323 case INDEX_op_clz_i32: 1324 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1325 break; 1326 case INDEX_op_clz_i64: 1327 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1328 break; 1329 1330 case INDEX_op_ctz_i32: 1331 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1332 break; 1333 case INDEX_op_ctz_i64: 1334 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1335 break; 1336 1337 case INDEX_op_shl_i32: 1338 if (c2) { 1339 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1340 } else { 1341 tcg_out_opc_sll_w(s, a0, a1, a2); 1342 } 1343 break; 1344 case INDEX_op_shl_i64: 1345 if (c2) { 1346 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1347 } else { 1348 tcg_out_opc_sll_d(s, a0, a1, a2); 1349 } 1350 break; 1351 1352 case INDEX_op_shr_i32: 1353 if (c2) { 1354 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1355 } else { 1356 tcg_out_opc_srl_w(s, a0, a1, a2); 1357 } 1358 break; 1359 case INDEX_op_shr_i64: 1360 if (c2) { 1361 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1362 } else { 1363 tcg_out_opc_srl_d(s, a0, a1, a2); 1364 } 1365 break; 1366 1367 case INDEX_op_sar_i32: 1368 if (c2) { 1369 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1370 } else { 1371 tcg_out_opc_sra_w(s, a0, a1, a2); 1372 } 1373 break; 1374 case INDEX_op_sar_i64: 1375 if (c2) { 1376 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1377 } else { 1378 tcg_out_opc_sra_d(s, a0, a1, a2); 1379 } 1380 break; 1381 1382 case INDEX_op_rotl_i32: 1383 /* transform into equivalent rotr/rotri */ 1384 if (c2) { 1385 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1386 } else { 1387 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1388 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1389 } 1390 break; 1391 case INDEX_op_rotl_i64: 1392 /* transform into equivalent rotr/rotri */ 1393 if (c2) { 1394 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1395 } else { 1396 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1397 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1398 } 1399 break; 1400 1401 case INDEX_op_rotr_i32: 1402 if (c2) { 1403 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1404 } else { 1405 tcg_out_opc_rotr_w(s, a0, a1, a2); 1406 } 1407 break; 1408 case INDEX_op_rotr_i64: 1409 if (c2) { 1410 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1411 } else { 1412 tcg_out_opc_rotr_d(s, a0, a1, a2); 1413 } 1414 break; 1415 1416 case INDEX_op_add_i32: 1417 if (c2) { 1418 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1419 } else { 1420 tcg_out_opc_add_w(s, a0, a1, a2); 1421 } 1422 break; 1423 case INDEX_op_add_i64: 1424 if (c2) { 1425 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1426 } else { 1427 tcg_out_opc_add_d(s, a0, a1, a2); 1428 } 1429 break; 1430 1431 case INDEX_op_sub_i32: 1432 if (c2) { 1433 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1434 } else { 1435 tcg_out_opc_sub_w(s, a0, a1, a2); 1436 } 1437 break; 1438 case INDEX_op_sub_i64: 1439 if (c2) { 1440 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1441 } else { 1442 tcg_out_opc_sub_d(s, a0, a1, a2); 1443 } 1444 break; 1445 1446 case INDEX_op_mul_i32: 1447 tcg_out_opc_mul_w(s, a0, a1, a2); 1448 break; 1449 case INDEX_op_mul_i64: 1450 tcg_out_opc_mul_d(s, a0, a1, a2); 1451 break; 1452 1453 case INDEX_op_mulsh_i32: 1454 tcg_out_opc_mulh_w(s, a0, a1, a2); 1455 break; 1456 case INDEX_op_mulsh_i64: 1457 tcg_out_opc_mulh_d(s, a0, a1, a2); 1458 break; 1459 1460 case INDEX_op_muluh_i32: 1461 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1462 break; 1463 case INDEX_op_muluh_i64: 1464 tcg_out_opc_mulh_du(s, a0, a1, a2); 1465 break; 1466 1467 case INDEX_op_div_i32: 1468 tcg_out_opc_div_w(s, a0, a1, a2); 1469 break; 1470 case INDEX_op_div_i64: 1471 tcg_out_opc_div_d(s, a0, a1, a2); 1472 break; 1473 1474 case INDEX_op_divu_i32: 1475 tcg_out_opc_div_wu(s, a0, a1, a2); 1476 break; 1477 case INDEX_op_divu_i64: 1478 tcg_out_opc_div_du(s, a0, a1, a2); 1479 break; 1480 1481 case INDEX_op_rem_i32: 1482 tcg_out_opc_mod_w(s, a0, a1, a2); 1483 break; 1484 case INDEX_op_rem_i64: 1485 tcg_out_opc_mod_d(s, a0, a1, a2); 1486 break; 1487 1488 case INDEX_op_remu_i32: 1489 tcg_out_opc_mod_wu(s, a0, a1, a2); 1490 break; 1491 case INDEX_op_remu_i64: 1492 tcg_out_opc_mod_du(s, a0, a1, a2); 1493 break; 1494 1495 case INDEX_op_setcond_i32: 1496 case INDEX_op_setcond_i64: 1497 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1498 break; 1499 1500 case INDEX_op_movcond_i32: 1501 case INDEX_op_movcond_i64: 1502 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1503 break; 1504 1505 case INDEX_op_ld8s_i32: 1506 case INDEX_op_ld8s_i64: 1507 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1508 break; 1509 case INDEX_op_ld8u_i32: 1510 case INDEX_op_ld8u_i64: 1511 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1512 break; 1513 case INDEX_op_ld16s_i32: 1514 case INDEX_op_ld16s_i64: 1515 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1516 break; 1517 case INDEX_op_ld16u_i32: 1518 case INDEX_op_ld16u_i64: 1519 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1520 break; 1521 case INDEX_op_ld_i32: 1522 case INDEX_op_ld32s_i64: 1523 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1524 break; 1525 case INDEX_op_ld32u_i64: 1526 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1527 break; 1528 case INDEX_op_ld_i64: 1529 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1530 break; 1531 1532 case INDEX_op_st8_i32: 1533 case INDEX_op_st8_i64: 1534 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1535 break; 1536 case INDEX_op_st16_i32: 1537 case INDEX_op_st16_i64: 1538 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1539 break; 1540 case INDEX_op_st_i32: 1541 case INDEX_op_st32_i64: 1542 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1543 break; 1544 case INDEX_op_st_i64: 1545 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1546 break; 1547 1548 case INDEX_op_qemu_ld_a32_i32: 1549 case INDEX_op_qemu_ld_a64_i32: 1550 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1551 break; 1552 case INDEX_op_qemu_ld_a32_i64: 1553 case INDEX_op_qemu_ld_a64_i64: 1554 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1555 break; 1556 case INDEX_op_qemu_ld_a32_i128: 1557 case INDEX_op_qemu_ld_a64_i128: 1558 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true); 1559 break; 1560 case INDEX_op_qemu_st_a32_i32: 1561 case INDEX_op_qemu_st_a64_i32: 1562 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1563 break; 1564 case INDEX_op_qemu_st_a32_i64: 1565 case INDEX_op_qemu_st_a64_i64: 1566 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1567 break; 1568 case INDEX_op_qemu_st_a32_i128: 1569 case INDEX_op_qemu_st_a64_i128: 1570 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false); 1571 break; 1572 1573 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1574 case INDEX_op_mov_i64: 1575 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1576 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1577 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1578 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1579 case INDEX_op_ext8s_i64: 1580 case INDEX_op_ext8u_i32: 1581 case INDEX_op_ext8u_i64: 1582 case INDEX_op_ext16s_i32: 1583 case INDEX_op_ext16s_i64: 1584 case INDEX_op_ext16u_i32: 1585 case INDEX_op_ext16u_i64: 1586 case INDEX_op_ext32s_i64: 1587 case INDEX_op_ext32u_i64: 1588 case INDEX_op_ext_i32_i64: 1589 case INDEX_op_extu_i32_i64: 1590 case INDEX_op_extrl_i64_i32: 1591 default: 1592 g_assert_not_reached(); 1593 } 1594} 1595 1596static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 1597 TCGReg rd, TCGReg rs) 1598{ 1599 switch (vece) { 1600 case MO_8: 1601 tcg_out_opc_vreplgr2vr_b(s, rd, rs); 1602 break; 1603 case MO_16: 1604 tcg_out_opc_vreplgr2vr_h(s, rd, rs); 1605 break; 1606 case MO_32: 1607 tcg_out_opc_vreplgr2vr_w(s, rd, rs); 1608 break; 1609 case MO_64: 1610 tcg_out_opc_vreplgr2vr_d(s, rd, rs); 1611 break; 1612 default: 1613 g_assert_not_reached(); 1614 } 1615 return true; 1616} 1617 1618static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 1619 TCGReg r, TCGReg base, intptr_t offset) 1620{ 1621 /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */ 1622 if (offset < -0x800 || offset > 0x7ff || \ 1623 (offset & ((1 << vece) - 1)) != 0) { 1624 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset); 1625 base = TCG_REG_TMP0; 1626 offset = 0; 1627 } 1628 offset >>= vece; 1629 1630 switch (vece) { 1631 case MO_8: 1632 tcg_out_opc_vldrepl_b(s, r, base, offset); 1633 break; 1634 case MO_16: 1635 tcg_out_opc_vldrepl_h(s, r, base, offset); 1636 break; 1637 case MO_32: 1638 tcg_out_opc_vldrepl_w(s, r, base, offset); 1639 break; 1640 case MO_64: 1641 tcg_out_opc_vldrepl_d(s, r, base, offset); 1642 break; 1643 default: 1644 g_assert_not_reached(); 1645 } 1646 return true; 1647} 1648 1649static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 1650 TCGReg rd, int64_t v64) 1651{ 1652 /* Try vldi if imm can fit */ 1653 int64_t value = sextract64(v64, 0, 8 << vece); 1654 if (-0x200 <= value && value <= 0x1FF) { 1655 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF); 1656 tcg_out_opc_vldi(s, rd, imm); 1657 return; 1658 } 1659 1660 /* TODO: vldi patterns when imm 12 is set */ 1661 1662 /* Fallback to vreplgr2vr */ 1663 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value); 1664 switch (vece) { 1665 case MO_8: 1666 tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0); 1667 break; 1668 case MO_16: 1669 tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0); 1670 break; 1671 case MO_32: 1672 tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0); 1673 break; 1674 case MO_64: 1675 tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0); 1676 break; 1677 default: 1678 g_assert_not_reached(); 1679 } 1680} 1681 1682static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0, 1683 const TCGArg a1, const TCGArg a2, 1684 bool a2_is_const, bool is_add) 1685{ 1686 static const LoongArchInsn add_vec_insn[4] = { 1687 OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D 1688 }; 1689 static const LoongArchInsn add_vec_imm_insn[4] = { 1690 OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU 1691 }; 1692 static const LoongArchInsn sub_vec_insn[4] = { 1693 OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D 1694 }; 1695 static const LoongArchInsn sub_vec_imm_insn[4] = { 1696 OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU 1697 }; 1698 1699 if (a2_is_const) { 1700 int64_t value = sextract64(a2, 0, 8 << vece); 1701 if (!is_add) { 1702 value = -value; 1703 } 1704 1705 /* Try vaddi/vsubi */ 1706 if (0 <= value && value <= 0x1f) { 1707 tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \ 1708 a1, value)); 1709 return; 1710 } else if (-0x1f <= value && value < 0) { 1711 tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \ 1712 a1, -value)); 1713 return; 1714 } 1715 1716 /* constraint TCG_CT_CONST_VADD ensures unreachable */ 1717 g_assert_not_reached(); 1718 } 1719 1720 if (is_add) { 1721 tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2)); 1722 } else { 1723 tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2)); 1724 } 1725} 1726 1727static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 1728 unsigned vecl, unsigned vece, 1729 const TCGArg args[TCG_MAX_OP_ARGS], 1730 const int const_args[TCG_MAX_OP_ARGS]) 1731{ 1732 TCGType type = vecl + TCG_TYPE_V64; 1733 TCGArg a0, a1, a2, a3; 1734 TCGReg temp = TCG_REG_TMP0; 1735 TCGReg temp_vec = TCG_VEC_TMP0; 1736 1737 static const LoongArchInsn cmp_vec_insn[16][4] = { 1738 [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D}, 1739 [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D}, 1740 [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU}, 1741 [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D}, 1742 [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU}, 1743 }; 1744 static const LoongArchInsn cmp_vec_imm_insn[16][4] = { 1745 [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D}, 1746 [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D}, 1747 [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU}, 1748 [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D}, 1749 [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU}, 1750 }; 1751 LoongArchInsn insn; 1752 static const LoongArchInsn neg_vec_insn[4] = { 1753 OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D 1754 }; 1755 static const LoongArchInsn mul_vec_insn[4] = { 1756 OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D 1757 }; 1758 static const LoongArchInsn smin_vec_insn[4] = { 1759 OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D 1760 }; 1761 static const LoongArchInsn umin_vec_insn[4] = { 1762 OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU 1763 }; 1764 static const LoongArchInsn smax_vec_insn[4] = { 1765 OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D 1766 }; 1767 static const LoongArchInsn umax_vec_insn[4] = { 1768 OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU 1769 }; 1770 static const LoongArchInsn ssadd_vec_insn[4] = { 1771 OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D 1772 }; 1773 static const LoongArchInsn usadd_vec_insn[4] = { 1774 OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU 1775 }; 1776 static const LoongArchInsn sssub_vec_insn[4] = { 1777 OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D 1778 }; 1779 static const LoongArchInsn ussub_vec_insn[4] = { 1780 OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU 1781 }; 1782 static const LoongArchInsn shlv_vec_insn[4] = { 1783 OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D 1784 }; 1785 static const LoongArchInsn shrv_vec_insn[4] = { 1786 OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D 1787 }; 1788 static const LoongArchInsn sarv_vec_insn[4] = { 1789 OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D 1790 }; 1791 static const LoongArchInsn shli_vec_insn[4] = { 1792 OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D 1793 }; 1794 static const LoongArchInsn shri_vec_insn[4] = { 1795 OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D 1796 }; 1797 static const LoongArchInsn sari_vec_insn[4] = { 1798 OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D 1799 }; 1800 static const LoongArchInsn rotrv_vec_insn[4] = { 1801 OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D 1802 }; 1803 1804 a0 = args[0]; 1805 a1 = args[1]; 1806 a2 = args[2]; 1807 a3 = args[3]; 1808 1809 /* Currently only supports V128 */ 1810 tcg_debug_assert(type == TCG_TYPE_V128); 1811 1812 switch (opc) { 1813 case INDEX_op_st_vec: 1814 /* Try to fit vst imm */ 1815 if (-0x800 <= a2 && a2 <= 0x7ff) { 1816 tcg_out_opc_vst(s, a0, a1, a2); 1817 } else { 1818 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1819 tcg_out_opc_vstx(s, a0, a1, temp); 1820 } 1821 break; 1822 case INDEX_op_ld_vec: 1823 /* Try to fit vld imm */ 1824 if (-0x800 <= a2 && a2 <= 0x7ff) { 1825 tcg_out_opc_vld(s, a0, a1, a2); 1826 } else { 1827 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1828 tcg_out_opc_vldx(s, a0, a1, temp); 1829 } 1830 break; 1831 case INDEX_op_and_vec: 1832 tcg_out_opc_vand_v(s, a0, a1, a2); 1833 break; 1834 case INDEX_op_andc_vec: 1835 /* 1836 * vandn vd, vj, vk: vd = vk & ~vj 1837 * andc_vec vd, vj, vk: vd = vj & ~vk 1838 * vk and vk are swapped 1839 */ 1840 tcg_out_opc_vandn_v(s, a0, a2, a1); 1841 break; 1842 case INDEX_op_or_vec: 1843 tcg_out_opc_vor_v(s, a0, a1, a2); 1844 break; 1845 case INDEX_op_orc_vec: 1846 tcg_out_opc_vorn_v(s, a0, a1, a2); 1847 break; 1848 case INDEX_op_xor_vec: 1849 tcg_out_opc_vxor_v(s, a0, a1, a2); 1850 break; 1851 case INDEX_op_nor_vec: 1852 tcg_out_opc_vnor_v(s, a0, a1, a2); 1853 break; 1854 case INDEX_op_not_vec: 1855 tcg_out_opc_vnor_v(s, a0, a1, a1); 1856 break; 1857 case INDEX_op_cmp_vec: 1858 { 1859 TCGCond cond = args[3]; 1860 if (const_args[2]) { 1861 /* 1862 * cmp_vec dest, src, value 1863 * Try vseqi/vslei/vslti 1864 */ 1865 int64_t value = sextract64(a2, 0, 8 << vece); 1866 if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \ 1867 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) { 1868 tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \ 1869 a0, a1, value)); 1870 break; 1871 } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) && 1872 (0x00 <= value && value <= 0x1f)) { 1873 tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \ 1874 a0, a1, value)); 1875 break; 1876 } 1877 1878 /* 1879 * Fallback to: 1880 * dupi_vec temp, a2 1881 * cmp_vec a0, a1, temp, cond 1882 */ 1883 tcg_out_dupi_vec(s, type, vece, temp_vec, a2); 1884 a2 = temp_vec; 1885 } 1886 1887 insn = cmp_vec_insn[cond][vece]; 1888 if (insn == 0) { 1889 TCGArg t; 1890 t = a1, a1 = a2, a2 = t; 1891 cond = tcg_swap_cond(cond); 1892 insn = cmp_vec_insn[cond][vece]; 1893 tcg_debug_assert(insn != 0); 1894 } 1895 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2)); 1896 } 1897 break; 1898 case INDEX_op_add_vec: 1899 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true); 1900 break; 1901 case INDEX_op_sub_vec: 1902 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false); 1903 break; 1904 case INDEX_op_neg_vec: 1905 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1)); 1906 break; 1907 case INDEX_op_mul_vec: 1908 tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2)); 1909 break; 1910 case INDEX_op_smin_vec: 1911 tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2)); 1912 break; 1913 case INDEX_op_smax_vec: 1914 tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2)); 1915 break; 1916 case INDEX_op_umin_vec: 1917 tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2)); 1918 break; 1919 case INDEX_op_umax_vec: 1920 tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2)); 1921 break; 1922 case INDEX_op_ssadd_vec: 1923 tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2)); 1924 break; 1925 case INDEX_op_usadd_vec: 1926 tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2)); 1927 break; 1928 case INDEX_op_sssub_vec: 1929 tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2)); 1930 break; 1931 case INDEX_op_ussub_vec: 1932 tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2)); 1933 break; 1934 case INDEX_op_shlv_vec: 1935 tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2)); 1936 break; 1937 case INDEX_op_shrv_vec: 1938 tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2)); 1939 break; 1940 case INDEX_op_sarv_vec: 1941 tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2)); 1942 break; 1943 case INDEX_op_shli_vec: 1944 tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2)); 1945 break; 1946 case INDEX_op_shri_vec: 1947 tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2)); 1948 break; 1949 case INDEX_op_sari_vec: 1950 tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2)); 1951 break; 1952 case INDEX_op_rotrv_vec: 1953 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2)); 1954 break; 1955 case INDEX_op_rotlv_vec: 1956 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */ 1957 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2)); 1958 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, 1959 temp_vec)); 1960 break; 1961 case INDEX_op_rotli_vec: 1962 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */ 1963 a2 = extract32(-a2, 0, 3 + vece); 1964 switch (vece) { 1965 case MO_8: 1966 tcg_out_opc_vrotri_b(s, a0, a1, a2); 1967 break; 1968 case MO_16: 1969 tcg_out_opc_vrotri_h(s, a0, a1, a2); 1970 break; 1971 case MO_32: 1972 tcg_out_opc_vrotri_w(s, a0, a1, a2); 1973 break; 1974 case MO_64: 1975 tcg_out_opc_vrotri_d(s, a0, a1, a2); 1976 break; 1977 default: 1978 g_assert_not_reached(); 1979 } 1980 break; 1981 case INDEX_op_bitsel_vec: 1982 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */ 1983 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1); 1984 break; 1985 case INDEX_op_dupm_vec: 1986 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 1987 break; 1988 default: 1989 g_assert_not_reached(); 1990 } 1991} 1992 1993int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 1994{ 1995 switch (opc) { 1996 case INDEX_op_ld_vec: 1997 case INDEX_op_st_vec: 1998 case INDEX_op_dup_vec: 1999 case INDEX_op_dupm_vec: 2000 case INDEX_op_cmp_vec: 2001 case INDEX_op_add_vec: 2002 case INDEX_op_sub_vec: 2003 case INDEX_op_and_vec: 2004 case INDEX_op_andc_vec: 2005 case INDEX_op_or_vec: 2006 case INDEX_op_orc_vec: 2007 case INDEX_op_xor_vec: 2008 case INDEX_op_nor_vec: 2009 case INDEX_op_not_vec: 2010 case INDEX_op_neg_vec: 2011 case INDEX_op_mul_vec: 2012 case INDEX_op_smin_vec: 2013 case INDEX_op_smax_vec: 2014 case INDEX_op_umin_vec: 2015 case INDEX_op_umax_vec: 2016 case INDEX_op_ssadd_vec: 2017 case INDEX_op_usadd_vec: 2018 case INDEX_op_sssub_vec: 2019 case INDEX_op_ussub_vec: 2020 case INDEX_op_shlv_vec: 2021 case INDEX_op_shrv_vec: 2022 case INDEX_op_sarv_vec: 2023 case INDEX_op_bitsel_vec: 2024 return 1; 2025 default: 2026 return 0; 2027 } 2028} 2029 2030void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2031 TCGArg a0, ...) 2032{ 2033 g_assert_not_reached(); 2034} 2035 2036static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2037{ 2038 switch (op) { 2039 case INDEX_op_goto_ptr: 2040 return C_O0_I1(r); 2041 2042 case INDEX_op_st8_i32: 2043 case INDEX_op_st8_i64: 2044 case INDEX_op_st16_i32: 2045 case INDEX_op_st16_i64: 2046 case INDEX_op_st32_i64: 2047 case INDEX_op_st_i32: 2048 case INDEX_op_st_i64: 2049 case INDEX_op_qemu_st_a32_i32: 2050 case INDEX_op_qemu_st_a64_i32: 2051 case INDEX_op_qemu_st_a32_i64: 2052 case INDEX_op_qemu_st_a64_i64: 2053 return C_O0_I2(rZ, r); 2054 2055 case INDEX_op_qemu_ld_a32_i128: 2056 case INDEX_op_qemu_ld_a64_i128: 2057 return C_N2_I1(r, r, r); 2058 2059 case INDEX_op_qemu_st_a32_i128: 2060 case INDEX_op_qemu_st_a64_i128: 2061 return C_O0_I3(r, r, r); 2062 2063 case INDEX_op_brcond_i32: 2064 case INDEX_op_brcond_i64: 2065 return C_O0_I2(rZ, rZ); 2066 2067 case INDEX_op_ext8s_i32: 2068 case INDEX_op_ext8s_i64: 2069 case INDEX_op_ext8u_i32: 2070 case INDEX_op_ext8u_i64: 2071 case INDEX_op_ext16s_i32: 2072 case INDEX_op_ext16s_i64: 2073 case INDEX_op_ext16u_i32: 2074 case INDEX_op_ext16u_i64: 2075 case INDEX_op_ext32s_i64: 2076 case INDEX_op_ext32u_i64: 2077 case INDEX_op_extu_i32_i64: 2078 case INDEX_op_extrl_i64_i32: 2079 case INDEX_op_extrh_i64_i32: 2080 case INDEX_op_ext_i32_i64: 2081 case INDEX_op_not_i32: 2082 case INDEX_op_not_i64: 2083 case INDEX_op_extract_i32: 2084 case INDEX_op_extract_i64: 2085 case INDEX_op_bswap16_i32: 2086 case INDEX_op_bswap16_i64: 2087 case INDEX_op_bswap32_i32: 2088 case INDEX_op_bswap32_i64: 2089 case INDEX_op_bswap64_i64: 2090 case INDEX_op_ld8s_i32: 2091 case INDEX_op_ld8s_i64: 2092 case INDEX_op_ld8u_i32: 2093 case INDEX_op_ld8u_i64: 2094 case INDEX_op_ld16s_i32: 2095 case INDEX_op_ld16s_i64: 2096 case INDEX_op_ld16u_i32: 2097 case INDEX_op_ld16u_i64: 2098 case INDEX_op_ld32s_i64: 2099 case INDEX_op_ld32u_i64: 2100 case INDEX_op_ld_i32: 2101 case INDEX_op_ld_i64: 2102 case INDEX_op_qemu_ld_a32_i32: 2103 case INDEX_op_qemu_ld_a64_i32: 2104 case INDEX_op_qemu_ld_a32_i64: 2105 case INDEX_op_qemu_ld_a64_i64: 2106 return C_O1_I1(r, r); 2107 2108 case INDEX_op_andc_i32: 2109 case INDEX_op_andc_i64: 2110 case INDEX_op_orc_i32: 2111 case INDEX_op_orc_i64: 2112 /* 2113 * LoongArch insns for these ops don't have reg-imm forms, but we 2114 * can express using andi/ori if ~constant satisfies 2115 * TCG_CT_CONST_U12. 2116 */ 2117 return C_O1_I2(r, r, rC); 2118 2119 case INDEX_op_shl_i32: 2120 case INDEX_op_shl_i64: 2121 case INDEX_op_shr_i32: 2122 case INDEX_op_shr_i64: 2123 case INDEX_op_sar_i32: 2124 case INDEX_op_sar_i64: 2125 case INDEX_op_rotl_i32: 2126 case INDEX_op_rotl_i64: 2127 case INDEX_op_rotr_i32: 2128 case INDEX_op_rotr_i64: 2129 return C_O1_I2(r, r, ri); 2130 2131 case INDEX_op_add_i32: 2132 return C_O1_I2(r, r, ri); 2133 case INDEX_op_add_i64: 2134 return C_O1_I2(r, r, rJ); 2135 2136 case INDEX_op_and_i32: 2137 case INDEX_op_and_i64: 2138 case INDEX_op_nor_i32: 2139 case INDEX_op_nor_i64: 2140 case INDEX_op_or_i32: 2141 case INDEX_op_or_i64: 2142 case INDEX_op_xor_i32: 2143 case INDEX_op_xor_i64: 2144 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 2145 return C_O1_I2(r, r, rU); 2146 2147 case INDEX_op_clz_i32: 2148 case INDEX_op_clz_i64: 2149 case INDEX_op_ctz_i32: 2150 case INDEX_op_ctz_i64: 2151 return C_O1_I2(r, r, rW); 2152 2153 case INDEX_op_deposit_i32: 2154 case INDEX_op_deposit_i64: 2155 /* Must deposit into the same register as input */ 2156 return C_O1_I2(r, 0, rZ); 2157 2158 case INDEX_op_sub_i32: 2159 case INDEX_op_setcond_i32: 2160 return C_O1_I2(r, rZ, ri); 2161 case INDEX_op_sub_i64: 2162 case INDEX_op_setcond_i64: 2163 return C_O1_I2(r, rZ, rJ); 2164 2165 case INDEX_op_mul_i32: 2166 case INDEX_op_mul_i64: 2167 case INDEX_op_mulsh_i32: 2168 case INDEX_op_mulsh_i64: 2169 case INDEX_op_muluh_i32: 2170 case INDEX_op_muluh_i64: 2171 case INDEX_op_div_i32: 2172 case INDEX_op_div_i64: 2173 case INDEX_op_divu_i32: 2174 case INDEX_op_divu_i64: 2175 case INDEX_op_rem_i32: 2176 case INDEX_op_rem_i64: 2177 case INDEX_op_remu_i32: 2178 case INDEX_op_remu_i64: 2179 return C_O1_I2(r, rZ, rZ); 2180 2181 case INDEX_op_movcond_i32: 2182 case INDEX_op_movcond_i64: 2183 return C_O1_I4(r, rZ, rJ, rZ, rZ); 2184 2185 case INDEX_op_ld_vec: 2186 case INDEX_op_dupm_vec: 2187 case INDEX_op_dup_vec: 2188 return C_O1_I1(w, r); 2189 2190 case INDEX_op_st_vec: 2191 return C_O0_I2(w, r); 2192 2193 case INDEX_op_cmp_vec: 2194 return C_O1_I2(w, w, wM); 2195 2196 case INDEX_op_add_vec: 2197 case INDEX_op_sub_vec: 2198 return C_O1_I2(w, w, wA); 2199 2200 case INDEX_op_and_vec: 2201 case INDEX_op_andc_vec: 2202 case INDEX_op_or_vec: 2203 case INDEX_op_orc_vec: 2204 case INDEX_op_xor_vec: 2205 case INDEX_op_nor_vec: 2206 case INDEX_op_mul_vec: 2207 case INDEX_op_smin_vec: 2208 case INDEX_op_smax_vec: 2209 case INDEX_op_umin_vec: 2210 case INDEX_op_umax_vec: 2211 case INDEX_op_ssadd_vec: 2212 case INDEX_op_usadd_vec: 2213 case INDEX_op_sssub_vec: 2214 case INDEX_op_ussub_vec: 2215 case INDEX_op_shlv_vec: 2216 case INDEX_op_shrv_vec: 2217 case INDEX_op_sarv_vec: 2218 case INDEX_op_rotrv_vec: 2219 case INDEX_op_rotlv_vec: 2220 return C_O1_I2(w, w, w); 2221 2222 case INDEX_op_not_vec: 2223 case INDEX_op_neg_vec: 2224 case INDEX_op_shli_vec: 2225 case INDEX_op_shri_vec: 2226 case INDEX_op_sari_vec: 2227 case INDEX_op_rotli_vec: 2228 return C_O1_I1(w, w); 2229 2230 case INDEX_op_bitsel_vec: 2231 return C_O1_I3(w, w, w, w); 2232 2233 default: 2234 g_assert_not_reached(); 2235 } 2236} 2237 2238static const int tcg_target_callee_save_regs[] = { 2239 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2240 TCG_REG_S1, 2241 TCG_REG_S2, 2242 TCG_REG_S3, 2243 TCG_REG_S4, 2244 TCG_REG_S5, 2245 TCG_REG_S6, 2246 TCG_REG_S7, 2247 TCG_REG_S8, 2248 TCG_REG_S9, 2249 TCG_REG_RA, /* should be last for ABI compliance */ 2250}; 2251 2252/* Stack frame parameters. */ 2253#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 2254#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 2255#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 2256#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 2257 + TCG_TARGET_STACK_ALIGN - 1) \ 2258 & -TCG_TARGET_STACK_ALIGN) 2259#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 2260 2261/* We're expecting to be able to use an immediate for frame allocation. */ 2262QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 2263 2264/* Generate global QEMU prologue and epilogue code */ 2265static void tcg_target_qemu_prologue(TCGContext *s) 2266{ 2267 int i; 2268 2269 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 2270 2271 /* TB prologue */ 2272 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 2273 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2274 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2275 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2276 } 2277 2278 if (!tcg_use_softmmu && guest_base) { 2279 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 2280 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2281 } 2282 2283 /* Call generated code */ 2284 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2285 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 2286 2287 /* Return path for goto_ptr. Set return value to 0 */ 2288 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2289 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 2290 2291 /* TB epilogue */ 2292 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 2293 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2294 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2295 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2296 } 2297 2298 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 2299 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 2300} 2301 2302static void tcg_out_tb_start(TCGContext *s) 2303{ 2304 /* nothing to do */ 2305} 2306 2307static void tcg_target_init(TCGContext *s) 2308{ 2309 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2310 2311 /* Server and desktop class cpus have UAL; embedded cpus do not. */ 2312 if (!(hwcap & HWCAP_LOONGARCH_UAL)) { 2313 error_report("TCG: unaligned access support required; exiting"); 2314 exit(EXIT_FAILURE); 2315 } 2316 2317 if (hwcap & HWCAP_LOONGARCH_LSX) { 2318 use_lsx_instructions = 1; 2319 } 2320 2321 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2322 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 2323 2324 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 2325 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 2326 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 2327 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 2328 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 2329 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 2330 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 2331 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 2332 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 2333 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 2334 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 2335 2336 if (use_lsx_instructions) { 2337 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2338 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24); 2339 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25); 2340 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26); 2341 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27); 2342 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28); 2343 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29); 2344 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30); 2345 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31); 2346 } 2347 2348 s->reserved_regs = 0; 2349 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 2350 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 2351 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 2352 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 2353 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 2354 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 2355 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 2356 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); 2357} 2358 2359typedef struct { 2360 DebugFrameHeader h; 2361 uint8_t fde_def_cfa[4]; 2362 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 2363} DebugFrame; 2364 2365#define ELF_HOST_MACHINE EM_LOONGARCH 2366 2367static const DebugFrame debug_frame = { 2368 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 2369 .h.cie.id = -1, 2370 .h.cie.version = 1, 2371 .h.cie.code_align = 1, 2372 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 2373 .h.cie.return_column = TCG_REG_RA, 2374 2375 /* Total FDE size does not include the "len" member. */ 2376 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2377 2378 .fde_def_cfa = { 2379 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 2380 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2381 (FRAME_SIZE >> 7) 2382 }, 2383 .fde_reg_ofs = { 2384 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 2385 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 2386 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 2387 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 2388 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 2389 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 2390 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 2391 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 2392 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 2393 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 2394 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 2395 } 2396}; 2397 2398void tcg_register_jit(const void *buf, size_t buf_size) 2399{ 2400 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2401} 2402