1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include <asm/hwcap.h> 33 34/* used for function call generation */ 35#define TCG_REG_CALL_STACK TCG_REG_SP 36#define TCG_TARGET_STACK_ALIGN 16 37#define TCG_TARGET_CALL_STACK_OFFSET 0 38#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL 39#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL 40#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL 41#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL 42 43#ifdef CONFIG_DEBUG_TCG 44static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 45 "zero", 46 "ra", 47 "tp", 48 "sp", 49 "a0", 50 "a1", 51 "a2", 52 "a3", 53 "a4", 54 "a5", 55 "a6", 56 "a7", 57 "t0", 58 "t1", 59 "t2", 60 "t3", 61 "t4", 62 "t5", 63 "t6", 64 "t7", 65 "t8", 66 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 67 "s9", 68 "s0", 69 "s1", 70 "s2", 71 "s3", 72 "s4", 73 "s5", 74 "s6", 75 "s7", 76 "s8", 77 "vr0", 78 "vr1", 79 "vr2", 80 "vr3", 81 "vr4", 82 "vr5", 83 "vr6", 84 "vr7", 85 "vr8", 86 "vr9", 87 "vr10", 88 "vr11", 89 "vr12", 90 "vr13", 91 "vr14", 92 "vr15", 93 "vr16", 94 "vr17", 95 "vr18", 96 "vr19", 97 "vr20", 98 "vr21", 99 "vr22", 100 "vr23", 101 "vr24", 102 "vr25", 103 "vr26", 104 "vr27", 105 "vr28", 106 "vr29", 107 "vr30", 108 "vr31", 109}; 110#endif 111 112static const int tcg_target_reg_alloc_order[] = { 113 /* Registers preserved across calls */ 114 /* TCG_REG_S0 reserved for TCG_AREG0 */ 115 TCG_REG_S1, 116 TCG_REG_S2, 117 TCG_REG_S3, 118 TCG_REG_S4, 119 TCG_REG_S5, 120 TCG_REG_S6, 121 TCG_REG_S7, 122 TCG_REG_S8, 123 TCG_REG_S9, 124 125 /* Registers (potentially) clobbered across calls */ 126 TCG_REG_T0, 127 TCG_REG_T1, 128 TCG_REG_T2, 129 TCG_REG_T3, 130 TCG_REG_T4, 131 TCG_REG_T5, 132 TCG_REG_T6, 133 TCG_REG_T7, 134 TCG_REG_T8, 135 136 /* Argument registers, opposite order of allocation. */ 137 TCG_REG_A7, 138 TCG_REG_A6, 139 TCG_REG_A5, 140 TCG_REG_A4, 141 TCG_REG_A3, 142 TCG_REG_A2, 143 TCG_REG_A1, 144 TCG_REG_A0, 145 146 /* Vector registers */ 147 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, 148 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, 149 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, 150 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, 151 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, 152 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, 153 /* V24 - V31 are caller-saved, and skipped. */ 154}; 155 156static const int tcg_target_call_iarg_regs[] = { 157 TCG_REG_A0, 158 TCG_REG_A1, 159 TCG_REG_A2, 160 TCG_REG_A3, 161 TCG_REG_A4, 162 TCG_REG_A5, 163 TCG_REG_A6, 164 TCG_REG_A7, 165}; 166 167static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 168{ 169 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 170 tcg_debug_assert(slot >= 0 && slot <= 1); 171 return TCG_REG_A0 + slot; 172} 173 174#define TCG_GUEST_BASE_REG TCG_REG_S1 175 176#define TCG_CT_CONST_S12 0x100 177#define TCG_CT_CONST_S32 0x200 178#define TCG_CT_CONST_U12 0x400 179#define TCG_CT_CONST_WSZ 0x800 180#define TCG_CT_CONST_VCMP 0x1000 181#define TCG_CT_CONST_VADD 0x2000 182 183#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 184#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) 185 186static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 187{ 188 return sextract64(val, pos, len); 189} 190 191/* test if a constant matches the constraint */ 192static bool tcg_target_const_match(int64_t val, int ct, 193 TCGType type, TCGCond cond, int vece) 194{ 195 if (ct & TCG_CT_CONST) { 196 return true; 197 } 198 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 199 return true; 200 } 201 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 202 return true; 203 } 204 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 205 return true; 206 } 207 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 208 return true; 209 } 210 if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) { 211 int64_t vec_val = sextract64(val, 0, 8 << vece); 212 if (ct & TCG_CT_CONST_VCMP) { 213 switch (cond) { 214 case TCG_COND_EQ: 215 case TCG_COND_LE: 216 case TCG_COND_LT: 217 return -0x10 <= vec_val && vec_val <= 0x0f; 218 case TCG_COND_LEU: 219 case TCG_COND_LTU: 220 return 0x00 <= vec_val && vec_val <= 0x1f; 221 default: 222 return false; 223 } 224 } 225 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { 226 return true; 227 } 228 } 229 return false; 230} 231 232/* 233 * Relocations 234 */ 235 236/* 237 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 238 * complicated; a whopping stack machine is needed to stuff the fields, at 239 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 240 * needed. 241 * 242 * Hence, define our own simpler relocation types. Numbers are chosen as to 243 * not collide with potential future additions to the true ELF relocation 244 * type enum. 245 */ 246 247/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 248#define R_LOONGARCH_BR_SK16 256 249/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 250#define R_LOONGARCH_BR_SD10K16 257 251 252static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 253{ 254 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 255 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 256 257 tcg_debug_assert((offset & 3) == 0); 258 offset >>= 2; 259 if (offset == sextreg(offset, 0, 16)) { 260 *src_rw = deposit64(*src_rw, 10, 16, offset); 261 return true; 262 } 263 264 return false; 265} 266 267static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 268 const tcg_insn_unit *target) 269{ 270 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 271 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 272 273 tcg_debug_assert((offset & 3) == 0); 274 offset >>= 2; 275 if (offset == sextreg(offset, 0, 26)) { 276 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 277 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 278 return true; 279 } 280 281 return false; 282} 283 284static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 285 intptr_t value, intptr_t addend) 286{ 287 tcg_debug_assert(addend == 0); 288 switch (type) { 289 case R_LOONGARCH_BR_SK16: 290 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 291 case R_LOONGARCH_BR_SD10K16: 292 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 293 default: 294 g_assert_not_reached(); 295 } 296} 297 298#include "tcg-insn-defs.c.inc" 299 300/* 301 * TCG intrinsics 302 */ 303 304static void tcg_out_mb(TCGContext *s, TCGArg a0) 305{ 306 /* Baseline LoongArch only has the full barrier, unfortunately. */ 307 tcg_out_opc_dbar(s, 0); 308} 309 310static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 311{ 312 if (ret == arg) { 313 return true; 314 } 315 switch (type) { 316 case TCG_TYPE_I32: 317 case TCG_TYPE_I64: 318 if (ret < TCG_REG_V0) { 319 if (arg < TCG_REG_V0) { 320 /* 321 * Conventional register-register move used in LoongArch is 322 * `or dst, src, zero`. 323 */ 324 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 325 } else { 326 tcg_out_opc_movfr2gr_d(s, ret, arg); 327 } 328 } else { 329 if (arg < TCG_REG_V0) { 330 tcg_out_opc_movgr2fr_d(s, ret, arg); 331 } else { 332 tcg_out_opc_fmov_d(s, ret, arg); 333 } 334 } 335 break; 336 case TCG_TYPE_V64: 337 case TCG_TYPE_V128: 338 tcg_out_opc_vori_b(s, ret, arg, 0); 339 break; 340 case TCG_TYPE_V256: 341 tcg_out_opc_xvori_b(s, ret, arg, 0); 342 break; 343 default: 344 g_assert_not_reached(); 345 } 346 return true; 347} 348 349/* Loads a 32-bit immediate into rd, sign-extended. */ 350static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 351{ 352 tcg_target_long lo = sextreg(val, 0, 12); 353 tcg_target_long hi12 = sextreg(val, 12, 20); 354 355 /* Single-instruction cases. */ 356 if (hi12 == 0) { 357 /* val fits in uimm12: ori rd, zero, val */ 358 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 359 return; 360 } 361 if (hi12 == sextreg(lo, 12, 20)) { 362 /* val fits in simm12: addi.w rd, zero, val */ 363 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 364 return; 365 } 366 367 /* High bits must be set; load with lu12i.w + optional ori. */ 368 tcg_out_opc_lu12i_w(s, rd, hi12); 369 if (lo != 0) { 370 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 371 } 372} 373 374static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 375 tcg_target_long val) 376{ 377 /* 378 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 379 * with dedicated instructions for filling the respective bitfields 380 * below: 381 * 382 * 6 5 4 3 383 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 384 * +-----------------------+---------------------------------------+... 385 * | hi52 | hi32 | 386 * +-----------------------+---------------------------------------+... 387 * 3 2 1 388 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 389 * ...+-------------------------------------+-------------------------+ 390 * | hi12 | lo | 391 * ...+-------------------------------------+-------------------------+ 392 * 393 * Check if val belong to one of the several fast cases, before falling 394 * back to the slow path. 395 */ 396 397 intptr_t src_rx, pc_offset; 398 tcg_target_long hi12, hi32, hi52; 399 400 /* Value fits in signed i32. */ 401 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 402 tcg_out_movi_i32(s, rd, val); 403 return; 404 } 405 406 /* PC-relative cases. */ 407 src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr); 408 if ((val & 3) == 0) { 409 pc_offset = val - src_rx; 410 if (pc_offset == sextreg(pc_offset, 0, 22)) { 411 /* Single pcaddu2i. */ 412 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 413 return; 414 } 415 } 416 417 pc_offset = (val >> 12) - (src_rx >> 12); 418 if (pc_offset == sextreg(pc_offset, 0, 20)) { 419 /* Load with pcalau12i + ori. */ 420 tcg_target_long val_lo = val & 0xfff; 421 tcg_out_opc_pcalau12i(s, rd, pc_offset); 422 if (val_lo != 0) { 423 tcg_out_opc_ori(s, rd, rd, val_lo); 424 } 425 return; 426 } 427 428 hi12 = sextreg(val, 12, 20); 429 hi32 = sextreg(val, 32, 20); 430 hi52 = sextreg(val, 52, 12); 431 432 /* Single cu52i.d case. */ 433 if ((hi52 != 0) && (ctz64(val) >= 52)) { 434 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 435 return; 436 } 437 438 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 439 tcg_out_movi_i32(s, rd, val); 440 441 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 442 if (hi32 != sextreg(hi12, 20, 20)) { 443 tcg_out_opc_cu32i_d(s, rd, hi32); 444 } 445 446 if (hi52 != sextreg(hi32, 20, 12)) { 447 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 448 } 449} 450 451static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 452 TCGReg rs, tcg_target_long imm) 453{ 454 tcg_target_long lo12 = sextreg(imm, 0, 12); 455 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 456 457 /* 458 * Note that there's a hole in between hi16 and lo12: 459 * 460 * 3 2 1 0 461 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 462 * ...+-------------------------------+-------+-----------------------+ 463 * | hi16 | | lo12 | 464 * ...+-------------------------------+-------+-----------------------+ 465 * 466 * For bits within that hole, it's more efficient to use LU12I and ADD. 467 */ 468 if (imm == (hi16 << 16) + lo12) { 469 if (hi16) { 470 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 471 rs = rd; 472 } 473 if (type == TCG_TYPE_I32) { 474 tcg_out_opc_addi_w(s, rd, rs, lo12); 475 } else if (lo12) { 476 tcg_out_opc_addi_d(s, rd, rs, lo12); 477 } else { 478 tcg_out_mov(s, type, rd, rs); 479 } 480 } else { 481 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 482 if (type == TCG_TYPE_I32) { 483 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 484 } else { 485 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 486 } 487 } 488} 489 490static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 491{ 492 return false; 493} 494 495static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 496 tcg_target_long imm) 497{ 498 /* This function is only used for passing structs by reference. */ 499 g_assert_not_reached(); 500} 501 502static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 503{ 504 tcg_out_opc_andi(s, ret, arg, 0xff); 505} 506 507static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 508{ 509 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 510} 511 512static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 513{ 514 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 515} 516 517static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 518{ 519 tcg_out_opc_sext_b(s, ret, arg); 520} 521 522static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 523{ 524 tcg_out_opc_sext_h(s, ret, arg); 525} 526 527static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 528{ 529 tcg_out_opc_addi_w(s, ret, arg, 0); 530} 531 532static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 533{ 534 if (ret != arg) { 535 tcg_out_ext32s(s, ret, arg); 536 } 537} 538 539static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 540{ 541 tcg_out_ext32u(s, ret, arg); 542} 543 544static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 545{ 546 tcg_out_ext32s(s, ret, arg); 547} 548 549static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 550 TCGReg a0, TCGReg a1, TCGReg a2, 551 bool c2, bool is_32bit) 552{ 553 if (c2) { 554 /* 555 * Fast path: semantics already satisfied due to constraint and 556 * insn behavior, single instruction is enough. 557 */ 558 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 559 /* all clz/ctz insns belong to DJ-format */ 560 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 561 return; 562 } 563 564 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 565 /* a0 = a1 ? REG_TMP0 : a2 */ 566 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 567 tcg_out_opc_masknez(s, a0, a2, a1); 568 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 569} 570 571#define SETCOND_INV TCG_TARGET_NB_REGS 572#define SETCOND_NEZ (SETCOND_INV << 1) 573#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 574 575static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 576 TCGReg arg1, tcg_target_long arg2, bool c2) 577{ 578 int flags = 0; 579 580 switch (cond) { 581 case TCG_COND_EQ: /* -> NE */ 582 case TCG_COND_GE: /* -> LT */ 583 case TCG_COND_GEU: /* -> LTU */ 584 case TCG_COND_GT: /* -> LE */ 585 case TCG_COND_GTU: /* -> LEU */ 586 cond = tcg_invert_cond(cond); 587 flags ^= SETCOND_INV; 588 break; 589 default: 590 break; 591 } 592 593 switch (cond) { 594 case TCG_COND_LE: 595 case TCG_COND_LEU: 596 /* 597 * If we have a constant input, the most efficient way to implement 598 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 599 * We don't need to care for this for LE because the constant input 600 * is still constrained to int32_t, and INT32_MAX+1 is representable 601 * in the 64-bit temporary register. 602 */ 603 if (c2) { 604 if (cond == TCG_COND_LEU) { 605 /* unsigned <= -1 is true */ 606 if (arg2 == -1) { 607 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 608 return ret; 609 } 610 cond = TCG_COND_LTU; 611 } else { 612 cond = TCG_COND_LT; 613 } 614 arg2 += 1; 615 } else { 616 TCGReg tmp = arg2; 617 arg2 = arg1; 618 arg1 = tmp; 619 cond = tcg_swap_cond(cond); /* LE -> GE */ 620 cond = tcg_invert_cond(cond); /* GE -> LT */ 621 flags ^= SETCOND_INV; 622 } 623 break; 624 default: 625 break; 626 } 627 628 switch (cond) { 629 case TCG_COND_NE: 630 flags |= SETCOND_NEZ; 631 if (!c2) { 632 tcg_out_opc_xor(s, ret, arg1, arg2); 633 } else if (arg2 == 0) { 634 ret = arg1; 635 } else if (arg2 >= 0 && arg2 <= 0xfff) { 636 tcg_out_opc_xori(s, ret, arg1, arg2); 637 } else { 638 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 639 } 640 break; 641 642 case TCG_COND_LT: 643 case TCG_COND_LTU: 644 if (c2) { 645 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 646 if (cond == TCG_COND_LT) { 647 tcg_out_opc_slti(s, ret, arg1, arg2); 648 } else { 649 tcg_out_opc_sltui(s, ret, arg1, arg2); 650 } 651 break; 652 } 653 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 654 arg2 = TCG_REG_TMP0; 655 } 656 if (cond == TCG_COND_LT) { 657 tcg_out_opc_slt(s, ret, arg1, arg2); 658 } else { 659 tcg_out_opc_sltu(s, ret, arg1, arg2); 660 } 661 break; 662 663 default: 664 g_assert_not_reached(); 665 } 666 667 return ret | flags; 668} 669 670static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 671 TCGReg arg1, tcg_target_long arg2, bool c2) 672{ 673 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 674 675 if (tmpflags != ret) { 676 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 677 678 switch (tmpflags & SETCOND_FLAGS) { 679 case SETCOND_INV: 680 /* Intermediate result is boolean: simply invert. */ 681 tcg_out_opc_xori(s, ret, tmp, 1); 682 break; 683 case SETCOND_NEZ: 684 /* Intermediate result is zero/non-zero: test != 0. */ 685 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 686 break; 687 case SETCOND_NEZ | SETCOND_INV: 688 /* Intermediate result is zero/non-zero: test == 0. */ 689 tcg_out_opc_sltui(s, ret, tmp, 1); 690 break; 691 default: 692 g_assert_not_reached(); 693 } 694 } 695} 696 697static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 698 TCGReg c1, tcg_target_long c2, bool const2, 699 TCGReg v1, TCGReg v2) 700{ 701 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 702 TCGReg t; 703 704 /* Standardize the test below to t != 0. */ 705 if (tmpflags & SETCOND_INV) { 706 t = v1, v1 = v2, v2 = t; 707 } 708 709 t = tmpflags & ~SETCOND_FLAGS; 710 if (v1 == TCG_REG_ZERO) { 711 tcg_out_opc_masknez(s, ret, v2, t); 712 } else if (v2 == TCG_REG_ZERO) { 713 tcg_out_opc_maskeqz(s, ret, v1, t); 714 } else { 715 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 716 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 717 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 718 } 719} 720 721/* 722 * Branch helpers 723 */ 724 725static const struct { 726 LoongArchInsn op; 727 bool swap; 728} tcg_brcond_to_loongarch[] = { 729 [TCG_COND_EQ] = { OPC_BEQ, false }, 730 [TCG_COND_NE] = { OPC_BNE, false }, 731 [TCG_COND_LT] = { OPC_BGT, true }, 732 [TCG_COND_GE] = { OPC_BLE, true }, 733 [TCG_COND_LE] = { OPC_BLE, false }, 734 [TCG_COND_GT] = { OPC_BGT, false }, 735 [TCG_COND_LTU] = { OPC_BGTU, true }, 736 [TCG_COND_GEU] = { OPC_BLEU, true }, 737 [TCG_COND_LEU] = { OPC_BLEU, false }, 738 [TCG_COND_GTU] = { OPC_BGTU, false } 739}; 740 741static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 742 TCGReg arg2, TCGLabel *l) 743{ 744 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 745 746 tcg_debug_assert(op != 0); 747 748 if (tcg_brcond_to_loongarch[cond].swap) { 749 TCGReg t = arg1; 750 arg1 = arg2; 751 arg2 = t; 752 } 753 754 /* all conditional branch insns belong to DJSk16-format */ 755 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 756 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 757} 758 759static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 760{ 761 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 762 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 763 764 tcg_debug_assert((offset & 3) == 0); 765 if (offset == sextreg(offset, 0, 28)) { 766 /* short jump: +/- 256MiB */ 767 if (tail) { 768 tcg_out_opc_b(s, offset >> 2); 769 } else { 770 tcg_out_opc_bl(s, offset >> 2); 771 } 772 } else if (offset == sextreg(offset, 0, 38)) { 773 /* long jump: +/- 256GiB */ 774 tcg_target_long lo = sextreg(offset, 0, 18); 775 tcg_target_long hi = offset - lo; 776 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 777 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 778 } else { 779 /* far jump: 64-bit */ 780 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 781 tcg_target_long hi = (tcg_target_long)arg - lo; 782 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 783 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 784 } 785} 786 787static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 788 const TCGHelperInfo *info) 789{ 790 tcg_out_call_int(s, arg, false); 791} 792 793/* 794 * Load/store helpers 795 */ 796 797static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 798 TCGReg addr, intptr_t offset) 799{ 800 intptr_t imm12 = sextreg(offset, 0, 12); 801 802 if (offset != imm12) { 803 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 804 805 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 806 imm12 = sextreg(diff, 0, 12); 807 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 808 } else { 809 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 810 if (addr != TCG_REG_ZERO) { 811 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 812 } 813 } 814 addr = TCG_REG_TMP2; 815 } 816 817 switch (opc) { 818 case OPC_LD_B: 819 case OPC_LD_BU: 820 case OPC_LD_H: 821 case OPC_LD_HU: 822 case OPC_LD_W: 823 case OPC_LD_WU: 824 case OPC_LD_D: 825 case OPC_ST_B: 826 case OPC_ST_H: 827 case OPC_ST_W: 828 case OPC_ST_D: 829 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 830 break; 831 case OPC_FLD_S: 832 case OPC_FLD_D: 833 case OPC_FST_S: 834 case OPC_FST_D: 835 tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12)); 836 break; 837 default: 838 g_assert_not_reached(); 839 } 840} 841 842static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest, 843 TCGReg base, intptr_t offset) 844{ 845 switch (type) { 846 case TCG_TYPE_I32: 847 if (dest < TCG_REG_V0) { 848 tcg_out_ldst(s, OPC_LD_W, dest, base, offset); 849 } else { 850 tcg_out_ldst(s, OPC_FLD_S, dest, base, offset); 851 } 852 break; 853 case TCG_TYPE_I64: 854 case TCG_TYPE_V64: 855 if (dest < TCG_REG_V0) { 856 tcg_out_ldst(s, OPC_LD_D, dest, base, offset); 857 } else { 858 tcg_out_ldst(s, OPC_FLD_D, dest, base, offset); 859 } 860 break; 861 case TCG_TYPE_V128: 862 if (-0x800 <= offset && offset <= 0x7ff) { 863 tcg_out_opc_vld(s, dest, base, offset); 864 } else { 865 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 866 tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0); 867 } 868 break; 869 case TCG_TYPE_V256: 870 if (-0x800 <= offset && offset <= 0x7ff) { 871 tcg_out_opc_xvld(s, dest, base, offset); 872 } else { 873 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 874 tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0); 875 } 876 break; 877 default: 878 g_assert_not_reached(); 879 } 880} 881 882static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src, 883 TCGReg base, intptr_t offset) 884{ 885 switch (type) { 886 case TCG_TYPE_I32: 887 if (src < TCG_REG_V0) { 888 tcg_out_ldst(s, OPC_ST_W, src, base, offset); 889 } else { 890 tcg_out_ldst(s, OPC_FST_S, src, base, offset); 891 } 892 break; 893 case TCG_TYPE_I64: 894 case TCG_TYPE_V64: 895 if (src < TCG_REG_V0) { 896 tcg_out_ldst(s, OPC_ST_D, src, base, offset); 897 } else { 898 tcg_out_ldst(s, OPC_FST_D, src, base, offset); 899 } 900 break; 901 case TCG_TYPE_V128: 902 if (-0x800 <= offset && offset <= 0x7ff) { 903 tcg_out_opc_vst(s, src, base, offset); 904 } else { 905 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 906 tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0); 907 } 908 break; 909 case TCG_TYPE_V256: 910 if (-0x800 <= offset && offset <= 0x7ff) { 911 tcg_out_opc_xvst(s, src, base, offset); 912 } else { 913 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 914 tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0); 915 } 916 break; 917 default: 918 g_assert_not_reached(); 919 } 920} 921 922static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 923 TCGReg base, intptr_t ofs) 924{ 925 if (val == 0) { 926 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 927 return true; 928 } 929 return false; 930} 931 932/* 933 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 934 */ 935 936static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 937{ 938 tcg_out_opc_b(s, 0); 939 return reloc_br_sd10k16(s->code_ptr - 1, target); 940} 941 942static const TCGLdstHelperParam ldst_helper_param = { 943 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 944}; 945 946static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 947{ 948 MemOp opc = get_memop(l->oi); 949 950 /* resolve label address */ 951 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 952 return false; 953 } 954 955 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 956 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 957 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 958 return tcg_out_goto(s, l->raddr); 959} 960 961static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 962{ 963 MemOp opc = get_memop(l->oi); 964 965 /* resolve label address */ 966 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 967 return false; 968 } 969 970 tcg_out_st_helper_args(s, l, &ldst_helper_param); 971 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 972 return tcg_out_goto(s, l->raddr); 973} 974 975typedef struct { 976 TCGReg base; 977 TCGReg index; 978 TCGAtomAlign aa; 979} HostAddress; 980 981bool tcg_target_has_memory_bswap(MemOp memop) 982{ 983 return false; 984} 985 986/* We expect to use a 12-bit negative offset from ENV. */ 987#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 988 989/* 990 * For system-mode, perform the TLB load and compare. 991 * For user-mode, perform any required alignment tests. 992 * In both cases, return a TCGLabelQemuLdst structure if the slow path 993 * is required and fill in @h with the host address for the fast path. 994 */ 995static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 996 TCGReg addr_reg, MemOpIdx oi, 997 bool is_ld) 998{ 999 TCGType addr_type = s->addr_type; 1000 TCGLabelQemuLdst *ldst = NULL; 1001 MemOp opc = get_memop(oi); 1002 MemOp a_bits; 1003 1004 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 1005 a_bits = h->aa.align; 1006 1007 if (tcg_use_softmmu) { 1008 unsigned s_bits = opc & MO_SIZE; 1009 int mem_index = get_mmuidx(oi); 1010 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 1011 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 1012 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 1013 1014 ldst = new_ldst_label(s); 1015 ldst->is_ld = is_ld; 1016 ldst->oi = oi; 1017 ldst->addr_reg = addr_reg; 1018 1019 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 1020 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 1021 1022 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 1023 s->page_bits - CPU_TLB_ENTRY_BITS); 1024 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 1025 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 1026 1027 /* Load the tlb comparator and the addend. */ 1028 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 1029 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 1030 is_ld ? offsetof(CPUTLBEntry, addr_read) 1031 : offsetof(CPUTLBEntry, addr_write)); 1032 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 1033 offsetof(CPUTLBEntry, addend)); 1034 1035 /* 1036 * For aligned accesses, we check the first byte and include the 1037 * alignment bits within the address. For unaligned access, we 1038 * check that we don't cross pages using the address of the last 1039 * byte of the access. 1040 */ 1041 if (a_bits < s_bits) { 1042 unsigned a_mask = (1u << a_bits) - 1; 1043 unsigned s_mask = (1u << s_bits) - 1; 1044 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); 1045 } else { 1046 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); 1047 } 1048 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, 1049 a_bits, s->page_bits - 1); 1050 1051 /* Compare masked address with the TLB entry. */ 1052 ldst->label_ptr[0] = s->code_ptr; 1053 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 1054 1055 h->index = TCG_REG_TMP2; 1056 } else { 1057 if (a_bits) { 1058 ldst = new_ldst_label(s); 1059 1060 ldst->is_ld = is_ld; 1061 ldst->oi = oi; 1062 ldst->addr_reg = addr_reg; 1063 1064 /* 1065 * Without micro-architecture details, we don't know which of 1066 * bstrpick or andi is faster, so use bstrpick as it's not 1067 * constrained by imm field width. Not to say alignments >= 2^12 1068 * are going to happen any time soon. 1069 */ 1070 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 1071 1072 ldst->label_ptr[0] = s->code_ptr; 1073 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 1074 } 1075 1076 h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 1077 } 1078 1079 if (addr_type == TCG_TYPE_I32) { 1080 h->base = TCG_REG_TMP0; 1081 tcg_out_ext32u(s, h->base, addr_reg); 1082 } else { 1083 h->base = addr_reg; 1084 } 1085 1086 return ldst; 1087} 1088 1089static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 1090 TCGReg rd, HostAddress h) 1091{ 1092 /* Byte swapping is left to middle-end expansion. */ 1093 tcg_debug_assert((opc & MO_BSWAP) == 0); 1094 1095 switch (opc & MO_SSIZE) { 1096 case MO_UB: 1097 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 1098 break; 1099 case MO_SB: 1100 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 1101 break; 1102 case MO_UW: 1103 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 1104 break; 1105 case MO_SW: 1106 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 1107 break; 1108 case MO_UL: 1109 if (type == TCG_TYPE_I64) { 1110 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 1111 break; 1112 } 1113 /* fallthrough */ 1114 case MO_SL: 1115 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1116 break; 1117 case MO_UQ: 1118 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1119 break; 1120 default: 1121 g_assert_not_reached(); 1122 } 1123} 1124 1125static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1126 MemOpIdx oi, TCGType data_type) 1127{ 1128 TCGLabelQemuLdst *ldst; 1129 HostAddress h; 1130 1131 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1132 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1133 1134 if (ldst) { 1135 ldst->type = data_type; 1136 ldst->datalo_reg = data_reg; 1137 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1138 } 1139} 1140 1141static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1142 TCGReg rd, HostAddress h) 1143{ 1144 /* Byte swapping is left to middle-end expansion. */ 1145 tcg_debug_assert((opc & MO_BSWAP) == 0); 1146 1147 switch (opc & MO_SIZE) { 1148 case MO_8: 1149 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1150 break; 1151 case MO_16: 1152 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1153 break; 1154 case MO_32: 1155 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1156 break; 1157 case MO_64: 1158 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1159 break; 1160 default: 1161 g_assert_not_reached(); 1162 } 1163} 1164 1165static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1166 MemOpIdx oi, TCGType data_type) 1167{ 1168 TCGLabelQemuLdst *ldst; 1169 HostAddress h; 1170 1171 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1172 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1173 1174 if (ldst) { 1175 ldst->type = data_type; 1176 ldst->datalo_reg = data_reg; 1177 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1178 } 1179} 1180 1181static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi, 1182 TCGReg addr_reg, MemOpIdx oi, bool is_ld) 1183{ 1184 TCGLabelQemuLdst *ldst; 1185 HostAddress h; 1186 1187 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); 1188 1189 if (h.aa.atom == MO_128) { 1190 /* 1191 * Use VLDX/VSTX when 128-bit atomicity is required. 1192 * If address is aligned to 16-bytes, the 128-bit load/store is atomic. 1193 */ 1194 if (is_ld) { 1195 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index); 1196 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0); 1197 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1); 1198 } else { 1199 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0); 1200 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1); 1201 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index); 1202 } 1203 } else { 1204 /* Otherwise use a pair of LD/ST. */ 1205 TCGReg base = h.base; 1206 if (h.index != TCG_REG_ZERO) { 1207 base = TCG_REG_TMP0; 1208 tcg_out_opc_add_d(s, base, h.base, h.index); 1209 } 1210 if (is_ld) { 1211 tcg_debug_assert(base != data_lo); 1212 tcg_out_opc_ld_d(s, data_lo, base, 0); 1213 tcg_out_opc_ld_d(s, data_hi, base, 8); 1214 } else { 1215 tcg_out_opc_st_d(s, data_lo, base, 0); 1216 tcg_out_opc_st_d(s, data_hi, base, 8); 1217 } 1218 } 1219 1220 if (ldst) { 1221 ldst->type = TCG_TYPE_I128; 1222 ldst->datalo_reg = data_lo; 1223 ldst->datahi_reg = data_hi; 1224 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1225 } 1226} 1227 1228/* 1229 * Entry-points 1230 */ 1231 1232static const tcg_insn_unit *tb_ret_addr; 1233 1234static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1235{ 1236 /* Reuse the zeroing that exists for goto_ptr. */ 1237 if (a0 == 0) { 1238 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1239 } else { 1240 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1241 tcg_out_call_int(s, tb_ret_addr, true); 1242 } 1243} 1244 1245static void tcg_out_goto_tb(TCGContext *s, int which) 1246{ 1247 /* 1248 * Direct branch, or load indirect address, to be patched 1249 * by tb_target_set_jmp_target. Check indirect load offset 1250 * in range early, regardless of direct branch distance, 1251 * via assert within tcg_out_opc_pcaddu2i. 1252 */ 1253 uintptr_t i_addr = get_jmp_target_addr(s, which); 1254 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1255 1256 set_jmp_insn_offset(s, which); 1257 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1258 1259 /* Finish the load and indirect branch. */ 1260 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1261 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1262 set_jmp_reset_offset(s, which); 1263} 1264 1265void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1266 uintptr_t jmp_rx, uintptr_t jmp_rw) 1267{ 1268 uintptr_t d_addr = tb->jmp_target_addr[n]; 1269 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1270 tcg_insn_unit insn; 1271 1272 /* Either directly branch, or load slot address for indirect branch. */ 1273 if (d_disp == sextreg(d_disp, 0, 26)) { 1274 insn = encode_sd10k16_insn(OPC_B, d_disp); 1275 } else { 1276 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1277 intptr_t i_disp = i_addr - jmp_rx; 1278 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1279 } 1280 1281 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1282 flush_idcache_range(jmp_rx, jmp_rw, 4); 1283} 1284 1285 1286static void tgen_add(TCGContext *s, TCGType type, 1287 TCGReg a0, TCGReg a1, TCGReg a2) 1288{ 1289 if (type == TCG_TYPE_I32) { 1290 tcg_out_opc_add_w(s, a0, a1, a2); 1291 } else { 1292 tcg_out_opc_add_d(s, a0, a1, a2); 1293 } 1294} 1295 1296static const TCGOutOpBinary outop_add = { 1297 .base.static_constraint = C_O1_I2(r, r, rJ), 1298 .out_rrr = tgen_add, 1299 .out_rri = tcg_out_addi, 1300}; 1301 1302static void tgen_and(TCGContext *s, TCGType type, 1303 TCGReg a0, TCGReg a1, TCGReg a2) 1304{ 1305 tcg_out_opc_and(s, a0, a1, a2); 1306} 1307 1308static void tgen_andi(TCGContext *s, TCGType type, 1309 TCGReg a0, TCGReg a1, tcg_target_long a2) 1310{ 1311 tcg_out_opc_andi(s, a0, a1, a2); 1312} 1313 1314static const TCGOutOpBinary outop_and = { 1315 .base.static_constraint = C_O1_I2(r, r, rU), 1316 .out_rrr = tgen_and, 1317 .out_rri = tgen_andi, 1318}; 1319 1320static void tgen_andc(TCGContext *s, TCGType type, 1321 TCGReg a0, TCGReg a1, TCGReg a2) 1322{ 1323 tcg_out_opc_andn(s, a0, a1, a2); 1324} 1325 1326static const TCGOutOpBinary outop_andc = { 1327 .base.static_constraint = C_O1_I2(r, r, r), 1328 .out_rrr = tgen_andc, 1329}; 1330 1331static void tgen_or(TCGContext *s, TCGType type, 1332 TCGReg a0, TCGReg a1, TCGReg a2) 1333{ 1334 tcg_out_opc_or(s, a0, a1, a2); 1335} 1336 1337static void tgen_ori(TCGContext *s, TCGType type, 1338 TCGReg a0, TCGReg a1, tcg_target_long a2) 1339{ 1340 tcg_out_opc_ori(s, a0, a1, a2); 1341} 1342 1343static const TCGOutOpBinary outop_or = { 1344 .base.static_constraint = C_O1_I2(r, r, rU), 1345 .out_rrr = tgen_or, 1346 .out_rri = tgen_ori, 1347}; 1348 1349static void tgen_orc(TCGContext *s, TCGType type, 1350 TCGReg a0, TCGReg a1, TCGReg a2) 1351{ 1352 tcg_out_opc_orn(s, a0, a1, a2); 1353} 1354 1355static const TCGOutOpBinary outop_orc = { 1356 .base.static_constraint = C_O1_I2(r, r, r), 1357 .out_rrr = tgen_orc, 1358}; 1359 1360 1361static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type, 1362 const TCGArg args[TCG_MAX_OP_ARGS], 1363 const int const_args[TCG_MAX_OP_ARGS]) 1364{ 1365 TCGArg a0 = args[0]; 1366 TCGArg a1 = args[1]; 1367 TCGArg a2 = args[2]; 1368 TCGArg a3 = args[3]; 1369 int c2 = const_args[2]; 1370 1371 switch (opc) { 1372 case INDEX_op_mb: 1373 tcg_out_mb(s, a0); 1374 break; 1375 1376 case INDEX_op_goto_ptr: 1377 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1378 break; 1379 1380 case INDEX_op_br: 1381 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1382 0); 1383 tcg_out_opc_b(s, 0); 1384 break; 1385 1386 case INDEX_op_brcond_i32: 1387 case INDEX_op_brcond_i64: 1388 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1389 break; 1390 1391 case INDEX_op_extrh_i64_i32: 1392 tcg_out_opc_srai_d(s, a0, a1, 32); 1393 break; 1394 1395 case INDEX_op_not_i32: 1396 case INDEX_op_not_i64: 1397 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1398 break; 1399 1400 case INDEX_op_nor_i32: 1401 case INDEX_op_nor_i64: 1402 if (c2) { 1403 tcg_out_opc_ori(s, a0, a1, a2); 1404 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1405 } else { 1406 tcg_out_opc_nor(s, a0, a1, a2); 1407 } 1408 break; 1409 1410 case INDEX_op_xor_i32: 1411 case INDEX_op_xor_i64: 1412 if (c2) { 1413 tcg_out_opc_xori(s, a0, a1, a2); 1414 } else { 1415 tcg_out_opc_xor(s, a0, a1, a2); 1416 } 1417 break; 1418 1419 case INDEX_op_extract_i32: 1420 if (a2 == 0 && args[3] <= 12) { 1421 tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1); 1422 } else { 1423 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1424 } 1425 break; 1426 case INDEX_op_extract_i64: 1427 if (a2 == 0 && args[3] <= 12) { 1428 tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1); 1429 } else { 1430 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1431 } 1432 break; 1433 1434 case INDEX_op_sextract_i64: 1435 if (a2 + args[3] == 32) { 1436 if (a2 == 0) { 1437 tcg_out_ext32s(s, a0, a1); 1438 } else { 1439 tcg_out_opc_srai_w(s, a0, a1, a2); 1440 } 1441 break; 1442 } 1443 /* FALLTHRU */ 1444 case INDEX_op_sextract_i32: 1445 if (a2 == 0 && args[3] == 8) { 1446 tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1); 1447 } else if (a2 == 0 && args[3] == 16) { 1448 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1); 1449 } else { 1450 g_assert_not_reached(); 1451 } 1452 break; 1453 1454 case INDEX_op_deposit_i32: 1455 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1456 break; 1457 case INDEX_op_deposit_i64: 1458 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1459 break; 1460 1461 case INDEX_op_bswap16_i32: 1462 case INDEX_op_bswap16_i64: 1463 tcg_out_opc_revb_2h(s, a0, a1); 1464 if (a2 & TCG_BSWAP_OS) { 1465 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1466 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1467 tcg_out_ext16u(s, a0, a0); 1468 } 1469 break; 1470 1471 case INDEX_op_bswap32_i32: 1472 /* All 32-bit values are computed sign-extended in the register. */ 1473 a2 = TCG_BSWAP_OS; 1474 /* fallthrough */ 1475 case INDEX_op_bswap32_i64: 1476 tcg_out_opc_revb_2w(s, a0, a1); 1477 if (a2 & TCG_BSWAP_OS) { 1478 tcg_out_ext32s(s, a0, a0); 1479 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1480 tcg_out_ext32u(s, a0, a0); 1481 } 1482 break; 1483 1484 case INDEX_op_bswap64_i64: 1485 tcg_out_opc_revb_d(s, a0, a1); 1486 break; 1487 1488 case INDEX_op_clz_i32: 1489 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1490 break; 1491 case INDEX_op_clz_i64: 1492 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1493 break; 1494 1495 case INDEX_op_ctz_i32: 1496 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1497 break; 1498 case INDEX_op_ctz_i64: 1499 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1500 break; 1501 1502 case INDEX_op_shl_i32: 1503 if (c2) { 1504 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1505 } else { 1506 tcg_out_opc_sll_w(s, a0, a1, a2); 1507 } 1508 break; 1509 case INDEX_op_shl_i64: 1510 if (c2) { 1511 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1512 } else { 1513 tcg_out_opc_sll_d(s, a0, a1, a2); 1514 } 1515 break; 1516 1517 case INDEX_op_shr_i32: 1518 if (c2) { 1519 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1520 } else { 1521 tcg_out_opc_srl_w(s, a0, a1, a2); 1522 } 1523 break; 1524 case INDEX_op_shr_i64: 1525 if (c2) { 1526 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1527 } else { 1528 tcg_out_opc_srl_d(s, a0, a1, a2); 1529 } 1530 break; 1531 1532 case INDEX_op_sar_i32: 1533 if (c2) { 1534 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1535 } else { 1536 tcg_out_opc_sra_w(s, a0, a1, a2); 1537 } 1538 break; 1539 case INDEX_op_sar_i64: 1540 if (c2) { 1541 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1542 } else { 1543 tcg_out_opc_sra_d(s, a0, a1, a2); 1544 } 1545 break; 1546 1547 case INDEX_op_rotl_i32: 1548 /* transform into equivalent rotr/rotri */ 1549 if (c2) { 1550 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1551 } else { 1552 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1553 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1554 } 1555 break; 1556 case INDEX_op_rotl_i64: 1557 /* transform into equivalent rotr/rotri */ 1558 if (c2) { 1559 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1560 } else { 1561 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1562 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1563 } 1564 break; 1565 1566 case INDEX_op_rotr_i32: 1567 if (c2) { 1568 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1569 } else { 1570 tcg_out_opc_rotr_w(s, a0, a1, a2); 1571 } 1572 break; 1573 case INDEX_op_rotr_i64: 1574 if (c2) { 1575 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1576 } else { 1577 tcg_out_opc_rotr_d(s, a0, a1, a2); 1578 } 1579 break; 1580 1581 case INDEX_op_sub_i32: 1582 if (c2) { 1583 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1584 } else { 1585 tcg_out_opc_sub_w(s, a0, a1, a2); 1586 } 1587 break; 1588 case INDEX_op_sub_i64: 1589 if (c2) { 1590 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1591 } else { 1592 tcg_out_opc_sub_d(s, a0, a1, a2); 1593 } 1594 break; 1595 1596 case INDEX_op_neg_i32: 1597 tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1); 1598 break; 1599 case INDEX_op_neg_i64: 1600 tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1); 1601 break; 1602 1603 case INDEX_op_mul_i32: 1604 tcg_out_opc_mul_w(s, a0, a1, a2); 1605 break; 1606 case INDEX_op_mul_i64: 1607 tcg_out_opc_mul_d(s, a0, a1, a2); 1608 break; 1609 1610 case INDEX_op_mulsh_i32: 1611 tcg_out_opc_mulh_w(s, a0, a1, a2); 1612 break; 1613 case INDEX_op_mulsh_i64: 1614 tcg_out_opc_mulh_d(s, a0, a1, a2); 1615 break; 1616 1617 case INDEX_op_muluh_i32: 1618 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1619 break; 1620 case INDEX_op_muluh_i64: 1621 tcg_out_opc_mulh_du(s, a0, a1, a2); 1622 break; 1623 1624 case INDEX_op_div_i32: 1625 tcg_out_opc_div_w(s, a0, a1, a2); 1626 break; 1627 case INDEX_op_div_i64: 1628 tcg_out_opc_div_d(s, a0, a1, a2); 1629 break; 1630 1631 case INDEX_op_divu_i32: 1632 tcg_out_opc_div_wu(s, a0, a1, a2); 1633 break; 1634 case INDEX_op_divu_i64: 1635 tcg_out_opc_div_du(s, a0, a1, a2); 1636 break; 1637 1638 case INDEX_op_rem_i32: 1639 tcg_out_opc_mod_w(s, a0, a1, a2); 1640 break; 1641 case INDEX_op_rem_i64: 1642 tcg_out_opc_mod_d(s, a0, a1, a2); 1643 break; 1644 1645 case INDEX_op_remu_i32: 1646 tcg_out_opc_mod_wu(s, a0, a1, a2); 1647 break; 1648 case INDEX_op_remu_i64: 1649 tcg_out_opc_mod_du(s, a0, a1, a2); 1650 break; 1651 1652 case INDEX_op_setcond_i32: 1653 case INDEX_op_setcond_i64: 1654 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1655 break; 1656 1657 case INDEX_op_movcond_i32: 1658 case INDEX_op_movcond_i64: 1659 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1660 break; 1661 1662 case INDEX_op_ld8s_i32: 1663 case INDEX_op_ld8s_i64: 1664 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1665 break; 1666 case INDEX_op_ld8u_i32: 1667 case INDEX_op_ld8u_i64: 1668 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1669 break; 1670 case INDEX_op_ld16s_i32: 1671 case INDEX_op_ld16s_i64: 1672 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1673 break; 1674 case INDEX_op_ld16u_i32: 1675 case INDEX_op_ld16u_i64: 1676 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1677 break; 1678 case INDEX_op_ld_i32: 1679 case INDEX_op_ld32s_i64: 1680 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1681 break; 1682 case INDEX_op_ld32u_i64: 1683 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1684 break; 1685 case INDEX_op_ld_i64: 1686 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1687 break; 1688 1689 case INDEX_op_st8_i32: 1690 case INDEX_op_st8_i64: 1691 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1692 break; 1693 case INDEX_op_st16_i32: 1694 case INDEX_op_st16_i64: 1695 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1696 break; 1697 case INDEX_op_st_i32: 1698 case INDEX_op_st32_i64: 1699 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1700 break; 1701 case INDEX_op_st_i64: 1702 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1703 break; 1704 1705 case INDEX_op_qemu_ld_i32: 1706 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1707 break; 1708 case INDEX_op_qemu_ld_i64: 1709 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1710 break; 1711 case INDEX_op_qemu_ld_i128: 1712 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true); 1713 break; 1714 case INDEX_op_qemu_st_i32: 1715 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1716 break; 1717 case INDEX_op_qemu_st_i64: 1718 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1719 break; 1720 case INDEX_op_qemu_st_i128: 1721 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false); 1722 break; 1723 1724 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1725 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1726 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1727 case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */ 1728 case INDEX_op_extu_i32_i64: 1729 case INDEX_op_extrl_i64_i32: 1730 default: 1731 g_assert_not_reached(); 1732 } 1733} 1734 1735static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 1736 TCGReg rd, TCGReg rs) 1737{ 1738 static const LoongArchInsn repl_insn[2][4] = { 1739 { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H, 1740 OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D }, 1741 { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H, 1742 OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D }, 1743 }; 1744 bool lasx = type == TCG_TYPE_V256; 1745 1746 tcg_debug_assert(vece <= MO_64); 1747 tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs)); 1748 return true; 1749} 1750 1751static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 1752 TCGReg r, TCGReg base, intptr_t offset) 1753{ 1754 bool lasx = type == TCG_TYPE_V256; 1755 1756 /* Handle imm overflow and division (vldrepl.d imm is divided by 8). */ 1757 if (offset < -0x800 || offset > 0x7ff || 1758 (offset & ((1 << vece) - 1)) != 0) { 1759 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset); 1760 base = TCG_REG_TMP0; 1761 offset = 0; 1762 } 1763 offset >>= vece; 1764 1765 switch (vece) { 1766 case MO_8: 1767 if (lasx) { 1768 tcg_out_opc_xvldrepl_b(s, r, base, offset); 1769 } else { 1770 tcg_out_opc_vldrepl_b(s, r, base, offset); 1771 } 1772 break; 1773 case MO_16: 1774 if (lasx) { 1775 tcg_out_opc_xvldrepl_h(s, r, base, offset); 1776 } else { 1777 tcg_out_opc_vldrepl_h(s, r, base, offset); 1778 } 1779 break; 1780 case MO_32: 1781 if (lasx) { 1782 tcg_out_opc_xvldrepl_w(s, r, base, offset); 1783 } else { 1784 tcg_out_opc_vldrepl_w(s, r, base, offset); 1785 } 1786 break; 1787 case MO_64: 1788 if (lasx) { 1789 tcg_out_opc_xvldrepl_d(s, r, base, offset); 1790 } else { 1791 tcg_out_opc_vldrepl_d(s, r, base, offset); 1792 } 1793 break; 1794 default: 1795 g_assert_not_reached(); 1796 } 1797 return true; 1798} 1799 1800static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 1801 TCGReg rd, int64_t v64) 1802{ 1803 /* Try vldi if imm can fit */ 1804 int64_t value = sextract64(v64, 0, 8 << vece); 1805 if (-0x200 <= value && value <= 0x1FF) { 1806 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF); 1807 1808 if (type == TCG_TYPE_V256) { 1809 tcg_out_opc_xvldi(s, rd, imm); 1810 } else { 1811 tcg_out_opc_vldi(s, rd, imm); 1812 } 1813 return; 1814 } 1815 1816 /* TODO: vldi patterns when imm 12 is set */ 1817 1818 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value); 1819 tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0); 1820} 1821 1822static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece, 1823 TCGArg a0, TCGArg a1, TCGArg a2, 1824 bool a2_is_const, bool is_add) 1825{ 1826 static const LoongArchInsn add_vec_insn[2][4] = { 1827 { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D }, 1828 { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D }, 1829 }; 1830 static const LoongArchInsn add_vec_imm_insn[2][4] = { 1831 { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU }, 1832 { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU }, 1833 }; 1834 static const LoongArchInsn sub_vec_insn[2][4] = { 1835 { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D }, 1836 { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D }, 1837 }; 1838 static const LoongArchInsn sub_vec_imm_insn[2][4] = { 1839 { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU }, 1840 { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU }, 1841 }; 1842 LoongArchInsn insn; 1843 1844 if (a2_is_const) { 1845 int64_t value = sextract64(a2, 0, 8 << vece); 1846 1847 if (!is_add) { 1848 value = -value; 1849 } 1850 if (value < 0) { 1851 insn = sub_vec_imm_insn[lasx][vece]; 1852 value = -value; 1853 } else { 1854 insn = add_vec_imm_insn[lasx][vece]; 1855 } 1856 1857 /* Constraint TCG_CT_CONST_VADD ensures validity. */ 1858 tcg_debug_assert(0 <= value && value <= 0x1f); 1859 1860 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value)); 1861 return; 1862 } 1863 1864 if (is_add) { 1865 insn = add_vec_insn[lasx][vece]; 1866 } else { 1867 insn = sub_vec_insn[lasx][vece]; 1868 } 1869 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2)); 1870} 1871 1872static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 1873 unsigned vecl, unsigned vece, 1874 const TCGArg args[TCG_MAX_OP_ARGS], 1875 const int const_args[TCG_MAX_OP_ARGS]) 1876{ 1877 TCGType type = vecl + TCG_TYPE_V64; 1878 bool lasx = type == TCG_TYPE_V256; 1879 TCGArg a0, a1, a2, a3; 1880 LoongArchInsn insn; 1881 1882 static const LoongArchInsn cmp_vec_insn[16][2][4] = { 1883 [TCG_COND_EQ] = { 1884 { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D }, 1885 { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D }, 1886 }, 1887 [TCG_COND_LE] = { 1888 { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D }, 1889 { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D }, 1890 }, 1891 [TCG_COND_LEU] = { 1892 { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU }, 1893 { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU }, 1894 }, 1895 [TCG_COND_LT] = { 1896 { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D }, 1897 { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D }, 1898 }, 1899 [TCG_COND_LTU] = { 1900 { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU }, 1901 { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU }, 1902 } 1903 }; 1904 static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = { 1905 [TCG_COND_EQ] = { 1906 { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D }, 1907 { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D }, 1908 }, 1909 [TCG_COND_LE] = { 1910 { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D }, 1911 { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D }, 1912 }, 1913 [TCG_COND_LEU] = { 1914 { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU }, 1915 { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU }, 1916 }, 1917 [TCG_COND_LT] = { 1918 { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D }, 1919 { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D }, 1920 }, 1921 [TCG_COND_LTU] = { 1922 { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU }, 1923 { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU }, 1924 } 1925 }; 1926 static const LoongArchInsn neg_vec_insn[2][4] = { 1927 { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D }, 1928 { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D }, 1929 }; 1930 static const LoongArchInsn mul_vec_insn[2][4] = { 1931 { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D }, 1932 { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D }, 1933 }; 1934 static const LoongArchInsn smin_vec_insn[2][4] = { 1935 { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D }, 1936 { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D }, 1937 }; 1938 static const LoongArchInsn umin_vec_insn[2][4] = { 1939 { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU }, 1940 { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU }, 1941 }; 1942 static const LoongArchInsn smax_vec_insn[2][4] = { 1943 { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D }, 1944 { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D }, 1945 }; 1946 static const LoongArchInsn umax_vec_insn[2][4] = { 1947 { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU }, 1948 { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU }, 1949 }; 1950 static const LoongArchInsn ssadd_vec_insn[2][4] = { 1951 { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D }, 1952 { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D }, 1953 }; 1954 static const LoongArchInsn usadd_vec_insn[2][4] = { 1955 { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU }, 1956 { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU }, 1957 }; 1958 static const LoongArchInsn sssub_vec_insn[2][4] = { 1959 { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D }, 1960 { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D }, 1961 }; 1962 static const LoongArchInsn ussub_vec_insn[2][4] = { 1963 { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU }, 1964 { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU }, 1965 }; 1966 static const LoongArchInsn shlv_vec_insn[2][4] = { 1967 { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D }, 1968 { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D }, 1969 }; 1970 static const LoongArchInsn shrv_vec_insn[2][4] = { 1971 { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D }, 1972 { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D }, 1973 }; 1974 static const LoongArchInsn sarv_vec_insn[2][4] = { 1975 { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D }, 1976 { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D }, 1977 }; 1978 static const LoongArchInsn shli_vec_insn[2][4] = { 1979 { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D }, 1980 { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D }, 1981 }; 1982 static const LoongArchInsn shri_vec_insn[2][4] = { 1983 { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D }, 1984 { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D }, 1985 }; 1986 static const LoongArchInsn sari_vec_insn[2][4] = { 1987 { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D }, 1988 { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D }, 1989 }; 1990 static const LoongArchInsn rotrv_vec_insn[2][4] = { 1991 { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D }, 1992 { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D }, 1993 }; 1994 static const LoongArchInsn rotri_vec_insn[2][4] = { 1995 { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D }, 1996 { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D }, 1997 }; 1998 1999 a0 = args[0]; 2000 a1 = args[1]; 2001 a2 = args[2]; 2002 a3 = args[3]; 2003 2004 switch (opc) { 2005 case INDEX_op_st_vec: 2006 tcg_out_st(s, type, a0, a1, a2); 2007 break; 2008 case INDEX_op_ld_vec: 2009 tcg_out_ld(s, type, a0, a1, a2); 2010 break; 2011 case INDEX_op_and_vec: 2012 insn = lasx ? OPC_XVAND_V : OPC_VAND_V; 2013 goto vdvjvk; 2014 case INDEX_op_andc_vec: 2015 /* 2016 * vandn vd, vj, vk: vd = vk & ~vj 2017 * andc_vec vd, vj, vk: vd = vj & ~vk 2018 * vj and vk are swapped 2019 */ 2020 a1 = a2; 2021 a2 = args[1]; 2022 insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V; 2023 goto vdvjvk; 2024 case INDEX_op_or_vec: 2025 insn = lasx ? OPC_XVOR_V : OPC_VOR_V; 2026 goto vdvjvk; 2027 case INDEX_op_orc_vec: 2028 insn = lasx ? OPC_XVORN_V : OPC_VORN_V; 2029 goto vdvjvk; 2030 case INDEX_op_xor_vec: 2031 insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V; 2032 goto vdvjvk; 2033 case INDEX_op_not_vec: 2034 a2 = a1; 2035 /* fall through */ 2036 case INDEX_op_nor_vec: 2037 insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V; 2038 goto vdvjvk; 2039 case INDEX_op_cmp_vec: 2040 { 2041 TCGCond cond = args[3]; 2042 2043 if (const_args[2]) { 2044 /* 2045 * cmp_vec dest, src, value 2046 * Try vseqi/vslei/vslti 2047 */ 2048 int64_t value = sextract64(a2, 0, 8 << vece); 2049 switch (cond) { 2050 case TCG_COND_EQ: 2051 case TCG_COND_LE: 2052 case TCG_COND_LT: 2053 insn = cmp_vec_imm_insn[cond][lasx][vece]; 2054 tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value)); 2055 break; 2056 case TCG_COND_LEU: 2057 case TCG_COND_LTU: 2058 insn = cmp_vec_imm_insn[cond][lasx][vece]; 2059 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value)); 2060 break; 2061 default: 2062 g_assert_not_reached(); 2063 } 2064 break; 2065 } 2066 2067 insn = cmp_vec_insn[cond][lasx][vece]; 2068 if (insn == 0) { 2069 TCGArg t; 2070 t = a1, a1 = a2, a2 = t; 2071 cond = tcg_swap_cond(cond); 2072 insn = cmp_vec_insn[cond][lasx][vece]; 2073 tcg_debug_assert(insn != 0); 2074 } 2075 } 2076 goto vdvjvk; 2077 case INDEX_op_add_vec: 2078 tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true); 2079 break; 2080 case INDEX_op_sub_vec: 2081 tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false); 2082 break; 2083 case INDEX_op_neg_vec: 2084 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1)); 2085 break; 2086 case INDEX_op_mul_vec: 2087 insn = mul_vec_insn[lasx][vece]; 2088 goto vdvjvk; 2089 case INDEX_op_smin_vec: 2090 insn = smin_vec_insn[lasx][vece]; 2091 goto vdvjvk; 2092 case INDEX_op_smax_vec: 2093 insn = smax_vec_insn[lasx][vece]; 2094 goto vdvjvk; 2095 case INDEX_op_umin_vec: 2096 insn = umin_vec_insn[lasx][vece]; 2097 goto vdvjvk; 2098 case INDEX_op_umax_vec: 2099 insn = umax_vec_insn[lasx][vece]; 2100 goto vdvjvk; 2101 case INDEX_op_ssadd_vec: 2102 insn = ssadd_vec_insn[lasx][vece]; 2103 goto vdvjvk; 2104 case INDEX_op_usadd_vec: 2105 insn = usadd_vec_insn[lasx][vece]; 2106 goto vdvjvk; 2107 case INDEX_op_sssub_vec: 2108 insn = sssub_vec_insn[lasx][vece]; 2109 goto vdvjvk; 2110 case INDEX_op_ussub_vec: 2111 insn = ussub_vec_insn[lasx][vece]; 2112 goto vdvjvk; 2113 case INDEX_op_shlv_vec: 2114 insn = shlv_vec_insn[lasx][vece]; 2115 goto vdvjvk; 2116 case INDEX_op_shrv_vec: 2117 insn = shrv_vec_insn[lasx][vece]; 2118 goto vdvjvk; 2119 case INDEX_op_sarv_vec: 2120 insn = sarv_vec_insn[lasx][vece]; 2121 goto vdvjvk; 2122 case INDEX_op_rotlv_vec: 2123 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */ 2124 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], 2125 TCG_VEC_TMP0, a2)); 2126 a2 = TCG_VEC_TMP0; 2127 /* fall through */ 2128 case INDEX_op_rotrv_vec: 2129 insn = rotrv_vec_insn[lasx][vece]; 2130 goto vdvjvk; 2131 case INDEX_op_shli_vec: 2132 insn = shli_vec_insn[lasx][vece]; 2133 goto vdvjukN; 2134 case INDEX_op_shri_vec: 2135 insn = shri_vec_insn[lasx][vece]; 2136 goto vdvjukN; 2137 case INDEX_op_sari_vec: 2138 insn = sari_vec_insn[lasx][vece]; 2139 goto vdvjukN; 2140 case INDEX_op_rotli_vec: 2141 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */ 2142 a2 = extract32(-a2, 0, 3 + vece); 2143 insn = rotri_vec_insn[lasx][vece]; 2144 goto vdvjukN; 2145 case INDEX_op_bitsel_vec: 2146 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */ 2147 if (lasx) { 2148 tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1); 2149 } else { 2150 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1); 2151 } 2152 break; 2153 case INDEX_op_dupm_vec: 2154 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2155 break; 2156 default: 2157 g_assert_not_reached(); 2158 vdvjvk: 2159 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2)); 2160 break; 2161 vdvjukN: 2162 switch (vece) { 2163 case MO_8: 2164 tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2)); 2165 break; 2166 case MO_16: 2167 tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2)); 2168 break; 2169 case MO_32: 2170 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2)); 2171 break; 2172 case MO_64: 2173 tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2)); 2174 break; 2175 default: 2176 g_assert_not_reached(); 2177 } 2178 break; 2179 } 2180} 2181 2182int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2183{ 2184 switch (opc) { 2185 case INDEX_op_ld_vec: 2186 case INDEX_op_st_vec: 2187 case INDEX_op_dup_vec: 2188 case INDEX_op_dupm_vec: 2189 case INDEX_op_cmp_vec: 2190 case INDEX_op_add_vec: 2191 case INDEX_op_sub_vec: 2192 case INDEX_op_and_vec: 2193 case INDEX_op_andc_vec: 2194 case INDEX_op_or_vec: 2195 case INDEX_op_orc_vec: 2196 case INDEX_op_xor_vec: 2197 case INDEX_op_nor_vec: 2198 case INDEX_op_not_vec: 2199 case INDEX_op_neg_vec: 2200 case INDEX_op_mul_vec: 2201 case INDEX_op_smin_vec: 2202 case INDEX_op_smax_vec: 2203 case INDEX_op_umin_vec: 2204 case INDEX_op_umax_vec: 2205 case INDEX_op_ssadd_vec: 2206 case INDEX_op_usadd_vec: 2207 case INDEX_op_sssub_vec: 2208 case INDEX_op_ussub_vec: 2209 case INDEX_op_shlv_vec: 2210 case INDEX_op_shrv_vec: 2211 case INDEX_op_sarv_vec: 2212 case INDEX_op_bitsel_vec: 2213 return 1; 2214 default: 2215 return 0; 2216 } 2217} 2218 2219void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2220 TCGArg a0, ...) 2221{ 2222 g_assert_not_reached(); 2223} 2224 2225static TCGConstraintSetIndex 2226tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) 2227{ 2228 switch (op) { 2229 case INDEX_op_goto_ptr: 2230 return C_O0_I1(r); 2231 2232 case INDEX_op_st8_i32: 2233 case INDEX_op_st8_i64: 2234 case INDEX_op_st16_i32: 2235 case INDEX_op_st16_i64: 2236 case INDEX_op_st32_i64: 2237 case INDEX_op_st_i32: 2238 case INDEX_op_st_i64: 2239 case INDEX_op_qemu_st_i32: 2240 case INDEX_op_qemu_st_i64: 2241 return C_O0_I2(rz, r); 2242 2243 case INDEX_op_qemu_ld_i128: 2244 return C_N2_I1(r, r, r); 2245 2246 case INDEX_op_qemu_st_i128: 2247 return C_O0_I3(r, r, r); 2248 2249 case INDEX_op_brcond_i32: 2250 case INDEX_op_brcond_i64: 2251 return C_O0_I2(rz, rz); 2252 2253 case INDEX_op_extu_i32_i64: 2254 case INDEX_op_extrl_i64_i32: 2255 case INDEX_op_extrh_i64_i32: 2256 case INDEX_op_ext_i32_i64: 2257 case INDEX_op_neg_i32: 2258 case INDEX_op_neg_i64: 2259 case INDEX_op_not_i32: 2260 case INDEX_op_not_i64: 2261 case INDEX_op_extract_i32: 2262 case INDEX_op_extract_i64: 2263 case INDEX_op_sextract_i32: 2264 case INDEX_op_sextract_i64: 2265 case INDEX_op_bswap16_i32: 2266 case INDEX_op_bswap16_i64: 2267 case INDEX_op_bswap32_i32: 2268 case INDEX_op_bswap32_i64: 2269 case INDEX_op_bswap64_i64: 2270 case INDEX_op_ld8s_i32: 2271 case INDEX_op_ld8s_i64: 2272 case INDEX_op_ld8u_i32: 2273 case INDEX_op_ld8u_i64: 2274 case INDEX_op_ld16s_i32: 2275 case INDEX_op_ld16s_i64: 2276 case INDEX_op_ld16u_i32: 2277 case INDEX_op_ld16u_i64: 2278 case INDEX_op_ld32s_i64: 2279 case INDEX_op_ld32u_i64: 2280 case INDEX_op_ld_i32: 2281 case INDEX_op_ld_i64: 2282 case INDEX_op_qemu_ld_i32: 2283 case INDEX_op_qemu_ld_i64: 2284 return C_O1_I1(r, r); 2285 2286 case INDEX_op_shl_i32: 2287 case INDEX_op_shl_i64: 2288 case INDEX_op_shr_i32: 2289 case INDEX_op_shr_i64: 2290 case INDEX_op_sar_i32: 2291 case INDEX_op_sar_i64: 2292 case INDEX_op_rotl_i32: 2293 case INDEX_op_rotl_i64: 2294 case INDEX_op_rotr_i32: 2295 case INDEX_op_rotr_i64: 2296 return C_O1_I2(r, r, ri); 2297 2298 case INDEX_op_nor_i32: 2299 case INDEX_op_nor_i64: 2300 case INDEX_op_xor_i32: 2301 case INDEX_op_xor_i64: 2302 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 2303 return C_O1_I2(r, r, rU); 2304 2305 case INDEX_op_clz_i32: 2306 case INDEX_op_clz_i64: 2307 case INDEX_op_ctz_i32: 2308 case INDEX_op_ctz_i64: 2309 return C_O1_I2(r, r, rW); 2310 2311 case INDEX_op_deposit_i32: 2312 case INDEX_op_deposit_i64: 2313 /* Must deposit into the same register as input */ 2314 return C_O1_I2(r, 0, rz); 2315 2316 case INDEX_op_sub_i32: 2317 case INDEX_op_setcond_i32: 2318 return C_O1_I2(r, rz, ri); 2319 case INDEX_op_sub_i64: 2320 case INDEX_op_setcond_i64: 2321 return C_O1_I2(r, rz, rJ); 2322 2323 case INDEX_op_mul_i32: 2324 case INDEX_op_mul_i64: 2325 case INDEX_op_mulsh_i32: 2326 case INDEX_op_mulsh_i64: 2327 case INDEX_op_muluh_i32: 2328 case INDEX_op_muluh_i64: 2329 case INDEX_op_div_i32: 2330 case INDEX_op_div_i64: 2331 case INDEX_op_divu_i32: 2332 case INDEX_op_divu_i64: 2333 case INDEX_op_rem_i32: 2334 case INDEX_op_rem_i64: 2335 case INDEX_op_remu_i32: 2336 case INDEX_op_remu_i64: 2337 return C_O1_I2(r, rz, rz); 2338 2339 case INDEX_op_movcond_i32: 2340 case INDEX_op_movcond_i64: 2341 return C_O1_I4(r, rz, rJ, rz, rz); 2342 2343 case INDEX_op_ld_vec: 2344 case INDEX_op_dupm_vec: 2345 case INDEX_op_dup_vec: 2346 return C_O1_I1(w, r); 2347 2348 case INDEX_op_st_vec: 2349 return C_O0_I2(w, r); 2350 2351 case INDEX_op_cmp_vec: 2352 return C_O1_I2(w, w, wM); 2353 2354 case INDEX_op_add_vec: 2355 case INDEX_op_sub_vec: 2356 return C_O1_I2(w, w, wA); 2357 2358 case INDEX_op_and_vec: 2359 case INDEX_op_andc_vec: 2360 case INDEX_op_or_vec: 2361 case INDEX_op_orc_vec: 2362 case INDEX_op_xor_vec: 2363 case INDEX_op_nor_vec: 2364 case INDEX_op_mul_vec: 2365 case INDEX_op_smin_vec: 2366 case INDEX_op_smax_vec: 2367 case INDEX_op_umin_vec: 2368 case INDEX_op_umax_vec: 2369 case INDEX_op_ssadd_vec: 2370 case INDEX_op_usadd_vec: 2371 case INDEX_op_sssub_vec: 2372 case INDEX_op_ussub_vec: 2373 case INDEX_op_shlv_vec: 2374 case INDEX_op_shrv_vec: 2375 case INDEX_op_sarv_vec: 2376 case INDEX_op_rotrv_vec: 2377 case INDEX_op_rotlv_vec: 2378 return C_O1_I2(w, w, w); 2379 2380 case INDEX_op_not_vec: 2381 case INDEX_op_neg_vec: 2382 case INDEX_op_shli_vec: 2383 case INDEX_op_shri_vec: 2384 case INDEX_op_sari_vec: 2385 case INDEX_op_rotli_vec: 2386 return C_O1_I1(w, w); 2387 2388 case INDEX_op_bitsel_vec: 2389 return C_O1_I3(w, w, w, w); 2390 2391 default: 2392 return C_NotImplemented; 2393 } 2394} 2395 2396static const int tcg_target_callee_save_regs[] = { 2397 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2398 TCG_REG_S1, 2399 TCG_REG_S2, 2400 TCG_REG_S3, 2401 TCG_REG_S4, 2402 TCG_REG_S5, 2403 TCG_REG_S6, 2404 TCG_REG_S7, 2405 TCG_REG_S8, 2406 TCG_REG_S9, 2407 TCG_REG_RA, /* should be last for ABI compliance */ 2408}; 2409 2410/* Stack frame parameters. */ 2411#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 2412#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 2413#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 2414#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 2415 + TCG_TARGET_STACK_ALIGN - 1) \ 2416 & -TCG_TARGET_STACK_ALIGN) 2417#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 2418 2419/* We're expecting to be able to use an immediate for frame allocation. */ 2420QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 2421 2422/* Generate global QEMU prologue and epilogue code */ 2423static void tcg_target_qemu_prologue(TCGContext *s) 2424{ 2425 int i; 2426 2427 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 2428 2429 /* TB prologue */ 2430 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 2431 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2432 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2433 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2434 } 2435 2436 if (!tcg_use_softmmu && guest_base) { 2437 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 2438 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2439 } 2440 2441 /* Call generated code */ 2442 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2443 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 2444 2445 /* Return path for goto_ptr. Set return value to 0 */ 2446 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2447 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 2448 2449 /* TB epilogue */ 2450 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 2451 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2452 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2453 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2454 } 2455 2456 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 2457 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 2458} 2459 2460static void tcg_out_tb_start(TCGContext *s) 2461{ 2462 /* nothing to do */ 2463} 2464 2465static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 2466{ 2467 for (int i = 0; i < count; ++i) { 2468 /* Canonical nop is andi r0,r0,0 */ 2469 p[i] = OPC_ANDI; 2470 } 2471} 2472 2473static void tcg_target_init(TCGContext *s) 2474{ 2475 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2476 2477 /* Server and desktop class cpus have UAL; embedded cpus do not. */ 2478 if (!(hwcap & HWCAP_LOONGARCH_UAL)) { 2479 error_report("TCG: unaligned access support required; exiting"); 2480 exit(EXIT_FAILURE); 2481 } 2482 2483 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2484 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 2485 2486 tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS; 2487 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 2488 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 2489 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 2490 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 2491 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 2492 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 2493 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 2494 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 2495 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 2496 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 2497 2498 if (cpuinfo & CPUINFO_LSX) { 2499 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; 2500 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2501 if (cpuinfo & CPUINFO_LASX) { 2502 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS; 2503 } 2504 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24); 2505 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25); 2506 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26); 2507 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27); 2508 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28); 2509 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29); 2510 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30); 2511 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31); 2512 } 2513 2514 s->reserved_regs = 0; 2515 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 2516 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 2517 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 2518 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 2519 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 2520 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 2521 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 2522 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); 2523} 2524 2525typedef struct { 2526 DebugFrameHeader h; 2527 uint8_t fde_def_cfa[4]; 2528 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 2529} DebugFrame; 2530 2531#define ELF_HOST_MACHINE EM_LOONGARCH 2532 2533static const DebugFrame debug_frame = { 2534 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 2535 .h.cie.id = -1, 2536 .h.cie.version = 1, 2537 .h.cie.code_align = 1, 2538 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 2539 .h.cie.return_column = TCG_REG_RA, 2540 2541 /* Total FDE size does not include the "len" member. */ 2542 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2543 2544 .fde_def_cfa = { 2545 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 2546 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2547 (FRAME_SIZE >> 7) 2548 }, 2549 .fde_reg_ofs = { 2550 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 2551 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 2552 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 2553 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 2554 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 2555 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 2556 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 2557 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 2558 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 2559 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 2560 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 2561 } 2562}; 2563 2564void tcg_register_jit(const void *buf, size_t buf_size) 2565{ 2566 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2567} 2568