/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <asm/hwcap.h>

/* used for function call generation */
#define TCG_REG_CALL_STACK              TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "vr0",
    "vr1",
    "vr2",
    "vr3",
    "vr4",
    "vr5",
    "vr6",
    "vr7",
    "vr8",
    "vr9",
    "vr10",
    "vr11",
    "vr12",
    "vr13",
    "vr14",
    "vr15",
    "vr16",
    "vr17",
    "vr18",
    "vr19",
    "vr20",
    "vr21",
    "vr22",
    "vr23",
    "vr24",
    "vr25",
    "vr26",
    "vr27",
    "vr28",
    "vr29",
    "vr30",
    "vr31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are caller-saved, and skipped. */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_GUEST_BASE_REG TCG_REG_S1

#define TCG_CT_CONST_S12   0x100
#define TCG_CT_CONST_S32   0x200
#define TCG_CT_CONST_U12   0x400
#define TCG_CT_CONST_C12   0x800
#define TCG_CT_CONST_WSZ   0x1000
#define TCG_CT_CONST_VCMP  0x2000
#define TCG_CT_CONST_VADD  0x4000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) {
        int64_t vec_val = sextract64(val, 0, 8 << vece);
        if (ct & TCG_CT_CONST_VCMP) {
            switch (cond) {
            case TCG_COND_EQ:
            case TCG_COND_LE:
            case TCG_COND_LT:
                return -0x10 <= vec_val && vec_val <= 0x0f;
            case TCG_COND_LEU:
            case TCG_COND_LTU:
                return 0x00 <= vec_val && vec_val <= 0x1f;
            default:
                return false;
            }
        }
        if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
            return true;
        }
    }
    return false;
}
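
/*
 * Illustrative example (not from the original source): TCG_CT_CONST_C12
 * above accepts values whose bitwise complement fits in uimm12.  For
 * instance c = 0xffffffffffffff00 matches because ~c = 0xff, which lets
 * the orc expansion later in this file compute a1 | ~c with a single
 * `ori a0, a1, 0xff`.
 */
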

/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated: a whopping stack machine is needed to stuff the fields,
 * and at the very least one SOP_PUSH and one SOP_POP (of the correct
 * format) are needed.
 *
 * Hence, define our own simpler relocation types.  Numbers are chosen so
 * as not to collide with potential future additions to the true ELF
 * relocation type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                /*
                 * Conventional register-register move used in LoongArch is
                 * `or dst, src, zero`.
                 */
                tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
            } else {
                tcg_out_opc_movfr2gr_d(s, ret, arg);
            }
        } else {
            if (arg < TCG_REG_V0) {
                tcg_out_opc_movgr2fr_d(s, ret, arg);
            } else {
                tcg_out_opc_fmov_d(s, ret, arg);
            }
        }
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_opc_vori_b(s, ret, arg, 0);
        break;
    case TCG_TYPE_V256:
        tcg_out_opc_xvori_b(s, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
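
/*
 * Worked example (illustrative) for the function below: loading
 * val = 0x12345678 gives lo = 0x678 and hi12 = 0x12345, so neither
 * single-instruction case applies and we emit `lu12i.w rd, 0x12345`
 * followed by `ori rd, rd, 0x678`.
 */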
/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |           lo            |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t src_rx, pc_offset;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
    if ((val & 3) == 0) {
        pc_offset = val - src_rx;
        if (pc_offset == sextreg(pc_offset, 0, 22)) {
            /* Single pcaddu2i. */
            tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
            return;
        }
    }

    pc_offset = (val >> 12) - (src_rx >> 12);
    if (pc_offset == sextreg(pc_offset, 0, 20)) {
        /* Load with pcalau12i + ori. */
        tcg_target_long val_lo = val & 0xfff;
        tcg_out_opc_pcalau12i(s, rd, pc_offset);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
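
/*
 * Worked example (illustrative) for the function below: imm = 0x7fff0123
 * splits into hi16 = 0x7fff and lo12 = 0x123 with nothing lost in the
 * hole, so `addu16i.d rd, rs, 0x7fff` + `addi.d rd, rd, 0x123` suffices;
 * imm = 0x5678, with bits 12..15 set, instead takes the generic
 * movi + add path.
 */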
static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
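
/*
 * Illustrative note on the branchless select above: maskeqz rd, rj, rk
 * yields rj when rk != 0 and 0 otherwise, while masknez rd, rj, rk
 * yields rj when rk == 0 and 0 otherwise, so OR-ing the two partial
 * results picks TMP0 (the clz/ctz result) when a1 is non-zero and a2
 * when a1 is zero, without a branch.
 */
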
#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
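
/*
 * Worked example (illustrative): setcond EQ ret, x, y is first inverted
 * to NE (setting SETCOND_INV), emits `xor ret, x, y`, and returns
 * ret | SETCOND_NEZ | SETCOND_INV; tcg_out_setcond above then
 * materializes the boolean with `sltui ret, ret, 1`, i.e. an
 * "is zero" test.
 */
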
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}
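
/*
 * Note on the jump tiers above (illustrative): jirl's 16-bit offset
 * field is counted in instructions, i.e. shifted left by 2 before use,
 * so the low part split off the offset is 18 bits wide (lo >> 2 fits
 * in simm16) and the remainder goes to pcaddu18i in 256KiB units.
 */
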

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    case OPC_FLD_S:
    case OPC_FLD_D:
    case OPC_FST_S:
    case OPC_FST_D:
        tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_W, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_S, src, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_D, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_D, src, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}
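
/*
 * Note (illustrative): each slow path above starts by patching the
 * forward BNE recorded in label_ptr[0] by prepare_host_addr below, so
 * that the inline fast path branches here on a TLB compare mismatch
 * (or, for user-only, a failed alignment check), calls the helper,
 * and branches back to l->raddr via tcg_out_goto.
 */
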

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                           s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address.  For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
        } else {
            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
        }
        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                              a_bits, s->page_bits - 1);

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        h->index = TCG_REG_TMP2;
    } else {
        if (a_bits) {
            ldst = new_ldst_label(s);

            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addr_reg = addr_reg;

            /*
             * Without micro-architecture details, we don't know which of
             * bstrpick or andi is faster, so use bstrpick as it's not
             * constrained by imm field width.  Not to say alignments >= 2^12
             * are going to happen any time soon.
             */
            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    }

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}

static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
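
/*
 * Note (illustrative): in the LD/ST-pair path of the 128-bit helper
 * below, the first 64-bit load must not clobber the register still
 * needed as base for the second one; tcg_debug_assert(base != data_lo)
 * documents this, with the C_N2_I1 constraint for qemu_ld_i128 keeping
 * the outputs distinct from the inputs.
 */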
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    if (h.aa.atom == MO_128) {
        /*
         * Use VLDX/VSTX when 128-bit atomicity is required.
         * If the address is aligned to 16 bytes, the 128-bit load/store
         * is atomic.
         */
        if (is_ld) {
            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
        } else {
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
        }
    } else {
        /* Otherwise use a pair of LD/ST. */
        TCGReg base = h.base;
        if (h.index != TCG_REG_ZERO) {
            base = TCG_REG_TMP0;
            tcg_out_opc_add_d(s, base, h.base, h.index);
        }
        if (is_ld) {
            tcg_debug_assert(base != data_lo);
            tcg_out_opc_ld_d(s, data_lo, base, 0);
            tcg_out_opc_ld_d(s, data_hi, base, 8);
        } else {
            tcg_out_opc_st_d(s, data_lo, base, 0);
            tcg_out_opc_st_d(s, data_hi, base, 8);
        }
    }

    if (ldst) {
        ldst->type = TCG_TYPE_I128;
        ldst->datalo_reg = data_lo;
        ldst->datahi_reg = data_hi;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}
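
/*
 * Note (illustrative): only the first instruction emitted by
 * tcg_out_goto_tb above is ever rewritten.  tb_target_set_jmp_target
 * below swaps that single slot between a direct B and the PCADDU2I
 * heading the indirect load+jirl sequence, which is why one qatomic_set
 * plus a 4-byte icache flush is sufficient.
 */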
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}


static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_opc_add_w(s, a0, a1, a2);
    } else {
        tcg_out_opc_add_d(s, a0, a1, a2);
    }
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_add,
    .out_rri = tcg_out_addi,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_and(s, a0, a1, a2);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_andi(s, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rU),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_andn(s, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_opc_or(s, a0, a1, a2);
}

static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_opc_ori(s, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rU),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};
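
/*
 * Note (illustrative): the .out_rri callbacks above are only invoked
 * after tcg_target_const_match has accepted the constant against the
 * constraint in .base.static_constraint, so tgen_andi/tgen_ori may
 * feed a2 straight into the uimm12 field, while tcg_out_addi handles
 * arbitrary immediates itself.
 */
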
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    TCGArg a3 = args[3];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        if (a2 == 0 && args[3] <= 12) {
            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
        } else {
            tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        }
        break;
    case INDEX_op_extract_i64:
        if (a2 == 0 && args[3] <= 12) {
            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
        } else {
            tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        }
        break;

    case INDEX_op_sextract_i64:
        if (a2 + args[3] == 32) {
            if (a2 == 0) {
                tcg_out_ext32s(s, a0, a1);
            } else {
                tcg_out_opc_srai_w(s, a0, a1, a2);
            }
            break;
        }
        /* FALLTHRU */
    case INDEX_op_sextract_i32:
        if (a2 == 0 && args[3] == 8) {
            tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
        } else if (a2 == 0 && args[3] == 16) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
        } else {
            g_assert_not_reached();
        }
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;
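
    /*
     * Illustrative note for the shift and rotate cases below: masking
     * the constant count with 0x1f/0x3f mirrors the register forms,
     * which only consult the low 5 (word) or 6 (doubleword) bits of
     * the count register.
     */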
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_ld_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
        break;

    case INDEX_op_call:        /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:     /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:     /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    static const LoongArchInsn repl_insn[2][4] = {
        { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
          OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
        { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
          OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
    };
    bool lasx = type == TCG_TYPE_V256;

    tcg_debug_assert(vece <= MO_64);
    tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
    return true;
}
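
/*
 * Worked example (illustrative) for the function below: vldrepl.d
 * encodes its offset in doubleword units, so replicating a 64-bit
 * element at byte offset 24 passes offset 3 to the encoder after the
 * `offset >>= vece` scaling; unaligned or out-of-range offsets are
 * first folded into TCG_REG_TMP0.
 */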
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    bool lasx = type == TCG_TYPE_V256;

    /* Handle imm overflow and division (vldrepl.d imm is divided by 8). */
    if (offset < -0x800 || offset > 0x7ff ||
        (offset & ((1 << vece) - 1)) != 0) {
        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
        base = TCG_REG_TMP0;
        offset = 0;
    }
    offset >>= vece;

    switch (vece) {
    case MO_8:
        if (lasx) {
            tcg_out_opc_xvldrepl_b(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_b(s, r, base, offset);
        }
        break;
    case MO_16:
        if (lasx) {
            tcg_out_opc_xvldrepl_h(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_h(s, r, base, offset);
        }
        break;
    case MO_32:
        if (lasx) {
            tcg_out_opc_xvldrepl_w(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_w(s, r, base, offset);
        }
        break;
    case MO_64:
        if (lasx) {
            tcg_out_opc_xvldrepl_d(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_d(s, r, base, offset);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    /* Try vldi if imm can fit */
    int64_t value = sextract64(v64, 0, 8 << vece);
    if (-0x200 <= value && value <= 0x1FF) {
        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);

        if (type == TCG_TYPE_V256) {
            tcg_out_opc_xvldi(s, rd, imm);
        } else {
            tcg_out_opc_vldi(s, rd, imm);
        }
        return;
    }

    /* TODO: vldi patterns when imm 12 is set */

    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
    tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
}
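
/*
 * Worked example (illustrative) for tcg_out_dupi_vec above: splatting
 * the byte -1 gives value = -1, which fits [-0x200, 0x1ff], so the
 * encoded immediate is (MO_8 << 10) | 0x3ff and a single vldi/xvldi
 * materializes the all-ones vector; anything wider falls back to
 * movi into TCG_REG_TMP0 plus a gr-to-vector replicate.
 */
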
static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
                               TCGArg a0, TCGArg a1, TCGArg a2,
                               bool a2_is_const, bool is_add)
{
    static const LoongArchInsn add_vec_insn[2][4] = {
        { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
        { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
    };
    static const LoongArchInsn add_vec_imm_insn[2][4] = {
        { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
        { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
    };
    static const LoongArchInsn sub_vec_insn[2][4] = {
        { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
        { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
    };
    static const LoongArchInsn sub_vec_imm_insn[2][4] = {
        { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
        { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
    };
    LoongArchInsn insn;

    if (a2_is_const) {
        int64_t value = sextract64(a2, 0, 8 << vece);

        if (!is_add) {
            value = -value;
        }
        if (value < 0) {
            insn = sub_vec_imm_insn[lasx][vece];
            value = -value;
        } else {
            insn = add_vec_imm_insn[lasx][vece];
        }

        /* Constraint TCG_CT_CONST_VADD ensures validity. */
        tcg_debug_assert(0 <= value && value <= 0x1f);

        tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
        return;
    }

    if (is_add) {
        insn = add_vec_insn[lasx][vece];
    } else {
        insn = sub_vec_insn[lasx][vece];
    }
    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    bool lasx = type == TCG_TYPE_V256;
    TCGArg a0, a1, a2, a3;
    LoongArchInsn insn;

    static const LoongArchInsn cmp_vec_insn[16][2][4] = {
        [TCG_COND_EQ] = {
            { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
            { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
        },
        [TCG_COND_LE] = {
            { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
            { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
        },
        [TCG_COND_LEU] = {
            { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
            { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
        },
        [TCG_COND_LT] = {
            { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
            { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
        },
        [TCG_COND_LTU] = {
            { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
            { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
        }
    };
    static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
        [TCG_COND_EQ] = {
            { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
            { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
        },
        [TCG_COND_LE] = {
            { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
            { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
        },
        [TCG_COND_LEU] = {
            { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
            { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
        },
        [TCG_COND_LT] = {
            { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
            { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
        },
        [TCG_COND_LTU] = {
            { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
            { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
        }
    };
    static const LoongArchInsn neg_vec_insn[2][4] = {
        { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
        { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
    };
    static const LoongArchInsn mul_vec_insn[2][4] = {
        { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
        { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
    };
    static const LoongArchInsn smin_vec_insn[2][4] = {
        { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
        { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
    };
    static const LoongArchInsn umin_vec_insn[2][4] = {
        { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
        { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
    };
    static const LoongArchInsn smax_vec_insn[2][4] = {
        { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
        { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
    };
    static const LoongArchInsn umax_vec_insn[2][4] = {
        { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
        { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
    };
    static const LoongArchInsn ssadd_vec_insn[2][4] = {
        { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
        { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
    };
    static const LoongArchInsn usadd_vec_insn[2][4] = {
        { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
        { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
    };
    static const LoongArchInsn sssub_vec_insn[2][4] = {
        { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
        { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
    };
    static const LoongArchInsn ussub_vec_insn[2][4] = {
        { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
        { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
    };
    static const LoongArchInsn shlv_vec_insn[2][4] = {
        { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
        { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
    };
    static const LoongArchInsn shrv_vec_insn[2][4] = {
        { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
        { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
    };
    static const LoongArchInsn sarv_vec_insn[2][4] = {
        { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
        { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
    };
    static const LoongArchInsn shli_vec_insn[2][4] = {
        { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
        { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
    };
    static const LoongArchInsn shri_vec_insn[2][4] = {
        { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
        { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
    };
    static const LoongArchInsn sari_vec_insn[2][4] = {
        { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
        { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
    };
    static const LoongArchInsn rotrv_vec_insn[2][4] = {
        { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
        { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
    };
    static const LoongArchInsn rotri_vec_insn[2][4] = {
        { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
        { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
    };

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    a3 = args[3];

    switch (opc) {
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_and_vec:
        insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
        goto vdvjvk;
    case INDEX_op_andc_vec:
        /*
         * vandn vd, vj, vk: vd = vk & ~vj
         * andc_vec vd, vj, vk: vd = vj & ~vk
         * vj and vk are swapped
         */
        a1 = a2;
        a2 = args[1];
        insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
        goto vdvjvk;
    case INDEX_op_or_vec:
        insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
        goto vdvjvk;
    case INDEX_op_orc_vec:
        insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
        goto vdvjvk;
    case INDEX_op_xor_vec:
        insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
        goto vdvjvk;
    case INDEX_op_not_vec:
        a2 = a1;
        /* fall through */
    case INDEX_op_nor_vec:
        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
        goto vdvjvk;
    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (const_args[2]) {
                /*
                 * cmp_vec dest, src, value
                 * Try vseqi/vslei/vslti
                 */
                int64_t value = sextract64(a2, 0, 8 << vece);
                switch (cond) {
                case TCG_COND_EQ:
                case TCG_COND_LE:
                case TCG_COND_LT:
                    insn = cmp_vec_imm_insn[cond][lasx][vece];
                    tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
                    break;
                case TCG_COND_LEU:
                case TCG_COND_LTU:
                    insn = cmp_vec_imm_insn[cond][lasx][vece];
                    tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            }

            insn = cmp_vec_insn[cond][lasx][vece];
            if (insn == 0) {
                TCGArg t;
                t = a1, a1 = a2, a2 = t;
                cond = tcg_swap_cond(cond);
                insn = cmp_vec_insn[cond][lasx][vece];
                tcg_debug_assert(insn != 0);
            }
        }
        goto vdvjvk;
    case INDEX_op_add_vec:
        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
        break;
    case INDEX_op_sub_vec:
        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
        break;
    case INDEX_op_neg_vec:
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
        break;
    case INDEX_op_mul_vec:
        insn = mul_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_smin_vec:
        insn = smin_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_smax_vec:
        insn = smax_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_umin_vec:
        insn = umin_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_umax_vec:
        insn = umax_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_ssadd_vec:
        insn = ssadd_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_usadd_vec:
        insn = usadd_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_sssub_vec:
        insn = sssub_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_ussub_vec:
        insn = ussub_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shlv_vec:
        insn = shlv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shrv_vec:
        insn = shrv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_sarv_vec:
        insn = sarv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_rotlv_vec:
        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
                                      TCG_VEC_TMP0, a2));
        a2 = TCG_VEC_TMP0;
        /* fall through */
    case INDEX_op_rotrv_vec:
        insn = rotrv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shli_vec:
        insn = shli_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_shri_vec:
        insn = shri_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_sari_vec:
        insn = sari_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_rotli_vec:
        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
        a2 = extract32(-a2, 0, 3 + vece);
        insn = rotri_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_bitsel_vec:
        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
        if (lasx) {
            tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
        } else {
            tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
        }
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    default:
        g_assert_not_reached();
    vdvjvk:
        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
        break;
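    /*
     * Immediate shift/rotate counts occupy a field as wide as log2 of
     * the element width: 3 bits for .B, 4 for .H, 5 for .W and 6 for
     * .D.  Hence the shared tail below picks the encoder by vece.
     */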
    vdvjukN:
        switch (vece) {
        case MO_8:
            tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2));
            break;
        case MO_16:
            tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2));
            break;
        case MO_32:
            tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2));
            break;
        case MO_64:
            tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2));
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    g_assert_not_reached();
}
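/*
 * Reminder on the constraint-set mnemonics used below: C_On_Im means n
 * outputs and m inputs.  Lower-case letters name register classes
 * ('r' general, 'w' vector), and 'z' additionally accepts constant
 * zero, materialised as $zero.  Capital letters select the
 * TCG_CT_CONST_* immediate tests defined earlier in this file (e.g.
 * 'U' for TCG_CT_CONST_U12); see tcg-target-con-str.h for the exact
 * mapping.
 */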
2295 */ 2296 return C_O1_I2(r, r, rC); 2297 2298 case INDEX_op_shl_i32: 2299 case INDEX_op_shl_i64: 2300 case INDEX_op_shr_i32: 2301 case INDEX_op_shr_i64: 2302 case INDEX_op_sar_i32: 2303 case INDEX_op_sar_i64: 2304 case INDEX_op_rotl_i32: 2305 case INDEX_op_rotl_i64: 2306 case INDEX_op_rotr_i32: 2307 case INDEX_op_rotr_i64: 2308 return C_O1_I2(r, r, ri); 2309 2310 case INDEX_op_nor_i32: 2311 case INDEX_op_nor_i64: 2312 case INDEX_op_xor_i32: 2313 case INDEX_op_xor_i64: 2314 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 2315 return C_O1_I2(r, r, rU); 2316 2317 case INDEX_op_clz_i32: 2318 case INDEX_op_clz_i64: 2319 case INDEX_op_ctz_i32: 2320 case INDEX_op_ctz_i64: 2321 return C_O1_I2(r, r, rW); 2322 2323 case INDEX_op_deposit_i32: 2324 case INDEX_op_deposit_i64: 2325 /* Must deposit into the same register as input */ 2326 return C_O1_I2(r, 0, rz); 2327 2328 case INDEX_op_sub_i32: 2329 case INDEX_op_setcond_i32: 2330 return C_O1_I2(r, rz, ri); 2331 case INDEX_op_sub_i64: 2332 case INDEX_op_setcond_i64: 2333 return C_O1_I2(r, rz, rJ); 2334 2335 case INDEX_op_mul_i32: 2336 case INDEX_op_mul_i64: 2337 case INDEX_op_mulsh_i32: 2338 case INDEX_op_mulsh_i64: 2339 case INDEX_op_muluh_i32: 2340 case INDEX_op_muluh_i64: 2341 case INDEX_op_div_i32: 2342 case INDEX_op_div_i64: 2343 case INDEX_op_divu_i32: 2344 case INDEX_op_divu_i64: 2345 case INDEX_op_rem_i32: 2346 case INDEX_op_rem_i64: 2347 case INDEX_op_remu_i32: 2348 case INDEX_op_remu_i64: 2349 return C_O1_I2(r, rz, rz); 2350 2351 case INDEX_op_movcond_i32: 2352 case INDEX_op_movcond_i64: 2353 return C_O1_I4(r, rz, rJ, rz, rz); 2354 2355 case INDEX_op_ld_vec: 2356 case INDEX_op_dupm_vec: 2357 case INDEX_op_dup_vec: 2358 return C_O1_I1(w, r); 2359 2360 case INDEX_op_st_vec: 2361 return C_O0_I2(w, r); 2362 2363 case INDEX_op_cmp_vec: 2364 return C_O1_I2(w, w, wM); 2365 2366 case INDEX_op_add_vec: 2367 case INDEX_op_sub_vec: 2368 return C_O1_I2(w, w, wA); 2369 2370 case INDEX_op_and_vec: 2371 case INDEX_op_andc_vec: 2372 case INDEX_op_or_vec: 2373 case INDEX_op_orc_vec: 2374 case INDEX_op_xor_vec: 2375 case INDEX_op_nor_vec: 2376 case INDEX_op_mul_vec: 2377 case INDEX_op_smin_vec: 2378 case INDEX_op_smax_vec: 2379 case INDEX_op_umin_vec: 2380 case INDEX_op_umax_vec: 2381 case INDEX_op_ssadd_vec: 2382 case INDEX_op_usadd_vec: 2383 case INDEX_op_sssub_vec: 2384 case INDEX_op_ussub_vec: 2385 case INDEX_op_shlv_vec: 2386 case INDEX_op_shrv_vec: 2387 case INDEX_op_sarv_vec: 2388 case INDEX_op_rotrv_vec: 2389 case INDEX_op_rotlv_vec: 2390 return C_O1_I2(w, w, w); 2391 2392 case INDEX_op_not_vec: 2393 case INDEX_op_neg_vec: 2394 case INDEX_op_shli_vec: 2395 case INDEX_op_shri_vec: 2396 case INDEX_op_sari_vec: 2397 case INDEX_op_rotli_vec: 2398 return C_O1_I1(w, w); 2399 2400 case INDEX_op_bitsel_vec: 2401 return C_O1_I3(w, w, w, w); 2402 2403 default: 2404 return C_NotImplemented; 2405 } 2406} 2407 2408static const int tcg_target_callee_save_regs[] = { 2409 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2410 TCG_REG_S1, 2411 TCG_REG_S2, 2412 TCG_REG_S3, 2413 TCG_REG_S4, 2414 TCG_REG_S5, 2415 TCG_REG_S6, 2416 TCG_REG_S7, 2417 TCG_REG_S8, 2418 TCG_REG_S9, 2419 TCG_REG_RA, /* should be last for ABI compliance */ 2420}; 2421 2422/* Stack frame parameters. 
/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
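/*
 * Worked example (illustrative, assuming the usual QEMU values
 * TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128 on an
 * LP64 host): SAVE_SIZE = 11 * 8 = 88 and TEMP_SIZE = 1024, so
 * FRAME_SIZE = (128 + 1024 + 88 + 15) & -16 = 1248, comfortably inside
 * the signed 12-bit immediate of the addi.d used in the prologue.
 */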
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    for (int i = 0; i < count; ++i) {
        /* Canonical nop is andi r0,r0,0 */
        p[i] = OPC_ANDI;
    }
}

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Server and desktop class cpus have UAL; embedded cpus do not. */
    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
        error_report("TCG: unaligned access support required; exiting");
        exit(EXIT_FAILURE);
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    if (cpuinfo & CPUINFO_LSX) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
        if (cpuinfo & CPUINFO_LASX) {
            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
        }
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

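    /*
     * The CFA bytes below are hand-assembled DWARF: DW_CFA_def_cfa is
     * opcode 12, followed by the register number and a uleb128 offset
     * (encoded here as two bytes, low 7 bits first with the 0x80
     * continuation bit set).  Each DW_CFA_offset entry is
     * (0x80 | regno) plus a uleb128 factor that is multiplied by
     * data_align (-8) to give the save slot's offset from the CFA.
     */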
    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}