/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "vr0",
    "vr1",
    "vr2",
    "vr3",
    "vr4",
    "vr5",
    "vr6",
    "vr7",
    "vr8",
    "vr9",
    "vr10",
    "vr11",
    "vr12",
    "vr13",
    "vr14",
    "vr15",
    "vr16",
    "vr17",
    "vr18",
    "vr19",
    "vr20",
    "vr21",
    "vr22",
    "vr23",
    "vr24",
    "vr25",
    "vr26",
    "vr27",
    "vr28",
    "vr29",
    "vr30",
    "vr31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are callee-saved, and skipped. */
};
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_GUEST_BASE_REG TCG_REG_S1

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000
#define TCG_CT_CONST_VCMP  0x4000
#define TCG_CT_CONST_VADD  0x8000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    int64_t vec_val = sextract64(val, 0, 8 << vece);
    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    return false;
}
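
/*
 * Illustrative reading of the less obvious constraints (not from the
 * original source): TCG_CT_CONST_C12 matches values whose bitwise
 * complement fits in uimm12, e.g. val = 0xfffffffffffff00f gives
 * ~val = 0xff0, so an andc with that constant can be emitted as
 * "andi rd, rs, 0xff0".  TCG_CT_CONST_VCMP and TCG_CT_CONST_VADD bound
 * the per-element immediates accepted by vseqi/vslei/vslti and
 * vaddi/vsubi in the vector code further below.
 */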
/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated: a whopping stack machine is needed to stuff the fields,
 * with at the very least one SOP_PUSH and one SOP_POP (of the correct
 * format) required.
 *
 * Hence, define our own simpler relocation types. Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                /*
                 * Conventional register-register move used in LoongArch is
                 * `or dst, src, zero`.
                 */
                tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
            } else {
                tcg_out_opc_movfr2gr_d(s, ret, arg);
            }
        } else {
            if (arg < TCG_REG_V0) {
                tcg_out_opc_movgr2fr_d(s, ret, arg);
            } else {
                tcg_out_opc_fmov_d(s, ret, arg);
            }
        }
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_opc_vori_b(s, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
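
/*
 * Example (illustrative): tcg_out_movi_i32(s, rd, 0x12345) has non-zero
 * hi12 = 0x12, so it emits
 *   lu12i.w rd, 0x12       # rd = 0x12 << 12 = 0x12000
 *   ori     rd, rd, 0x345  # fill in the low 12 bits
 */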
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *              3                   2                   1
     *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path. Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
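
/*
 * Example (illustrative): tcg_out_addi(s, TCG_TYPE_I64, rd, rs, 0x12340678)
 * splits the immediate as lo12 = 0x678 and hi16 = 0x1234 (the hole bits
 * 12-15 are zero), so it emits
 *   addu16i.d rd, rs, 0x1234
 *   addi.d    rd, rd, 0x678
 */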
454 */ 455 if (imm == (hi16 << 16) + lo12) { 456 if (hi16) { 457 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 458 rs = rd; 459 } 460 if (type == TCG_TYPE_I32) { 461 tcg_out_opc_addi_w(s, rd, rs, lo12); 462 } else if (lo12) { 463 tcg_out_opc_addi_d(s, rd, rs, lo12); 464 } else { 465 tcg_out_mov(s, type, rd, rs); 466 } 467 } else { 468 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 469 if (type == TCG_TYPE_I32) { 470 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 471 } else { 472 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 473 } 474 } 475} 476 477static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 478{ 479 return false; 480} 481 482static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 483 tcg_target_long imm) 484{ 485 /* This function is only used for passing structs by reference. */ 486 g_assert_not_reached(); 487} 488 489static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 490{ 491 tcg_out_opc_andi(s, ret, arg, 0xff); 492} 493 494static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 495{ 496 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 497} 498 499static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 500{ 501 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 502} 503 504static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 505{ 506 tcg_out_opc_sext_b(s, ret, arg); 507} 508 509static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 510{ 511 tcg_out_opc_sext_h(s, ret, arg); 512} 513 514static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 515{ 516 tcg_out_opc_addi_w(s, ret, arg, 0); 517} 518 519static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 520{ 521 if (ret != arg) { 522 tcg_out_ext32s(s, ret, arg); 523 } 524} 525 526static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 527{ 528 tcg_out_ext32u(s, ret, arg); 529} 530 531static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 532{ 533 tcg_out_ext32s(s, ret, arg); 534} 535 536static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 537 TCGReg a0, TCGReg a1, TCGReg a2, 538 bool c2, bool is_32bit) 539{ 540 if (c2) { 541 /* 542 * Fast path: semantics already satisfied due to constraint and 543 * insn behavior, single instruction is enough. 544 */ 545 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 546 /* all clz/ctz insns belong to DJ-format */ 547 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 548 return; 549 } 550 551 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 552 /* a0 = a1 ? REG_TMP0 : a2 */ 553 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 554 tcg_out_opc_masknez(s, a0, a2, a1); 555 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 556} 557 558#define SETCOND_INV TCG_TARGET_NB_REGS 559#define SETCOND_NEZ (SETCOND_INV << 1) 560#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 561 562static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 563 TCGReg arg1, tcg_target_long arg2, bool c2) 564{ 565 int flags = 0; 566 567 switch (cond) { 568 case TCG_COND_EQ: /* -> NE */ 569 case TCG_COND_GE: /* -> LT */ 570 case TCG_COND_GEU: /* -> LTU */ 571 case TCG_COND_GT: /* -> LE */ 572 case TCG_COND_GTU: /* -> LEU */ 573 cond = tcg_invert_cond(cond); 574 flags ^= SETCOND_INV; 575 break; 576 default: 577 break; 578 } 579 580 switch (cond) { 581 case TCG_COND_LE: 582 case TCG_COND_LEU: 583 /* 584 * If we have a constant input, the most efficient way to implement 585 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 
#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:   /* -> NE  */
    case TCG_COND_GE:   /* -> LT  */
    case TCG_COND_GEU:  /* -> LTU */
    case TCG_COND_GT:   /* -> LE  */
    case TCG_COND_GTU:  /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT. Watch out for wraparound for LEU;
         * we don't need to care about this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
        break;
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
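
/*
 * Example (illustrative): setcond_i64 ret, x, 100, TCG_COND_EQ is first
 * rewritten as NE with SETCOND_INV, then finished as
 *   xori  ret, x, 100   # ret == 0 iff x == 100
 *   sltui ret, ret, 1   # booleanize: ret = (ret == 0)
 */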
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
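
/*
 * Example (illustrative): a call whose displacement fits in 38 bits but
 * not in 28 takes the "long jump" branch above and emits
 *   pcaddu18i tmp0, hi >> 18   # pc plus the sign-extended high part
 *   jirl      ra,   tmp0, lo >> 2
 * where lo is the low 18 bits of the (4-byte aligned) offset.
 */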
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    case OPC_FLD_S:
    case OPC_FLD_D:
    case OPC_FST_S:
    case OPC_FST_D:
        tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_W, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_S, src, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_D, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_D, src, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)
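
/*
 * Sketch of the softmmu fast path generated by prepare_host_addr() below
 * (illustrative, not part of the original source):
 *   ld.d   tmp0, env, mask_ofs    # TLB mask for this mmu_idx
 *   ld.d   tmp1, env, table_ofs   # TLB table base
 *   srli.d tmp2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *   and    tmp2, tmp2, tmp0       # index into the table...
 *   add.d  tmp2, tmp2, tmp1       # ...giving &tlb_entry
 * after which the masked address is compared against the entry and a
 * "bne" to the slow path is recorded as label_ptr[0].
 */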
/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                           s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address. For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
        } else {
            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
        }
        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                              a_bits, s->page_bits - 1);

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        h->index = TCG_REG_TMP2;
    } else {
        if (a_bits) {
            ldst = new_ldst_label(s);

            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            /*
             * Without micro-architecture details, we don't know which of
             * bstrpick or andi is faster, so use bstrpick as it's not
             * constrained by imm field width. Not to say alignments >= 2^12
             * are going to happen any time soon.
             */
            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    }

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}
static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
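
/*
 * Illustrative expansion of the MO_128-atomic case below: a 16-byte
 * aligned load goes through the LSX register file as
 *   vldx         vtmp0, base, index
 *   vpickve2gr.d data_lo, vtmp0, 0
 *   vpickve2gr.d data_hi, vtmp0, 1
 * and the store is the mirror image using vinsgr2vr.d + vstx.
 */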
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    if (h.aa.atom == MO_128) {
        /*
         * Use VLDX/VSTX when 128-bit atomicity is required.
         * If the address is aligned to 16 bytes, the 128-bit
         * load/store is atomic.
         */
        if (is_ld) {
            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
        } else {
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
        }
    } else {
        /* Otherwise use a pair of LD/ST. */
        TCGReg base = h.base;
        if (h.index != TCG_REG_ZERO) {
            base = TCG_REG_TMP0;
            tcg_out_opc_add_d(s, base, h.base, h.index);
        }
        if (is_ld) {
            tcg_debug_assert(base != data_lo);
            tcg_out_opc_ld_d(s, data_lo, base, 0);
            tcg_out_opc_ld_d(s, data_hi, base, 8);
        } else {
            tcg_out_opc_st_d(s, data_lo, base, 0);
            tcg_out_opc_st_d(s, data_hi, base, 8);
        }
    }

    if (ldst) {
        ldst->type = TCG_TYPE_I128;
        ldst->datalo_reg = data_lo;
        ldst->datahi_reg = data_hi;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target. Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}
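
/*
 * Illustrative note (not in the original): the patchable goto_tb site
 * above is a single instruction.  tb_target_set_jmp_target() below
 * rewrites it either to a direct "b" (when the scaled 26-bit
 * displacement fits), which skips over the following ld.d/jirl pair,
 * or to the pcaddu2i that feeds that pair for the indirect path.
 */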
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    TCGArg a3 = args[3];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;
    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;
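
    /* Example (illustrative): rotl_i32 a0, a1, 12 becomes "rotri.w a0, a1, 20". */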
    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    static const LoongArchInsn repl_insn[4] = {
        OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H, OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_out32(s, encode_vdj_insn(repl_insn[vece], rd, rs));
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
    if (offset < -0x800 || offset > 0x7ff ||
        (offset & ((1 << vece) - 1)) != 0) {
        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
        base = TCG_REG_TMP0;
        offset = 0;
    }
    offset >>= vece;

    switch (vece) {
    case MO_8:
        tcg_out_opc_vldrepl_b(s, r, base, offset);
        break;
    case MO_16:
        tcg_out_opc_vldrepl_h(s, r, base, offset);
        break;
    case MO_32:
        tcg_out_opc_vldrepl_w(s, r, base, offset);
        break;
    case MO_64:
        tcg_out_opc_vldrepl_d(s, r, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    /* Try vldi if imm can fit */
    int64_t value = sextract64(v64, 0, 8 << vece);
    if (-0x200 <= value && value <= 0x1FF) {
        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
        tcg_out_opc_vldi(s, rd, imm);
        return;
    }

    /* TODO: vldi patterns when imm 12 is set */

    /* Fallback to vreplgr2vr */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
    switch (vece) {
    case MO_8:
        tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
        break;
    case MO_16:
        tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
        break;
    case MO_32:
        tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
        break;
    case MO_64:
        tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
        break;
    default:
        g_assert_not_reached();
    }
}
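
/*
 * Example (illustrative): dupi_vec with vece == MO_16 and value 7 fits
 * the vldi fast path above, emitting "vldi vd, (1 << 10) | 7" to
 * replicate 0x0007 into every 16-bit lane.
 */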
static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
                               const TCGArg a1, const TCGArg a2,
                               bool a2_is_const, bool is_add)
{
    static const LoongArchInsn add_vec_insn[4] = {
        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
    };
    static const LoongArchInsn add_vec_imm_insn[4] = {
        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
    };
    static const LoongArchInsn sub_vec_insn[4] = {
        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
    };
    static const LoongArchInsn sub_vec_imm_insn[4] = {
        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
    };

    if (a2_is_const) {
        int64_t value = sextract64(a2, 0, 8 << vece);
        if (!is_add) {
            value = -value;
        }

        /* Try vaddi/vsubi */
        if (0 <= value && value <= 0x1f) {
            tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
                                             a1, value));
            return;
        } else if (-0x1f <= value && value < 0) {
            tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
                                             a1, -value));
            return;
        }

        /* constraint TCG_CT_CONST_VADD ensures unreachable */
        g_assert_not_reached();
    }

    if (is_add) {
        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
    } else {
        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
    }
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0, a1, a2, a3;
    TCGReg temp_vec = TCG_VEC_TMP0;

    static const LoongArchInsn cmp_vec_insn[16][4] = {
        [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
        [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
        [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
        [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
        [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
    };
    static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
        [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
        [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
        [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
        [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
        [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
    };
    LoongArchInsn insn;
    static const LoongArchInsn neg_vec_insn[4] = {
        OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
    };
    static const LoongArchInsn mul_vec_insn[4] = {
        OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
    };
    static const LoongArchInsn smin_vec_insn[4] = {
        OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
    };
    static const LoongArchInsn umin_vec_insn[4] = {
        OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
    };
    static const LoongArchInsn smax_vec_insn[4] = {
        OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
    };
    static const LoongArchInsn umax_vec_insn[4] = {
        OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
    };
    static const LoongArchInsn ssadd_vec_insn[4] = {
        OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
    };
    static const LoongArchInsn usadd_vec_insn[4] = {
        OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
    };
    static const LoongArchInsn sssub_vec_insn[4] = {
        OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
    };
    static const LoongArchInsn ussub_vec_insn[4] = {
        OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
    };
    static const LoongArchInsn shlv_vec_insn[4] = {
        OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
    };
    static const LoongArchInsn shrv_vec_insn[4] = {
        OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
    };
    static const LoongArchInsn sarv_vec_insn[4] = {
        OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
    };
    static const LoongArchInsn shli_vec_insn[4] = {
        OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
    };
    static const LoongArchInsn shri_vec_insn[4] = {
        OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
    };
    static const LoongArchInsn sari_vec_insn[4] = {
        OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
    };
    static const LoongArchInsn rotrv_vec_insn[4] = {
        OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
    };

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    a3 = args[3];

    /* Currently only supports V64 & V128 */
    tcg_debug_assert(type == TCG_TYPE_V64 || type == TCG_TYPE_V128);
    switch (opc) {
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_and_vec:
        tcg_out_opc_vand_v(s, a0, a1, a2);
        break;
    case INDEX_op_andc_vec:
        /*
         * vandn vd, vj, vk: vd = vk & ~vj
         * andc_vec vd, vj, vk: vd = vj & ~vk
         * vj and vk are swapped
         */
        tcg_out_opc_vandn_v(s, a0, a2, a1);
        break;
    case INDEX_op_or_vec:
        tcg_out_opc_vor_v(s, a0, a1, a2);
        break;
    case INDEX_op_orc_vec:
        tcg_out_opc_vorn_v(s, a0, a1, a2);
        break;
    case INDEX_op_xor_vec:
        tcg_out_opc_vxor_v(s, a0, a1, a2);
        break;
    case INDEX_op_nor_vec:
        tcg_out_opc_vnor_v(s, a0, a1, a2);
        break;
    case INDEX_op_not_vec:
        tcg_out_opc_vnor_v(s, a0, a1, a1);
        break;
    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];
            if (const_args[2]) {
                /*
                 * cmp_vec dest, src, value
                 * Try vseqi/vslei/vslti
                 */
                int64_t value = sextract64(a2, 0, 8 << vece);
                if ((cond == TCG_COND_EQ || cond == TCG_COND_LE ||
                     cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
                    tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece],
                                                     a0, a1, value));
                    break;
                } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
                           (0x00 <= value && value <= 0x1f)) {
                    tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece],
                                                     a0, a1, value));
                    break;
                }

                /*
                 * Fallback to:
                 * dupi_vec temp, a2
                 * cmp_vec a0, a1, temp, cond
                 */
                tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
                a2 = temp_vec;
            }

            insn = cmp_vec_insn[cond][vece];
            if (insn == 0) {
                TCGArg t;
                t = a1, a1 = a2, a2 = t;
                cond = tcg_swap_cond(cond);
                insn = cmp_vec_insn[cond][vece];
                tcg_debug_assert(insn != 0);
            }
            tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
        }
        break;
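
    /*
     * Illustrative note on the cmp_vec fallback above: TCG_COND_GT has no
     * entry in cmp_vec_insn[], so the operands are swapped and the
     * condition becomes LT, e.g. "vslt.w vd, vk, vj" for a 32-bit GT.
     */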
    case INDEX_op_add_vec:
        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
        break;
    case INDEX_op_sub_vec:
        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
        break;
    case INDEX_op_neg_vec:
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
        break;
    case INDEX_op_mul_vec:
        tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_smin_vec:
        tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_smax_vec:
        tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_umin_vec:
        tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_umax_vec:
        tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_ssadd_vec:
        tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_usadd_vec:
        tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_sssub_vec:
        tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_ussub_vec:
        tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_shlv_vec:
        tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_shrv_vec:
        tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_sarv_vec:
        tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_shli_vec:
        tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_shri_vec:
        tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_sari_vec:
        tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_rotrv_vec:
        tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
        break;
    case INDEX_op_rotlv_vec:
        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
        tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
                                        temp_vec));
        break;
    case INDEX_op_rotli_vec:
        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
        a2 = extract32(-a2, 0, 3 + vece);
        switch (vece) {
        case MO_8:
            tcg_out_opc_vrotri_b(s, a0, a1, a2);
            break;
        case MO_16:
            tcg_out_opc_vrotri_h(s, a0, a1, a2);
            break;
        case MO_32:
            tcg_out_opc_vrotri_w(s, a0, a1, a2);
            break;
        case MO_64:
            tcg_out_opc_vrotri_d(s, a0, a1, a2);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case INDEX_op_bitsel_vec:
        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
        tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    default:
        return 0;
    }
}
2087{ 2088 g_assert_not_reached(); 2089} 2090 2091static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2092{ 2093 switch (op) { 2094 case INDEX_op_goto_ptr: 2095 return C_O0_I1(r); 2096 2097 case INDEX_op_st8_i32: 2098 case INDEX_op_st8_i64: 2099 case INDEX_op_st16_i32: 2100 case INDEX_op_st16_i64: 2101 case INDEX_op_st32_i64: 2102 case INDEX_op_st_i32: 2103 case INDEX_op_st_i64: 2104 case INDEX_op_qemu_st_a32_i32: 2105 case INDEX_op_qemu_st_a64_i32: 2106 case INDEX_op_qemu_st_a32_i64: 2107 case INDEX_op_qemu_st_a64_i64: 2108 return C_O0_I2(rZ, r); 2109 2110 case INDEX_op_qemu_ld_a32_i128: 2111 case INDEX_op_qemu_ld_a64_i128: 2112 return C_N2_I1(r, r, r); 2113 2114 case INDEX_op_qemu_st_a32_i128: 2115 case INDEX_op_qemu_st_a64_i128: 2116 return C_O0_I3(r, r, r); 2117 2118 case INDEX_op_brcond_i32: 2119 case INDEX_op_brcond_i64: 2120 return C_O0_I2(rZ, rZ); 2121 2122 case INDEX_op_ext8s_i32: 2123 case INDEX_op_ext8s_i64: 2124 case INDEX_op_ext8u_i32: 2125 case INDEX_op_ext8u_i64: 2126 case INDEX_op_ext16s_i32: 2127 case INDEX_op_ext16s_i64: 2128 case INDEX_op_ext16u_i32: 2129 case INDEX_op_ext16u_i64: 2130 case INDEX_op_ext32s_i64: 2131 case INDEX_op_ext32u_i64: 2132 case INDEX_op_extu_i32_i64: 2133 case INDEX_op_extrl_i64_i32: 2134 case INDEX_op_extrh_i64_i32: 2135 case INDEX_op_ext_i32_i64: 2136 case INDEX_op_neg_i32: 2137 case INDEX_op_neg_i64: 2138 case INDEX_op_not_i32: 2139 case INDEX_op_not_i64: 2140 case INDEX_op_extract_i32: 2141 case INDEX_op_extract_i64: 2142 case INDEX_op_bswap16_i32: 2143 case INDEX_op_bswap16_i64: 2144 case INDEX_op_bswap32_i32: 2145 case INDEX_op_bswap32_i64: 2146 case INDEX_op_bswap64_i64: 2147 case INDEX_op_ld8s_i32: 2148 case INDEX_op_ld8s_i64: 2149 case INDEX_op_ld8u_i32: 2150 case INDEX_op_ld8u_i64: 2151 case INDEX_op_ld16s_i32: 2152 case INDEX_op_ld16s_i64: 2153 case INDEX_op_ld16u_i32: 2154 case INDEX_op_ld16u_i64: 2155 case INDEX_op_ld32s_i64: 2156 case INDEX_op_ld32u_i64: 2157 case INDEX_op_ld_i32: 2158 case INDEX_op_ld_i64: 2159 case INDEX_op_qemu_ld_a32_i32: 2160 case INDEX_op_qemu_ld_a64_i32: 2161 case INDEX_op_qemu_ld_a32_i64: 2162 case INDEX_op_qemu_ld_a64_i64: 2163 return C_O1_I1(r, r); 2164 2165 case INDEX_op_andc_i32: 2166 case INDEX_op_andc_i64: 2167 case INDEX_op_orc_i32: 2168 case INDEX_op_orc_i64: 2169 /* 2170 * LoongArch insns for these ops don't have reg-imm forms, but we 2171 * can express using andi/ori if ~constant satisfies 2172 * TCG_CT_CONST_U12. 
        return C_O1_I2(r, r, rC);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_dup_vec:
        return C_O1_I1(w, r);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);

    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wM);

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
        return C_O1_I2(w, w, wA);

    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_rotlv_vec:
        return C_O1_I2(w, w, w);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return C_O1_I1(w, w);

    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters. */
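/*
 * Frame layout sketch, derived from the macros below (offsets from sp
 * after the prologue's "addi.d sp, sp, -FRAME_SIZE"):
 *
 *   sp + 0                           outgoing static call args
 *   sp + TCG_STATIC_CALL_ARGS_SIZE   TCG temporary buffer (TEMP_SIZE)
 *   sp + SAVE_OFS                    callee-saved regs incl. ra (SAVE_SIZE)
 *   ...                              padding up to TCG_TARGET_STACK_ALIGN
 */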
#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code: on entry, a0 holds env and a1 the TB pointer. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}
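/*
 * Note: HWCAP_LOONGARCH_UAL (via <asm/hwcap.h>) advertises hardware
 * support for unaligned memory accesses, which the generated code
 * relies on; without it we refuse to start.
 */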
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Server and desktop class cpus have UAL; embedded cpus do not. */
    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
        error_report("TCG: unaligned access support required; exiting");
        exit(EXIT_FAILURE);
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    if (cpuinfo & CPUINFO_LSX) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH
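/*
 * Informal summary of the unwind info below: DW_CFA_def_cfa sets the
 * CFA to sp + FRAME_SIZE (encoded as a two-byte uleb128), and each
 * DW_CFA_offset entry records the slot of a callee-saved register
 * relative to the CFA, scaled by the data alignment factor of -8.
 */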
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}