1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33#include <asm/hwcap.h> 34 35#ifdef CONFIG_DEBUG_TCG 36static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 37 "zero", 38 "ra", 39 "tp", 40 "sp", 41 "a0", 42 "a1", 43 "a2", 44 "a3", 45 "a4", 46 "a5", 47 "a6", 48 "a7", 49 "t0", 50 "t1", 51 "t2", 52 "t3", 53 "t4", 54 "t5", 55 "t6", 56 "t7", 57 "t8", 58 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 59 "s9", 60 "s0", 61 "s1", 62 "s2", 63 "s3", 64 "s4", 65 "s5", 66 "s6", 67 "s7", 68 "s8", 69 "vr0", 70 "vr1", 71 "vr2", 72 "vr3", 73 "vr4", 74 "vr5", 75 "vr6", 76 "vr7", 77 "vr8", 78 "vr9", 79 "vr10", 80 "vr11", 81 "vr12", 82 "vr13", 83 "vr14", 84 "vr15", 85 "vr16", 86 "vr17", 87 "vr18", 88 "vr19", 89 "vr20", 90 "vr21", 91 "vr22", 92 "vr23", 93 "vr24", 94 "vr25", 95 "vr26", 96 "vr27", 97 "vr28", 98 "vr29", 99 "vr30", 100 "vr31", 101}; 102#endif 103 104static const int tcg_target_reg_alloc_order[] = { 105 /* Registers preserved across calls */ 106 /* TCG_REG_S0 reserved for TCG_AREG0 */ 107 TCG_REG_S1, 108 TCG_REG_S2, 109 TCG_REG_S3, 110 TCG_REG_S4, 111 TCG_REG_S5, 112 TCG_REG_S6, 113 TCG_REG_S7, 114 TCG_REG_S8, 115 TCG_REG_S9, 116 117 /* Registers (potentially) clobbered across calls */ 118 TCG_REG_T0, 119 TCG_REG_T1, 120 TCG_REG_T2, 121 TCG_REG_T3, 122 TCG_REG_T4, 123 TCG_REG_T5, 124 TCG_REG_T6, 125 TCG_REG_T7, 126 TCG_REG_T8, 127 128 /* Argument registers, opposite order of allocation. */ 129 TCG_REG_A7, 130 TCG_REG_A6, 131 TCG_REG_A5, 132 TCG_REG_A4, 133 TCG_REG_A3, 134 TCG_REG_A2, 135 TCG_REG_A1, 136 TCG_REG_A0, 137 138 /* Vector registers */ 139 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, 140 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, 141 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, 142 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, 143 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, 144 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, 145 /* V24 - V31 are caller-saved, and skipped. */ 146}; 147 148static const int tcg_target_call_iarg_regs[] = { 149 TCG_REG_A0, 150 TCG_REG_A1, 151 TCG_REG_A2, 152 TCG_REG_A3, 153 TCG_REG_A4, 154 TCG_REG_A5, 155 TCG_REG_A6, 156 TCG_REG_A7, 157}; 158 159static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 160{ 161 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 162 tcg_debug_assert(slot >= 0 && slot <= 1); 163 return TCG_REG_A0 + slot; 164} 165 166#define TCG_GUEST_BASE_REG TCG_REG_S1 167 168#define TCG_CT_CONST_ZERO 0x100 169#define TCG_CT_CONST_S12 0x200 170#define TCG_CT_CONST_S32 0x400 171#define TCG_CT_CONST_U12 0x800 172#define TCG_CT_CONST_C12 0x1000 173#define TCG_CT_CONST_WSZ 0x2000 174#define TCG_CT_CONST_VCMP 0x4000 175#define TCG_CT_CONST_VADD 0x8000 176 177#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 178#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) 179 180static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 181{ 182 return sextract64(val, pos, len); 183} 184 185/* test if a constant matches the constraint */ 186static bool tcg_target_const_match(int64_t val, int ct, 187 TCGType type, TCGCond cond, int vece) 188{ 189 if (ct & TCG_CT_CONST) { 190 return true; 191 } 192 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 193 return true; 194 } 195 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 196 return true; 197 } 198 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 199 return true; 200 } 201 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 202 return true; 203 } 204 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 205 return true; 206 } 207 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 208 return true; 209 } 210 int64_t vec_val = sextract64(val, 0, 8 << vece); 211 if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) { 212 return true; 213 } 214 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { 215 return true; 216 } 217 return false; 218} 219 220/* 221 * Relocations 222 */ 223 224/* 225 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 226 * complicated; a whopping stack machine is needed to stuff the fields, at 227 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 228 * needed. 229 * 230 * Hence, define our own simpler relocation types. Numbers are chosen as to 231 * not collide with potential future additions to the true ELF relocation 232 * type enum. 233 */ 234 235/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 236#define R_LOONGARCH_BR_SK16 256 237/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 238#define R_LOONGARCH_BR_SD10K16 257 239 240static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 241{ 242 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 243 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 244 245 tcg_debug_assert((offset & 3) == 0); 246 offset >>= 2; 247 if (offset == sextreg(offset, 0, 16)) { 248 *src_rw = deposit64(*src_rw, 10, 16, offset); 249 return true; 250 } 251 252 return false; 253} 254 255static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 256 const tcg_insn_unit *target) 257{ 258 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 259 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 260 261 tcg_debug_assert((offset & 3) == 0); 262 offset >>= 2; 263 if (offset == sextreg(offset, 0, 26)) { 264 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 265 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 266 return true; 267 } 268 269 return false; 270} 271 272static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 273 intptr_t value, intptr_t addend) 274{ 275 tcg_debug_assert(addend == 0); 276 switch (type) { 277 case R_LOONGARCH_BR_SK16: 278 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 279 case R_LOONGARCH_BR_SD10K16: 280 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 281 default: 282 g_assert_not_reached(); 283 } 284} 285 286#include "tcg-insn-defs.c.inc" 287 288/* 289 * TCG intrinsics 290 */ 291 292static void tcg_out_mb(TCGContext *s, TCGArg a0) 293{ 294 /* Baseline LoongArch only has the full barrier, unfortunately. */ 295 tcg_out_opc_dbar(s, 0); 296} 297 298static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 299{ 300 if (ret == arg) { 301 return true; 302 } 303 switch (type) { 304 case TCG_TYPE_I32: 305 case TCG_TYPE_I64: 306 /* 307 * Conventional register-register move used in LoongArch is 308 * `or dst, src, zero`. 309 */ 310 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 311 break; 312 case TCG_TYPE_V128: 313 tcg_out_opc_vori_b(s, ret, arg, 0); 314 break; 315 default: 316 g_assert_not_reached(); 317 } 318 return true; 319} 320 321/* Loads a 32-bit immediate into rd, sign-extended. */ 322static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 323{ 324 tcg_target_long lo = sextreg(val, 0, 12); 325 tcg_target_long hi12 = sextreg(val, 12, 20); 326 327 /* Single-instruction cases. */ 328 if (hi12 == 0) { 329 /* val fits in uimm12: ori rd, zero, val */ 330 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 331 return; 332 } 333 if (hi12 == sextreg(lo, 12, 20)) { 334 /* val fits in simm12: addi.w rd, zero, val */ 335 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 336 return; 337 } 338 339 /* High bits must be set; load with lu12i.w + optional ori. */ 340 tcg_out_opc_lu12i_w(s, rd, hi12); 341 if (lo != 0) { 342 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 343 } 344} 345 346static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 347 tcg_target_long val) 348{ 349 /* 350 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 351 * with dedicated instructions for filling the respective bitfields 352 * below: 353 * 354 * 6 5 4 3 355 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 356 * +-----------------------+---------------------------------------+... 357 * | hi52 | hi32 | 358 * +-----------------------+---------------------------------------+... 359 * 3 2 1 360 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 361 * ...+-------------------------------------+-------------------------+ 362 * | hi12 | lo | 363 * ...+-------------------------------------+-------------------------+ 364 * 365 * Check if val belong to one of the several fast cases, before falling 366 * back to the slow path. 367 */ 368 369 intptr_t pc_offset; 370 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 371 tcg_target_long hi12, hi32, hi52; 372 373 /* Value fits in signed i32. */ 374 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 375 tcg_out_movi_i32(s, rd, val); 376 return; 377 } 378 379 /* PC-relative cases. */ 380 pc_offset = tcg_pcrel_diff(s, (void *)val); 381 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 382 /* Single pcaddu2i. */ 383 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 384 return; 385 } 386 387 if (pc_offset == (int32_t)pc_offset) { 388 /* Offset within 32 bits; load with pcalau12i + ori. */ 389 val_lo = sextreg(val, 0, 12); 390 val_hi = val >> 12; 391 pc_hi = (val - pc_offset) >> 12; 392 offset_hi = val_hi - pc_hi; 393 394 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 395 tcg_out_opc_pcalau12i(s, rd, offset_hi); 396 if (val_lo != 0) { 397 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 398 } 399 return; 400 } 401 402 hi12 = sextreg(val, 12, 20); 403 hi32 = sextreg(val, 32, 20); 404 hi52 = sextreg(val, 52, 12); 405 406 /* Single cu52i.d case. */ 407 if ((hi52 != 0) && (ctz64(val) >= 52)) { 408 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 409 return; 410 } 411 412 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 413 tcg_out_movi_i32(s, rd, val); 414 415 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 416 if (hi32 != sextreg(hi12, 20, 20)) { 417 tcg_out_opc_cu32i_d(s, rd, hi32); 418 } 419 420 if (hi52 != sextreg(hi32, 20, 12)) { 421 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 422 } 423} 424 425static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 426 TCGReg rs, tcg_target_long imm) 427{ 428 tcg_target_long lo12 = sextreg(imm, 0, 12); 429 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 430 431 /* 432 * Note that there's a hole in between hi16 and lo12: 433 * 434 * 3 2 1 0 435 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 436 * ...+-------------------------------+-------+-----------------------+ 437 * | hi16 | | lo12 | 438 * ...+-------------------------------+-------+-----------------------+ 439 * 440 * For bits within that hole, it's more efficient to use LU12I and ADD. 441 */ 442 if (imm == (hi16 << 16) + lo12) { 443 if (hi16) { 444 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 445 rs = rd; 446 } 447 if (type == TCG_TYPE_I32) { 448 tcg_out_opc_addi_w(s, rd, rs, lo12); 449 } else if (lo12) { 450 tcg_out_opc_addi_d(s, rd, rs, lo12); 451 } else { 452 tcg_out_mov(s, type, rd, rs); 453 } 454 } else { 455 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 456 if (type == TCG_TYPE_I32) { 457 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 458 } else { 459 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 460 } 461 } 462} 463 464static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 465{ 466 return false; 467} 468 469static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 470 tcg_target_long imm) 471{ 472 /* This function is only used for passing structs by reference. */ 473 g_assert_not_reached(); 474} 475 476static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 477{ 478 tcg_out_opc_andi(s, ret, arg, 0xff); 479} 480 481static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 482{ 483 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 484} 485 486static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 487{ 488 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 489} 490 491static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 492{ 493 tcg_out_opc_sext_b(s, ret, arg); 494} 495 496static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 497{ 498 tcg_out_opc_sext_h(s, ret, arg); 499} 500 501static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 502{ 503 tcg_out_opc_addi_w(s, ret, arg, 0); 504} 505 506static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 507{ 508 if (ret != arg) { 509 tcg_out_ext32s(s, ret, arg); 510 } 511} 512 513static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 514{ 515 tcg_out_ext32u(s, ret, arg); 516} 517 518static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 519{ 520 tcg_out_ext32s(s, ret, arg); 521} 522 523static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 524 TCGReg a0, TCGReg a1, TCGReg a2, 525 bool c2, bool is_32bit) 526{ 527 if (c2) { 528 /* 529 * Fast path: semantics already satisfied due to constraint and 530 * insn behavior, single instruction is enough. 531 */ 532 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 533 /* all clz/ctz insns belong to DJ-format */ 534 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 535 return; 536 } 537 538 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 539 /* a0 = a1 ? REG_TMP0 : a2 */ 540 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 541 tcg_out_opc_masknez(s, a0, a2, a1); 542 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 543} 544 545#define SETCOND_INV TCG_TARGET_NB_REGS 546#define SETCOND_NEZ (SETCOND_INV << 1) 547#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 548 549static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 550 TCGReg arg1, tcg_target_long arg2, bool c2) 551{ 552 int flags = 0; 553 554 switch (cond) { 555 case TCG_COND_EQ: /* -> NE */ 556 case TCG_COND_GE: /* -> LT */ 557 case TCG_COND_GEU: /* -> LTU */ 558 case TCG_COND_GT: /* -> LE */ 559 case TCG_COND_GTU: /* -> LEU */ 560 cond = tcg_invert_cond(cond); 561 flags ^= SETCOND_INV; 562 break; 563 default: 564 break; 565 } 566 567 switch (cond) { 568 case TCG_COND_LE: 569 case TCG_COND_LEU: 570 /* 571 * If we have a constant input, the most efficient way to implement 572 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 573 * We don't need to care for this for LE because the constant input 574 * is still constrained to int32_t, and INT32_MAX+1 is representable 575 * in the 64-bit temporary register. 576 */ 577 if (c2) { 578 if (cond == TCG_COND_LEU) { 579 /* unsigned <= -1 is true */ 580 if (arg2 == -1) { 581 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 582 return ret; 583 } 584 cond = TCG_COND_LTU; 585 } else { 586 cond = TCG_COND_LT; 587 } 588 arg2 += 1; 589 } else { 590 TCGReg tmp = arg2; 591 arg2 = arg1; 592 arg1 = tmp; 593 cond = tcg_swap_cond(cond); /* LE -> GE */ 594 cond = tcg_invert_cond(cond); /* GE -> LT */ 595 flags ^= SETCOND_INV; 596 } 597 break; 598 default: 599 break; 600 } 601 602 switch (cond) { 603 case TCG_COND_NE: 604 flags |= SETCOND_NEZ; 605 if (!c2) { 606 tcg_out_opc_xor(s, ret, arg1, arg2); 607 } else if (arg2 == 0) { 608 ret = arg1; 609 } else if (arg2 >= 0 && arg2 <= 0xfff) { 610 tcg_out_opc_xori(s, ret, arg1, arg2); 611 } else { 612 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 613 } 614 break; 615 616 case TCG_COND_LT: 617 case TCG_COND_LTU: 618 if (c2) { 619 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 620 if (cond == TCG_COND_LT) { 621 tcg_out_opc_slti(s, ret, arg1, arg2); 622 } else { 623 tcg_out_opc_sltui(s, ret, arg1, arg2); 624 } 625 break; 626 } 627 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 628 arg2 = TCG_REG_TMP0; 629 } 630 if (cond == TCG_COND_LT) { 631 tcg_out_opc_slt(s, ret, arg1, arg2); 632 } else { 633 tcg_out_opc_sltu(s, ret, arg1, arg2); 634 } 635 break; 636 637 default: 638 g_assert_not_reached(); 639 break; 640 } 641 642 return ret | flags; 643} 644 645static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 646 TCGReg arg1, tcg_target_long arg2, bool c2) 647{ 648 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 649 650 if (tmpflags != ret) { 651 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 652 653 switch (tmpflags & SETCOND_FLAGS) { 654 case SETCOND_INV: 655 /* Intermediate result is boolean: simply invert. */ 656 tcg_out_opc_xori(s, ret, tmp, 1); 657 break; 658 case SETCOND_NEZ: 659 /* Intermediate result is zero/non-zero: test != 0. */ 660 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 661 break; 662 case SETCOND_NEZ | SETCOND_INV: 663 /* Intermediate result is zero/non-zero: test == 0. */ 664 tcg_out_opc_sltui(s, ret, tmp, 1); 665 break; 666 default: 667 g_assert_not_reached(); 668 } 669 } 670} 671 672static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 673 TCGReg c1, tcg_target_long c2, bool const2, 674 TCGReg v1, TCGReg v2) 675{ 676 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 677 TCGReg t; 678 679 /* Standardize the test below to t != 0. */ 680 if (tmpflags & SETCOND_INV) { 681 t = v1, v1 = v2, v2 = t; 682 } 683 684 t = tmpflags & ~SETCOND_FLAGS; 685 if (v1 == TCG_REG_ZERO) { 686 tcg_out_opc_masknez(s, ret, v2, t); 687 } else if (v2 == TCG_REG_ZERO) { 688 tcg_out_opc_maskeqz(s, ret, v1, t); 689 } else { 690 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 691 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 692 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 693 } 694} 695 696/* 697 * Branch helpers 698 */ 699 700static const struct { 701 LoongArchInsn op; 702 bool swap; 703} tcg_brcond_to_loongarch[] = { 704 [TCG_COND_EQ] = { OPC_BEQ, false }, 705 [TCG_COND_NE] = { OPC_BNE, false }, 706 [TCG_COND_LT] = { OPC_BGT, true }, 707 [TCG_COND_GE] = { OPC_BLE, true }, 708 [TCG_COND_LE] = { OPC_BLE, false }, 709 [TCG_COND_GT] = { OPC_BGT, false }, 710 [TCG_COND_LTU] = { OPC_BGTU, true }, 711 [TCG_COND_GEU] = { OPC_BLEU, true }, 712 [TCG_COND_LEU] = { OPC_BLEU, false }, 713 [TCG_COND_GTU] = { OPC_BGTU, false } 714}; 715 716static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 717 TCGReg arg2, TCGLabel *l) 718{ 719 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 720 721 tcg_debug_assert(op != 0); 722 723 if (tcg_brcond_to_loongarch[cond].swap) { 724 TCGReg t = arg1; 725 arg1 = arg2; 726 arg2 = t; 727 } 728 729 /* all conditional branch insns belong to DJSk16-format */ 730 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 731 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 732} 733 734static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 735{ 736 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 737 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 738 739 tcg_debug_assert((offset & 3) == 0); 740 if (offset == sextreg(offset, 0, 28)) { 741 /* short jump: +/- 256MiB */ 742 if (tail) { 743 tcg_out_opc_b(s, offset >> 2); 744 } else { 745 tcg_out_opc_bl(s, offset >> 2); 746 } 747 } else if (offset == sextreg(offset, 0, 38)) { 748 /* long jump: +/- 256GiB */ 749 tcg_target_long lo = sextreg(offset, 0, 18); 750 tcg_target_long hi = offset - lo; 751 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 752 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 753 } else { 754 /* far jump: 64-bit */ 755 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 756 tcg_target_long hi = (tcg_target_long)arg - lo; 757 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 758 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 759 } 760} 761 762static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 763 const TCGHelperInfo *info) 764{ 765 tcg_out_call_int(s, arg, false); 766} 767 768/* 769 * Load/store helpers 770 */ 771 772static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 773 TCGReg addr, intptr_t offset) 774{ 775 intptr_t imm12 = sextreg(offset, 0, 12); 776 777 if (offset != imm12) { 778 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 779 780 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 781 imm12 = sextreg(diff, 0, 12); 782 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 783 } else { 784 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 785 if (addr != TCG_REG_ZERO) { 786 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 787 } 788 } 789 addr = TCG_REG_TMP2; 790 } 791 792 switch (opc) { 793 case OPC_LD_B: 794 case OPC_LD_BU: 795 case OPC_LD_H: 796 case OPC_LD_HU: 797 case OPC_LD_W: 798 case OPC_LD_WU: 799 case OPC_LD_D: 800 case OPC_ST_B: 801 case OPC_ST_H: 802 case OPC_ST_W: 803 case OPC_ST_D: 804 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 805 break; 806 default: 807 g_assert_not_reached(); 808 } 809} 810 811static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest, 812 TCGReg base, intptr_t offset) 813{ 814 switch (type) { 815 case TCG_TYPE_I32: 816 if (dest < TCG_REG_V0) { 817 tcg_out_ldst(s, OPC_LD_W, dest, base, offset); 818 } else { 819 tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_32, dest, base, offset); 820 } 821 break; 822 case TCG_TYPE_I64: 823 if (dest < TCG_REG_V0) { 824 tcg_out_ldst(s, OPC_LD_D, dest, base, offset); 825 } else { 826 tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_64, dest, base, offset); 827 } 828 break; 829 case TCG_TYPE_V128: 830 if (-0x800 <= offset && offset <= 0x7ff) { 831 tcg_out_opc_vld(s, dest, base, offset); 832 } else { 833 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 834 tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0); 835 } 836 break; 837 default: 838 g_assert_not_reached(); 839 } 840} 841 842static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src, 843 TCGReg base, intptr_t offset) 844{ 845 switch (type) { 846 case TCG_TYPE_I32: 847 if (src < TCG_REG_V0) { 848 tcg_out_ldst(s, OPC_ST_W, src, base, offset); 849 } else { 850 /* TODO: Could use fst_s, fstx_s */ 851 if (offset < -0x100 || offset > 0xff || (offset & 3)) { 852 if (-0x800 <= offset && offset <= 0x7ff) { 853 tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset); 854 } else { 855 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 856 tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base); 857 } 858 base = TCG_REG_TMP0; 859 offset = 0; 860 } 861 tcg_out_opc_vstelm_w(s, src, base, offset, 0); 862 } 863 break; 864 case TCG_TYPE_I64: 865 if (src < TCG_REG_V0) { 866 tcg_out_ldst(s, OPC_ST_D, src, base, offset); 867 } else { 868 /* TODO: Could use fst_d, fstx_d */ 869 if (offset < -0x100 || offset > 0xff || (offset & 7)) { 870 if (-0x800 <= offset && offset <= 0x7ff) { 871 tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset); 872 } else { 873 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 874 tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base); 875 } 876 base = TCG_REG_TMP0; 877 offset = 0; 878 } 879 tcg_out_opc_vstelm_d(s, src, base, offset, 0); 880 } 881 break; 882 case TCG_TYPE_V128: 883 if (-0x800 <= offset && offset <= 0x7ff) { 884 tcg_out_opc_vst(s, src, base, offset); 885 } else { 886 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); 887 tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0); 888 } 889 break; 890 default: 891 g_assert_not_reached(); 892 } 893} 894 895static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 896 TCGReg base, intptr_t ofs) 897{ 898 if (val == 0) { 899 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 900 return true; 901 } 902 return false; 903} 904 905/* 906 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 907 */ 908 909static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 910{ 911 tcg_out_opc_b(s, 0); 912 return reloc_br_sd10k16(s->code_ptr - 1, target); 913} 914 915static const TCGLdstHelperParam ldst_helper_param = { 916 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 917}; 918 919static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 920{ 921 MemOp opc = get_memop(l->oi); 922 923 /* resolve label address */ 924 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 925 return false; 926 } 927 928 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 929 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 930 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 931 return tcg_out_goto(s, l->raddr); 932} 933 934static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 935{ 936 MemOp opc = get_memop(l->oi); 937 938 /* resolve label address */ 939 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 940 return false; 941 } 942 943 tcg_out_st_helper_args(s, l, &ldst_helper_param); 944 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 945 return tcg_out_goto(s, l->raddr); 946} 947 948typedef struct { 949 TCGReg base; 950 TCGReg index; 951 TCGAtomAlign aa; 952} HostAddress; 953 954bool tcg_target_has_memory_bswap(MemOp memop) 955{ 956 return false; 957} 958 959/* We expect to use a 12-bit negative offset from ENV. */ 960#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 961 962/* 963 * For system-mode, perform the TLB load and compare. 964 * For user-mode, perform any required alignment tests. 965 * In both cases, return a TCGLabelQemuLdst structure if the slow path 966 * is required and fill in @h with the host address for the fast path. 967 */ 968static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 969 TCGReg addr_reg, MemOpIdx oi, 970 bool is_ld) 971{ 972 TCGType addr_type = s->addr_type; 973 TCGLabelQemuLdst *ldst = NULL; 974 MemOp opc = get_memop(oi); 975 MemOp a_bits; 976 977 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 978 a_bits = h->aa.align; 979 980 if (tcg_use_softmmu) { 981 unsigned s_bits = opc & MO_SIZE; 982 int mem_index = get_mmuidx(oi); 983 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 984 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 985 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 986 987 ldst = new_ldst_label(s); 988 ldst->is_ld = is_ld; 989 ldst->oi = oi; 990 ldst->addrlo_reg = addr_reg; 991 992 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 993 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 994 995 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 996 s->page_bits - CPU_TLB_ENTRY_BITS); 997 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 998 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 999 1000 /* Load the tlb comparator and the addend. */ 1001 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 1002 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 1003 is_ld ? offsetof(CPUTLBEntry, addr_read) 1004 : offsetof(CPUTLBEntry, addr_write)); 1005 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 1006 offsetof(CPUTLBEntry, addend)); 1007 1008 /* 1009 * For aligned accesses, we check the first byte and include the 1010 * alignment bits within the address. For unaligned access, we 1011 * check that we don't cross pages using the address of the last 1012 * byte of the access. 1013 */ 1014 if (a_bits < s_bits) { 1015 unsigned a_mask = (1u << a_bits) - 1; 1016 unsigned s_mask = (1u << s_bits) - 1; 1017 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); 1018 } else { 1019 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); 1020 } 1021 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, 1022 a_bits, s->page_bits - 1); 1023 1024 /* Compare masked address with the TLB entry. */ 1025 ldst->label_ptr[0] = s->code_ptr; 1026 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 1027 1028 h->index = TCG_REG_TMP2; 1029 } else { 1030 if (a_bits) { 1031 ldst = new_ldst_label(s); 1032 1033 ldst->is_ld = is_ld; 1034 ldst->oi = oi; 1035 ldst->addrlo_reg = addr_reg; 1036 1037 /* 1038 * Without micro-architecture details, we don't know which of 1039 * bstrpick or andi is faster, so use bstrpick as it's not 1040 * constrained by imm field width. Not to say alignments >= 2^12 1041 * are going to happen any time soon. 1042 */ 1043 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 1044 1045 ldst->label_ptr[0] = s->code_ptr; 1046 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 1047 } 1048 1049 h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 1050 } 1051 1052 if (addr_type == TCG_TYPE_I32) { 1053 h->base = TCG_REG_TMP0; 1054 tcg_out_ext32u(s, h->base, addr_reg); 1055 } else { 1056 h->base = addr_reg; 1057 } 1058 1059 return ldst; 1060} 1061 1062static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 1063 TCGReg rd, HostAddress h) 1064{ 1065 /* Byte swapping is left to middle-end expansion. */ 1066 tcg_debug_assert((opc & MO_BSWAP) == 0); 1067 1068 switch (opc & MO_SSIZE) { 1069 case MO_UB: 1070 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 1071 break; 1072 case MO_SB: 1073 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 1074 break; 1075 case MO_UW: 1076 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 1077 break; 1078 case MO_SW: 1079 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 1080 break; 1081 case MO_UL: 1082 if (type == TCG_TYPE_I64) { 1083 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 1084 break; 1085 } 1086 /* fallthrough */ 1087 case MO_SL: 1088 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1089 break; 1090 case MO_UQ: 1091 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1092 break; 1093 default: 1094 g_assert_not_reached(); 1095 } 1096} 1097 1098static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1099 MemOpIdx oi, TCGType data_type) 1100{ 1101 TCGLabelQemuLdst *ldst; 1102 HostAddress h; 1103 1104 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1105 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1106 1107 if (ldst) { 1108 ldst->type = data_type; 1109 ldst->datalo_reg = data_reg; 1110 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1111 } 1112} 1113 1114static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1115 TCGReg rd, HostAddress h) 1116{ 1117 /* Byte swapping is left to middle-end expansion. */ 1118 tcg_debug_assert((opc & MO_BSWAP) == 0); 1119 1120 switch (opc & MO_SIZE) { 1121 case MO_8: 1122 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1123 break; 1124 case MO_16: 1125 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1126 break; 1127 case MO_32: 1128 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1129 break; 1130 case MO_64: 1131 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1132 break; 1133 default: 1134 g_assert_not_reached(); 1135 } 1136} 1137 1138static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1139 MemOpIdx oi, TCGType data_type) 1140{ 1141 TCGLabelQemuLdst *ldst; 1142 HostAddress h; 1143 1144 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1145 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1146 1147 if (ldst) { 1148 ldst->type = data_type; 1149 ldst->datalo_reg = data_reg; 1150 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1151 } 1152} 1153 1154static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi, 1155 TCGReg addr_reg, MemOpIdx oi, bool is_ld) 1156{ 1157 TCGLabelQemuLdst *ldst; 1158 HostAddress h; 1159 1160 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); 1161 1162 if (h.aa.atom == MO_128) { 1163 /* 1164 * Use VLDX/VSTX when 128-bit atomicity is required. 1165 * If address is aligned to 16-bytes, the 128-bit load/store is atomic. 1166 */ 1167 if (is_ld) { 1168 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index); 1169 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0); 1170 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1); 1171 } else { 1172 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0); 1173 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1); 1174 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index); 1175 } 1176 } else { 1177 /* Otherwise use a pair of LD/ST. */ 1178 TCGReg base = h.base; 1179 if (h.index != TCG_REG_ZERO) { 1180 base = TCG_REG_TMP0; 1181 tcg_out_opc_add_d(s, base, h.base, h.index); 1182 } 1183 if (is_ld) { 1184 tcg_debug_assert(base != data_lo); 1185 tcg_out_opc_ld_d(s, data_lo, base, 0); 1186 tcg_out_opc_ld_d(s, data_hi, base, 8); 1187 } else { 1188 tcg_out_opc_st_d(s, data_lo, base, 0); 1189 tcg_out_opc_st_d(s, data_hi, base, 8); 1190 } 1191 } 1192 1193 if (ldst) { 1194 ldst->type = TCG_TYPE_I128; 1195 ldst->datalo_reg = data_lo; 1196 ldst->datahi_reg = data_hi; 1197 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1198 } 1199} 1200 1201/* 1202 * Entry-points 1203 */ 1204 1205static const tcg_insn_unit *tb_ret_addr; 1206 1207static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1208{ 1209 /* Reuse the zeroing that exists for goto_ptr. */ 1210 if (a0 == 0) { 1211 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1212 } else { 1213 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1214 tcg_out_call_int(s, tb_ret_addr, true); 1215 } 1216} 1217 1218static void tcg_out_goto_tb(TCGContext *s, int which) 1219{ 1220 /* 1221 * Direct branch, or load indirect address, to be patched 1222 * by tb_target_set_jmp_target. Check indirect load offset 1223 * in range early, regardless of direct branch distance, 1224 * via assert within tcg_out_opc_pcaddu2i. 1225 */ 1226 uintptr_t i_addr = get_jmp_target_addr(s, which); 1227 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1228 1229 set_jmp_insn_offset(s, which); 1230 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1231 1232 /* Finish the load and indirect branch. */ 1233 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1234 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1235 set_jmp_reset_offset(s, which); 1236} 1237 1238void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1239 uintptr_t jmp_rx, uintptr_t jmp_rw) 1240{ 1241 uintptr_t d_addr = tb->jmp_target_addr[n]; 1242 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1243 tcg_insn_unit insn; 1244 1245 /* Either directly branch, or load slot address for indirect branch. */ 1246 if (d_disp == sextreg(d_disp, 0, 26)) { 1247 insn = encode_sd10k16_insn(OPC_B, d_disp); 1248 } else { 1249 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1250 intptr_t i_disp = i_addr - jmp_rx; 1251 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1252 } 1253 1254 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1255 flush_idcache_range(jmp_rx, jmp_rw, 4); 1256} 1257 1258static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1259 const TCGArg args[TCG_MAX_OP_ARGS], 1260 const int const_args[TCG_MAX_OP_ARGS]) 1261{ 1262 TCGArg a0 = args[0]; 1263 TCGArg a1 = args[1]; 1264 TCGArg a2 = args[2]; 1265 TCGArg a3 = args[3]; 1266 int c2 = const_args[2]; 1267 1268 switch (opc) { 1269 case INDEX_op_mb: 1270 tcg_out_mb(s, a0); 1271 break; 1272 1273 case INDEX_op_goto_ptr: 1274 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1275 break; 1276 1277 case INDEX_op_br: 1278 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1279 0); 1280 tcg_out_opc_b(s, 0); 1281 break; 1282 1283 case INDEX_op_brcond_i32: 1284 case INDEX_op_brcond_i64: 1285 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1286 break; 1287 1288 case INDEX_op_extrh_i64_i32: 1289 tcg_out_opc_srai_d(s, a0, a1, 32); 1290 break; 1291 1292 case INDEX_op_not_i32: 1293 case INDEX_op_not_i64: 1294 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1295 break; 1296 1297 case INDEX_op_nor_i32: 1298 case INDEX_op_nor_i64: 1299 if (c2) { 1300 tcg_out_opc_ori(s, a0, a1, a2); 1301 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1302 } else { 1303 tcg_out_opc_nor(s, a0, a1, a2); 1304 } 1305 break; 1306 1307 case INDEX_op_andc_i32: 1308 case INDEX_op_andc_i64: 1309 if (c2) { 1310 /* guaranteed to fit due to constraint */ 1311 tcg_out_opc_andi(s, a0, a1, ~a2); 1312 } else { 1313 tcg_out_opc_andn(s, a0, a1, a2); 1314 } 1315 break; 1316 1317 case INDEX_op_orc_i32: 1318 case INDEX_op_orc_i64: 1319 if (c2) { 1320 /* guaranteed to fit due to constraint */ 1321 tcg_out_opc_ori(s, a0, a1, ~a2); 1322 } else { 1323 tcg_out_opc_orn(s, a0, a1, a2); 1324 } 1325 break; 1326 1327 case INDEX_op_and_i32: 1328 case INDEX_op_and_i64: 1329 if (c2) { 1330 tcg_out_opc_andi(s, a0, a1, a2); 1331 } else { 1332 tcg_out_opc_and(s, a0, a1, a2); 1333 } 1334 break; 1335 1336 case INDEX_op_or_i32: 1337 case INDEX_op_or_i64: 1338 if (c2) { 1339 tcg_out_opc_ori(s, a0, a1, a2); 1340 } else { 1341 tcg_out_opc_or(s, a0, a1, a2); 1342 } 1343 break; 1344 1345 case INDEX_op_xor_i32: 1346 case INDEX_op_xor_i64: 1347 if (c2) { 1348 tcg_out_opc_xori(s, a0, a1, a2); 1349 } else { 1350 tcg_out_opc_xor(s, a0, a1, a2); 1351 } 1352 break; 1353 1354 case INDEX_op_extract_i32: 1355 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1356 break; 1357 case INDEX_op_extract_i64: 1358 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1359 break; 1360 1361 case INDEX_op_deposit_i32: 1362 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1363 break; 1364 case INDEX_op_deposit_i64: 1365 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1366 break; 1367 1368 case INDEX_op_bswap16_i32: 1369 case INDEX_op_bswap16_i64: 1370 tcg_out_opc_revb_2h(s, a0, a1); 1371 if (a2 & TCG_BSWAP_OS) { 1372 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1373 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1374 tcg_out_ext16u(s, a0, a0); 1375 } 1376 break; 1377 1378 case INDEX_op_bswap32_i32: 1379 /* All 32-bit values are computed sign-extended in the register. */ 1380 a2 = TCG_BSWAP_OS; 1381 /* fallthrough */ 1382 case INDEX_op_bswap32_i64: 1383 tcg_out_opc_revb_2w(s, a0, a1); 1384 if (a2 & TCG_BSWAP_OS) { 1385 tcg_out_ext32s(s, a0, a0); 1386 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1387 tcg_out_ext32u(s, a0, a0); 1388 } 1389 break; 1390 1391 case INDEX_op_bswap64_i64: 1392 tcg_out_opc_revb_d(s, a0, a1); 1393 break; 1394 1395 case INDEX_op_clz_i32: 1396 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1397 break; 1398 case INDEX_op_clz_i64: 1399 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1400 break; 1401 1402 case INDEX_op_ctz_i32: 1403 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1404 break; 1405 case INDEX_op_ctz_i64: 1406 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1407 break; 1408 1409 case INDEX_op_shl_i32: 1410 if (c2) { 1411 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1412 } else { 1413 tcg_out_opc_sll_w(s, a0, a1, a2); 1414 } 1415 break; 1416 case INDEX_op_shl_i64: 1417 if (c2) { 1418 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1419 } else { 1420 tcg_out_opc_sll_d(s, a0, a1, a2); 1421 } 1422 break; 1423 1424 case INDEX_op_shr_i32: 1425 if (c2) { 1426 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1427 } else { 1428 tcg_out_opc_srl_w(s, a0, a1, a2); 1429 } 1430 break; 1431 case INDEX_op_shr_i64: 1432 if (c2) { 1433 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1434 } else { 1435 tcg_out_opc_srl_d(s, a0, a1, a2); 1436 } 1437 break; 1438 1439 case INDEX_op_sar_i32: 1440 if (c2) { 1441 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1442 } else { 1443 tcg_out_opc_sra_w(s, a0, a1, a2); 1444 } 1445 break; 1446 case INDEX_op_sar_i64: 1447 if (c2) { 1448 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1449 } else { 1450 tcg_out_opc_sra_d(s, a0, a1, a2); 1451 } 1452 break; 1453 1454 case INDEX_op_rotl_i32: 1455 /* transform into equivalent rotr/rotri */ 1456 if (c2) { 1457 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1458 } else { 1459 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1460 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1461 } 1462 break; 1463 case INDEX_op_rotl_i64: 1464 /* transform into equivalent rotr/rotri */ 1465 if (c2) { 1466 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1467 } else { 1468 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1469 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1470 } 1471 break; 1472 1473 case INDEX_op_rotr_i32: 1474 if (c2) { 1475 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1476 } else { 1477 tcg_out_opc_rotr_w(s, a0, a1, a2); 1478 } 1479 break; 1480 case INDEX_op_rotr_i64: 1481 if (c2) { 1482 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1483 } else { 1484 tcg_out_opc_rotr_d(s, a0, a1, a2); 1485 } 1486 break; 1487 1488 case INDEX_op_add_i32: 1489 if (c2) { 1490 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1491 } else { 1492 tcg_out_opc_add_w(s, a0, a1, a2); 1493 } 1494 break; 1495 case INDEX_op_add_i64: 1496 if (c2) { 1497 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1498 } else { 1499 tcg_out_opc_add_d(s, a0, a1, a2); 1500 } 1501 break; 1502 1503 case INDEX_op_sub_i32: 1504 if (c2) { 1505 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1506 } else { 1507 tcg_out_opc_sub_w(s, a0, a1, a2); 1508 } 1509 break; 1510 case INDEX_op_sub_i64: 1511 if (c2) { 1512 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1513 } else { 1514 tcg_out_opc_sub_d(s, a0, a1, a2); 1515 } 1516 break; 1517 1518 case INDEX_op_neg_i32: 1519 tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1); 1520 break; 1521 case INDEX_op_neg_i64: 1522 tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1); 1523 break; 1524 1525 case INDEX_op_mul_i32: 1526 tcg_out_opc_mul_w(s, a0, a1, a2); 1527 break; 1528 case INDEX_op_mul_i64: 1529 tcg_out_opc_mul_d(s, a0, a1, a2); 1530 break; 1531 1532 case INDEX_op_mulsh_i32: 1533 tcg_out_opc_mulh_w(s, a0, a1, a2); 1534 break; 1535 case INDEX_op_mulsh_i64: 1536 tcg_out_opc_mulh_d(s, a0, a1, a2); 1537 break; 1538 1539 case INDEX_op_muluh_i32: 1540 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1541 break; 1542 case INDEX_op_muluh_i64: 1543 tcg_out_opc_mulh_du(s, a0, a1, a2); 1544 break; 1545 1546 case INDEX_op_div_i32: 1547 tcg_out_opc_div_w(s, a0, a1, a2); 1548 break; 1549 case INDEX_op_div_i64: 1550 tcg_out_opc_div_d(s, a0, a1, a2); 1551 break; 1552 1553 case INDEX_op_divu_i32: 1554 tcg_out_opc_div_wu(s, a0, a1, a2); 1555 break; 1556 case INDEX_op_divu_i64: 1557 tcg_out_opc_div_du(s, a0, a1, a2); 1558 break; 1559 1560 case INDEX_op_rem_i32: 1561 tcg_out_opc_mod_w(s, a0, a1, a2); 1562 break; 1563 case INDEX_op_rem_i64: 1564 tcg_out_opc_mod_d(s, a0, a1, a2); 1565 break; 1566 1567 case INDEX_op_remu_i32: 1568 tcg_out_opc_mod_wu(s, a0, a1, a2); 1569 break; 1570 case INDEX_op_remu_i64: 1571 tcg_out_opc_mod_du(s, a0, a1, a2); 1572 break; 1573 1574 case INDEX_op_setcond_i32: 1575 case INDEX_op_setcond_i64: 1576 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1577 break; 1578 1579 case INDEX_op_movcond_i32: 1580 case INDEX_op_movcond_i64: 1581 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1582 break; 1583 1584 case INDEX_op_ld8s_i32: 1585 case INDEX_op_ld8s_i64: 1586 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1587 break; 1588 case INDEX_op_ld8u_i32: 1589 case INDEX_op_ld8u_i64: 1590 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1591 break; 1592 case INDEX_op_ld16s_i32: 1593 case INDEX_op_ld16s_i64: 1594 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1595 break; 1596 case INDEX_op_ld16u_i32: 1597 case INDEX_op_ld16u_i64: 1598 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1599 break; 1600 case INDEX_op_ld_i32: 1601 case INDEX_op_ld32s_i64: 1602 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1603 break; 1604 case INDEX_op_ld32u_i64: 1605 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1606 break; 1607 case INDEX_op_ld_i64: 1608 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1609 break; 1610 1611 case INDEX_op_st8_i32: 1612 case INDEX_op_st8_i64: 1613 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1614 break; 1615 case INDEX_op_st16_i32: 1616 case INDEX_op_st16_i64: 1617 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1618 break; 1619 case INDEX_op_st_i32: 1620 case INDEX_op_st32_i64: 1621 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1622 break; 1623 case INDEX_op_st_i64: 1624 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1625 break; 1626 1627 case INDEX_op_qemu_ld_a32_i32: 1628 case INDEX_op_qemu_ld_a64_i32: 1629 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1630 break; 1631 case INDEX_op_qemu_ld_a32_i64: 1632 case INDEX_op_qemu_ld_a64_i64: 1633 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1634 break; 1635 case INDEX_op_qemu_ld_a32_i128: 1636 case INDEX_op_qemu_ld_a64_i128: 1637 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true); 1638 break; 1639 case INDEX_op_qemu_st_a32_i32: 1640 case INDEX_op_qemu_st_a64_i32: 1641 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1642 break; 1643 case INDEX_op_qemu_st_a32_i64: 1644 case INDEX_op_qemu_st_a64_i64: 1645 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1646 break; 1647 case INDEX_op_qemu_st_a32_i128: 1648 case INDEX_op_qemu_st_a64_i128: 1649 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false); 1650 break; 1651 1652 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1653 case INDEX_op_mov_i64: 1654 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1655 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1656 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1657 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1658 case INDEX_op_ext8s_i64: 1659 case INDEX_op_ext8u_i32: 1660 case INDEX_op_ext8u_i64: 1661 case INDEX_op_ext16s_i32: 1662 case INDEX_op_ext16s_i64: 1663 case INDEX_op_ext16u_i32: 1664 case INDEX_op_ext16u_i64: 1665 case INDEX_op_ext32s_i64: 1666 case INDEX_op_ext32u_i64: 1667 case INDEX_op_ext_i32_i64: 1668 case INDEX_op_extu_i32_i64: 1669 case INDEX_op_extrl_i64_i32: 1670 default: 1671 g_assert_not_reached(); 1672 } 1673} 1674 1675static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 1676 TCGReg rd, TCGReg rs) 1677{ 1678 switch (vece) { 1679 case MO_8: 1680 tcg_out_opc_vreplgr2vr_b(s, rd, rs); 1681 break; 1682 case MO_16: 1683 tcg_out_opc_vreplgr2vr_h(s, rd, rs); 1684 break; 1685 case MO_32: 1686 tcg_out_opc_vreplgr2vr_w(s, rd, rs); 1687 break; 1688 case MO_64: 1689 tcg_out_opc_vreplgr2vr_d(s, rd, rs); 1690 break; 1691 default: 1692 g_assert_not_reached(); 1693 } 1694 return true; 1695} 1696 1697static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 1698 TCGReg r, TCGReg base, intptr_t offset) 1699{ 1700 /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */ 1701 if (offset < -0x800 || offset > 0x7ff || \ 1702 (offset & ((1 << vece) - 1)) != 0) { 1703 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset); 1704 base = TCG_REG_TMP0; 1705 offset = 0; 1706 } 1707 offset >>= vece; 1708 1709 switch (vece) { 1710 case MO_8: 1711 tcg_out_opc_vldrepl_b(s, r, base, offset); 1712 break; 1713 case MO_16: 1714 tcg_out_opc_vldrepl_h(s, r, base, offset); 1715 break; 1716 case MO_32: 1717 tcg_out_opc_vldrepl_w(s, r, base, offset); 1718 break; 1719 case MO_64: 1720 tcg_out_opc_vldrepl_d(s, r, base, offset); 1721 break; 1722 default: 1723 g_assert_not_reached(); 1724 } 1725 return true; 1726} 1727 1728static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 1729 TCGReg rd, int64_t v64) 1730{ 1731 /* Try vldi if imm can fit */ 1732 int64_t value = sextract64(v64, 0, 8 << vece); 1733 if (-0x200 <= value && value <= 0x1FF) { 1734 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF); 1735 tcg_out_opc_vldi(s, rd, imm); 1736 return; 1737 } 1738 1739 /* TODO: vldi patterns when imm 12 is set */ 1740 1741 /* Fallback to vreplgr2vr */ 1742 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value); 1743 switch (vece) { 1744 case MO_8: 1745 tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0); 1746 break; 1747 case MO_16: 1748 tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0); 1749 break; 1750 case MO_32: 1751 tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0); 1752 break; 1753 case MO_64: 1754 tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0); 1755 break; 1756 default: 1757 g_assert_not_reached(); 1758 } 1759} 1760 1761static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0, 1762 const TCGArg a1, const TCGArg a2, 1763 bool a2_is_const, bool is_add) 1764{ 1765 static const LoongArchInsn add_vec_insn[4] = { 1766 OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D 1767 }; 1768 static const LoongArchInsn add_vec_imm_insn[4] = { 1769 OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU 1770 }; 1771 static const LoongArchInsn sub_vec_insn[4] = { 1772 OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D 1773 }; 1774 static const LoongArchInsn sub_vec_imm_insn[4] = { 1775 OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU 1776 }; 1777 1778 if (a2_is_const) { 1779 int64_t value = sextract64(a2, 0, 8 << vece); 1780 if (!is_add) { 1781 value = -value; 1782 } 1783 1784 /* Try vaddi/vsubi */ 1785 if (0 <= value && value <= 0x1f) { 1786 tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \ 1787 a1, value)); 1788 return; 1789 } else if (-0x1f <= value && value < 0) { 1790 tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \ 1791 a1, -value)); 1792 return; 1793 } 1794 1795 /* constraint TCG_CT_CONST_VADD ensures unreachable */ 1796 g_assert_not_reached(); 1797 } 1798 1799 if (is_add) { 1800 tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2)); 1801 } else { 1802 tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2)); 1803 } 1804} 1805 1806static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 1807 unsigned vecl, unsigned vece, 1808 const TCGArg args[TCG_MAX_OP_ARGS], 1809 const int const_args[TCG_MAX_OP_ARGS]) 1810{ 1811 TCGType type = vecl + TCG_TYPE_V64; 1812 TCGArg a0, a1, a2, a3; 1813 TCGReg temp_vec = TCG_VEC_TMP0; 1814 1815 static const LoongArchInsn cmp_vec_insn[16][4] = { 1816 [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D}, 1817 [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D}, 1818 [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU}, 1819 [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D}, 1820 [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU}, 1821 }; 1822 static const LoongArchInsn cmp_vec_imm_insn[16][4] = { 1823 [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D}, 1824 [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D}, 1825 [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU}, 1826 [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D}, 1827 [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU}, 1828 }; 1829 LoongArchInsn insn; 1830 static const LoongArchInsn neg_vec_insn[4] = { 1831 OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D 1832 }; 1833 static const LoongArchInsn mul_vec_insn[4] = { 1834 OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D 1835 }; 1836 static const LoongArchInsn smin_vec_insn[4] = { 1837 OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D 1838 }; 1839 static const LoongArchInsn umin_vec_insn[4] = { 1840 OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU 1841 }; 1842 static const LoongArchInsn smax_vec_insn[4] = { 1843 OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D 1844 }; 1845 static const LoongArchInsn umax_vec_insn[4] = { 1846 OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU 1847 }; 1848 static const LoongArchInsn ssadd_vec_insn[4] = { 1849 OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D 1850 }; 1851 static const LoongArchInsn usadd_vec_insn[4] = { 1852 OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU 1853 }; 1854 static const LoongArchInsn sssub_vec_insn[4] = { 1855 OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D 1856 }; 1857 static const LoongArchInsn ussub_vec_insn[4] = { 1858 OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU 1859 }; 1860 static const LoongArchInsn shlv_vec_insn[4] = { 1861 OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D 1862 }; 1863 static const LoongArchInsn shrv_vec_insn[4] = { 1864 OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D 1865 }; 1866 static const LoongArchInsn sarv_vec_insn[4] = { 1867 OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D 1868 }; 1869 static const LoongArchInsn shli_vec_insn[4] = { 1870 OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D 1871 }; 1872 static const LoongArchInsn shri_vec_insn[4] = { 1873 OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D 1874 }; 1875 static const LoongArchInsn sari_vec_insn[4] = { 1876 OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D 1877 }; 1878 static const LoongArchInsn rotrv_vec_insn[4] = { 1879 OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D 1880 }; 1881 1882 a0 = args[0]; 1883 a1 = args[1]; 1884 a2 = args[2]; 1885 a3 = args[3]; 1886 1887 /* Currently only supports V128 */ 1888 tcg_debug_assert(type == TCG_TYPE_V128); 1889 1890 switch (opc) { 1891 case INDEX_op_st_vec: 1892 tcg_out_st(s, type, a0, a1, a2); 1893 break; 1894 case INDEX_op_ld_vec: 1895 tcg_out_ld(s, type, a0, a1, a2); 1896 break; 1897 case INDEX_op_and_vec: 1898 tcg_out_opc_vand_v(s, a0, a1, a2); 1899 break; 1900 case INDEX_op_andc_vec: 1901 /* 1902 * vandn vd, vj, vk: vd = vk & ~vj 1903 * andc_vec vd, vj, vk: vd = vj & ~vk 1904 * vk and vk are swapped 1905 */ 1906 tcg_out_opc_vandn_v(s, a0, a2, a1); 1907 break; 1908 case INDEX_op_or_vec: 1909 tcg_out_opc_vor_v(s, a0, a1, a2); 1910 break; 1911 case INDEX_op_orc_vec: 1912 tcg_out_opc_vorn_v(s, a0, a1, a2); 1913 break; 1914 case INDEX_op_xor_vec: 1915 tcg_out_opc_vxor_v(s, a0, a1, a2); 1916 break; 1917 case INDEX_op_nor_vec: 1918 tcg_out_opc_vnor_v(s, a0, a1, a2); 1919 break; 1920 case INDEX_op_not_vec: 1921 tcg_out_opc_vnor_v(s, a0, a1, a1); 1922 break; 1923 case INDEX_op_cmp_vec: 1924 { 1925 TCGCond cond = args[3]; 1926 if (const_args[2]) { 1927 /* 1928 * cmp_vec dest, src, value 1929 * Try vseqi/vslei/vslti 1930 */ 1931 int64_t value = sextract64(a2, 0, 8 << vece); 1932 if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \ 1933 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) { 1934 tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \ 1935 a0, a1, value)); 1936 break; 1937 } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) && 1938 (0x00 <= value && value <= 0x1f)) { 1939 tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \ 1940 a0, a1, value)); 1941 break; 1942 } 1943 1944 /* 1945 * Fallback to: 1946 * dupi_vec temp, a2 1947 * cmp_vec a0, a1, temp, cond 1948 */ 1949 tcg_out_dupi_vec(s, type, vece, temp_vec, a2); 1950 a2 = temp_vec; 1951 } 1952 1953 insn = cmp_vec_insn[cond][vece]; 1954 if (insn == 0) { 1955 TCGArg t; 1956 t = a1, a1 = a2, a2 = t; 1957 cond = tcg_swap_cond(cond); 1958 insn = cmp_vec_insn[cond][vece]; 1959 tcg_debug_assert(insn != 0); 1960 } 1961 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2)); 1962 } 1963 break; 1964 case INDEX_op_add_vec: 1965 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true); 1966 break; 1967 case INDEX_op_sub_vec: 1968 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false); 1969 break; 1970 case INDEX_op_neg_vec: 1971 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1)); 1972 break; 1973 case INDEX_op_mul_vec: 1974 tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2)); 1975 break; 1976 case INDEX_op_smin_vec: 1977 tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2)); 1978 break; 1979 case INDEX_op_smax_vec: 1980 tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2)); 1981 break; 1982 case INDEX_op_umin_vec: 1983 tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2)); 1984 break; 1985 case INDEX_op_umax_vec: 1986 tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2)); 1987 break; 1988 case INDEX_op_ssadd_vec: 1989 tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2)); 1990 break; 1991 case INDEX_op_usadd_vec: 1992 tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2)); 1993 break; 1994 case INDEX_op_sssub_vec: 1995 tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2)); 1996 break; 1997 case INDEX_op_ussub_vec: 1998 tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2)); 1999 break; 2000 case INDEX_op_shlv_vec: 2001 tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2)); 2002 break; 2003 case INDEX_op_shrv_vec: 2004 tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2)); 2005 break; 2006 case INDEX_op_sarv_vec: 2007 tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2)); 2008 break; 2009 case INDEX_op_shli_vec: 2010 tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2)); 2011 break; 2012 case INDEX_op_shri_vec: 2013 tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2)); 2014 break; 2015 case INDEX_op_sari_vec: 2016 tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2)); 2017 break; 2018 case INDEX_op_rotrv_vec: 2019 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2)); 2020 break; 2021 case INDEX_op_rotlv_vec: 2022 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */ 2023 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2)); 2024 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, 2025 temp_vec)); 2026 break; 2027 case INDEX_op_rotli_vec: 2028 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */ 2029 a2 = extract32(-a2, 0, 3 + vece); 2030 switch (vece) { 2031 case MO_8: 2032 tcg_out_opc_vrotri_b(s, a0, a1, a2); 2033 break; 2034 case MO_16: 2035 tcg_out_opc_vrotri_h(s, a0, a1, a2); 2036 break; 2037 case MO_32: 2038 tcg_out_opc_vrotri_w(s, a0, a1, a2); 2039 break; 2040 case MO_64: 2041 tcg_out_opc_vrotri_d(s, a0, a1, a2); 2042 break; 2043 default: 2044 g_assert_not_reached(); 2045 } 2046 break; 2047 case INDEX_op_bitsel_vec: 2048 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */ 2049 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1); 2050 break; 2051 case INDEX_op_dupm_vec: 2052 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 2053 break; 2054 default: 2055 g_assert_not_reached(); 2056 } 2057} 2058 2059int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 2060{ 2061 switch (opc) { 2062 case INDEX_op_ld_vec: 2063 case INDEX_op_st_vec: 2064 case INDEX_op_dup_vec: 2065 case INDEX_op_dupm_vec: 2066 case INDEX_op_cmp_vec: 2067 case INDEX_op_add_vec: 2068 case INDEX_op_sub_vec: 2069 case INDEX_op_and_vec: 2070 case INDEX_op_andc_vec: 2071 case INDEX_op_or_vec: 2072 case INDEX_op_orc_vec: 2073 case INDEX_op_xor_vec: 2074 case INDEX_op_nor_vec: 2075 case INDEX_op_not_vec: 2076 case INDEX_op_neg_vec: 2077 case INDEX_op_mul_vec: 2078 case INDEX_op_smin_vec: 2079 case INDEX_op_smax_vec: 2080 case INDEX_op_umin_vec: 2081 case INDEX_op_umax_vec: 2082 case INDEX_op_ssadd_vec: 2083 case INDEX_op_usadd_vec: 2084 case INDEX_op_sssub_vec: 2085 case INDEX_op_ussub_vec: 2086 case INDEX_op_shlv_vec: 2087 case INDEX_op_shrv_vec: 2088 case INDEX_op_sarv_vec: 2089 case INDEX_op_bitsel_vec: 2090 return 1; 2091 default: 2092 return 0; 2093 } 2094} 2095 2096void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 2097 TCGArg a0, ...) 2098{ 2099 g_assert_not_reached(); 2100} 2101 2102static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 2103{ 2104 switch (op) { 2105 case INDEX_op_goto_ptr: 2106 return C_O0_I1(r); 2107 2108 case INDEX_op_st8_i32: 2109 case INDEX_op_st8_i64: 2110 case INDEX_op_st16_i32: 2111 case INDEX_op_st16_i64: 2112 case INDEX_op_st32_i64: 2113 case INDEX_op_st_i32: 2114 case INDEX_op_st_i64: 2115 case INDEX_op_qemu_st_a32_i32: 2116 case INDEX_op_qemu_st_a64_i32: 2117 case INDEX_op_qemu_st_a32_i64: 2118 case INDEX_op_qemu_st_a64_i64: 2119 return C_O0_I2(rZ, r); 2120 2121 case INDEX_op_qemu_ld_a32_i128: 2122 case INDEX_op_qemu_ld_a64_i128: 2123 return C_N2_I1(r, r, r); 2124 2125 case INDEX_op_qemu_st_a32_i128: 2126 case INDEX_op_qemu_st_a64_i128: 2127 return C_O0_I3(r, r, r); 2128 2129 case INDEX_op_brcond_i32: 2130 case INDEX_op_brcond_i64: 2131 return C_O0_I2(rZ, rZ); 2132 2133 case INDEX_op_ext8s_i32: 2134 case INDEX_op_ext8s_i64: 2135 case INDEX_op_ext8u_i32: 2136 case INDEX_op_ext8u_i64: 2137 case INDEX_op_ext16s_i32: 2138 case INDEX_op_ext16s_i64: 2139 case INDEX_op_ext16u_i32: 2140 case INDEX_op_ext16u_i64: 2141 case INDEX_op_ext32s_i64: 2142 case INDEX_op_ext32u_i64: 2143 case INDEX_op_extu_i32_i64: 2144 case INDEX_op_extrl_i64_i32: 2145 case INDEX_op_extrh_i64_i32: 2146 case INDEX_op_ext_i32_i64: 2147 case INDEX_op_neg_i32: 2148 case INDEX_op_neg_i64: 2149 case INDEX_op_not_i32: 2150 case INDEX_op_not_i64: 2151 case INDEX_op_extract_i32: 2152 case INDEX_op_extract_i64: 2153 case INDEX_op_bswap16_i32: 2154 case INDEX_op_bswap16_i64: 2155 case INDEX_op_bswap32_i32: 2156 case INDEX_op_bswap32_i64: 2157 case INDEX_op_bswap64_i64: 2158 case INDEX_op_ld8s_i32: 2159 case INDEX_op_ld8s_i64: 2160 case INDEX_op_ld8u_i32: 2161 case INDEX_op_ld8u_i64: 2162 case INDEX_op_ld16s_i32: 2163 case INDEX_op_ld16s_i64: 2164 case INDEX_op_ld16u_i32: 2165 case INDEX_op_ld16u_i64: 2166 case INDEX_op_ld32s_i64: 2167 case INDEX_op_ld32u_i64: 2168 case INDEX_op_ld_i32: 2169 case INDEX_op_ld_i64: 2170 case INDEX_op_qemu_ld_a32_i32: 2171 case INDEX_op_qemu_ld_a64_i32: 2172 case INDEX_op_qemu_ld_a32_i64: 2173 case INDEX_op_qemu_ld_a64_i64: 2174 return C_O1_I1(r, r); 2175 2176 case INDEX_op_andc_i32: 2177 case INDEX_op_andc_i64: 2178 case INDEX_op_orc_i32: 2179 case INDEX_op_orc_i64: 2180 /* 2181 * LoongArch insns for these ops don't have reg-imm forms, but we 2182 * can express using andi/ori if ~constant satisfies 2183 * TCG_CT_CONST_U12. 2184 */ 2185 return C_O1_I2(r, r, rC); 2186 2187 case INDEX_op_shl_i32: 2188 case INDEX_op_shl_i64: 2189 case INDEX_op_shr_i32: 2190 case INDEX_op_shr_i64: 2191 case INDEX_op_sar_i32: 2192 case INDEX_op_sar_i64: 2193 case INDEX_op_rotl_i32: 2194 case INDEX_op_rotl_i64: 2195 case INDEX_op_rotr_i32: 2196 case INDEX_op_rotr_i64: 2197 return C_O1_I2(r, r, ri); 2198 2199 case INDEX_op_add_i32: 2200 return C_O1_I2(r, r, ri); 2201 case INDEX_op_add_i64: 2202 return C_O1_I2(r, r, rJ); 2203 2204 case INDEX_op_and_i32: 2205 case INDEX_op_and_i64: 2206 case INDEX_op_nor_i32: 2207 case INDEX_op_nor_i64: 2208 case INDEX_op_or_i32: 2209 case INDEX_op_or_i64: 2210 case INDEX_op_xor_i32: 2211 case INDEX_op_xor_i64: 2212 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 2213 return C_O1_I2(r, r, rU); 2214 2215 case INDEX_op_clz_i32: 2216 case INDEX_op_clz_i64: 2217 case INDEX_op_ctz_i32: 2218 case INDEX_op_ctz_i64: 2219 return C_O1_I2(r, r, rW); 2220 2221 case INDEX_op_deposit_i32: 2222 case INDEX_op_deposit_i64: 2223 /* Must deposit into the same register as input */ 2224 return C_O1_I2(r, 0, rZ); 2225 2226 case INDEX_op_sub_i32: 2227 case INDEX_op_setcond_i32: 2228 return C_O1_I2(r, rZ, ri); 2229 case INDEX_op_sub_i64: 2230 case INDEX_op_setcond_i64: 2231 return C_O1_I2(r, rZ, rJ); 2232 2233 case INDEX_op_mul_i32: 2234 case INDEX_op_mul_i64: 2235 case INDEX_op_mulsh_i32: 2236 case INDEX_op_mulsh_i64: 2237 case INDEX_op_muluh_i32: 2238 case INDEX_op_muluh_i64: 2239 case INDEX_op_div_i32: 2240 case INDEX_op_div_i64: 2241 case INDEX_op_divu_i32: 2242 case INDEX_op_divu_i64: 2243 case INDEX_op_rem_i32: 2244 case INDEX_op_rem_i64: 2245 case INDEX_op_remu_i32: 2246 case INDEX_op_remu_i64: 2247 return C_O1_I2(r, rZ, rZ); 2248 2249 case INDEX_op_movcond_i32: 2250 case INDEX_op_movcond_i64: 2251 return C_O1_I4(r, rZ, rJ, rZ, rZ); 2252 2253 case INDEX_op_ld_vec: 2254 case INDEX_op_dupm_vec: 2255 case INDEX_op_dup_vec: 2256 return C_O1_I1(w, r); 2257 2258 case INDEX_op_st_vec: 2259 return C_O0_I2(w, r); 2260 2261 case INDEX_op_cmp_vec: 2262 return C_O1_I2(w, w, wM); 2263 2264 case INDEX_op_add_vec: 2265 case INDEX_op_sub_vec: 2266 return C_O1_I2(w, w, wA); 2267 2268 case INDEX_op_and_vec: 2269 case INDEX_op_andc_vec: 2270 case INDEX_op_or_vec: 2271 case INDEX_op_orc_vec: 2272 case INDEX_op_xor_vec: 2273 case INDEX_op_nor_vec: 2274 case INDEX_op_mul_vec: 2275 case INDEX_op_smin_vec: 2276 case INDEX_op_smax_vec: 2277 case INDEX_op_umin_vec: 2278 case INDEX_op_umax_vec: 2279 case INDEX_op_ssadd_vec: 2280 case INDEX_op_usadd_vec: 2281 case INDEX_op_sssub_vec: 2282 case INDEX_op_ussub_vec: 2283 case INDEX_op_shlv_vec: 2284 case INDEX_op_shrv_vec: 2285 case INDEX_op_sarv_vec: 2286 case INDEX_op_rotrv_vec: 2287 case INDEX_op_rotlv_vec: 2288 return C_O1_I2(w, w, w); 2289 2290 case INDEX_op_not_vec: 2291 case INDEX_op_neg_vec: 2292 case INDEX_op_shli_vec: 2293 case INDEX_op_shri_vec: 2294 case INDEX_op_sari_vec: 2295 case INDEX_op_rotli_vec: 2296 return C_O1_I1(w, w); 2297 2298 case INDEX_op_bitsel_vec: 2299 return C_O1_I3(w, w, w, w); 2300 2301 default: 2302 g_assert_not_reached(); 2303 } 2304} 2305 2306static const int tcg_target_callee_save_regs[] = { 2307 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 2308 TCG_REG_S1, 2309 TCG_REG_S2, 2310 TCG_REG_S3, 2311 TCG_REG_S4, 2312 TCG_REG_S5, 2313 TCG_REG_S6, 2314 TCG_REG_S7, 2315 TCG_REG_S8, 2316 TCG_REG_S9, 2317 TCG_REG_RA, /* should be last for ABI compliance */ 2318}; 2319 2320/* Stack frame parameters. */ 2321#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 2322#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 2323#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 2324#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 2325 + TCG_TARGET_STACK_ALIGN - 1) \ 2326 & -TCG_TARGET_STACK_ALIGN) 2327#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 2328 2329/* We're expecting to be able to use an immediate for frame allocation. */ 2330QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 2331 2332/* Generate global QEMU prologue and epilogue code */ 2333static void tcg_target_qemu_prologue(TCGContext *s) 2334{ 2335 int i; 2336 2337 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 2338 2339 /* TB prologue */ 2340 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 2341 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2342 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2343 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2344 } 2345 2346 if (!tcg_use_softmmu && guest_base) { 2347 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 2348 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 2349 } 2350 2351 /* Call generated code */ 2352 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 2353 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 2354 2355 /* Return path for goto_ptr. Set return value to 0 */ 2356 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 2357 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 2358 2359 /* TB epilogue */ 2360 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 2361 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2362 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 2363 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 2364 } 2365 2366 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 2367 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 2368} 2369 2370static void tcg_out_tb_start(TCGContext *s) 2371{ 2372 /* nothing to do */ 2373} 2374 2375static void tcg_target_init(TCGContext *s) 2376{ 2377 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 2378 2379 /* Server and desktop class cpus have UAL; embedded cpus do not. */ 2380 if (!(hwcap & HWCAP_LOONGARCH_UAL)) { 2381 error_report("TCG: unaligned access support required; exiting"); 2382 exit(EXIT_FAILURE); 2383 } 2384 2385 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 2386 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 2387 2388 tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS; 2389 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 2390 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 2391 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 2392 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 2393 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 2394 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 2395 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 2396 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 2397 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 2398 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 2399 2400 if (cpuinfo & CPUINFO_LSX) { 2401 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 2402 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24); 2403 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25); 2404 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26); 2405 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27); 2406 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28); 2407 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29); 2408 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30); 2409 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31); 2410 } 2411 2412 s->reserved_regs = 0; 2413 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 2414 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 2415 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 2416 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 2417 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 2418 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 2419 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 2420 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); 2421} 2422 2423typedef struct { 2424 DebugFrameHeader h; 2425 uint8_t fde_def_cfa[4]; 2426 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 2427} DebugFrame; 2428 2429#define ELF_HOST_MACHINE EM_LOONGARCH 2430 2431static const DebugFrame debug_frame = { 2432 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 2433 .h.cie.id = -1, 2434 .h.cie.version = 1, 2435 .h.cie.code_align = 1, 2436 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 2437 .h.cie.return_column = TCG_REG_RA, 2438 2439 /* Total FDE size does not include the "len" member. */ 2440 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 2441 2442 .fde_def_cfa = { 2443 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 2444 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 2445 (FRAME_SIZE >> 7) 2446 }, 2447 .fde_reg_ofs = { 2448 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 2449 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 2450 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 2451 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 2452 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 2453 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 2454 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 2455 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 2456 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 2457 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 2458 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 2459 } 2460}; 2461 2462void tcg_register_jit(const void *buf, size_t buf_size) 2463{ 2464 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 2465} 2466