1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name> 5 * 6 * Based on tcg/riscv/tcg-target.c.inc 7 * 8 * Copyright (c) 2018 SiFive, Inc 9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> 10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> 11 * Copyright (c) 2008 Fabrice Bellard 12 * 13 * Permission is hereby granted, free of charge, to any person obtaining a copy 14 * of this software and associated documentation files (the "Software"), to deal 15 * in the Software without restriction, including without limitation the rights 16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 * copies of the Software, and to permit persons to whom the Software is 18 * furnished to do so, subject to the following conditions: 19 * 20 * The above copyright notice and this permission notice shall be included in 21 * all copies or substantial portions of the Software. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 * THE SOFTWARE. 30 */ 31 32#include "../tcg-ldst.c.inc" 33#include <asm/hwcap.h> 34 35bool use_lsx_instructions; 36 37#ifdef CONFIG_DEBUG_TCG 38static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 39 "zero", 40 "ra", 41 "tp", 42 "sp", 43 "a0", 44 "a1", 45 "a2", 46 "a3", 47 "a4", 48 "a5", 49 "a6", 50 "a7", 51 "t0", 52 "t1", 53 "t2", 54 "t3", 55 "t4", 56 "t5", 57 "t6", 58 "t7", 59 "t8", 60 "r21", /* reserved in the LP64* ABI, hence no ABI name */ 61 "s9", 62 "s0", 63 "s1", 64 "s2", 65 "s3", 66 "s4", 67 "s5", 68 "s6", 69 "s7", 70 "s8", 71 "vr0", 72 "vr1", 73 "vr2", 74 "vr3", 75 "vr4", 76 "vr5", 77 "vr6", 78 "vr7", 79 "vr8", 80 "vr9", 81 "vr10", 82 "vr11", 83 "vr12", 84 "vr13", 85 "vr14", 86 "vr15", 87 "vr16", 88 "vr17", 89 "vr18", 90 "vr19", 91 "vr20", 92 "vr21", 93 "vr22", 94 "vr23", 95 "vr24", 96 "vr25", 97 "vr26", 98 "vr27", 99 "vr28", 100 "vr29", 101 "vr30", 102 "vr31", 103}; 104#endif 105 106static const int tcg_target_reg_alloc_order[] = { 107 /* Registers preserved across calls */ 108 /* TCG_REG_S0 reserved for TCG_AREG0 */ 109 TCG_REG_S1, 110 TCG_REG_S2, 111 TCG_REG_S3, 112 TCG_REG_S4, 113 TCG_REG_S5, 114 TCG_REG_S6, 115 TCG_REG_S7, 116 TCG_REG_S8, 117 TCG_REG_S9, 118 119 /* Registers (potentially) clobbered across calls */ 120 TCG_REG_T0, 121 TCG_REG_T1, 122 TCG_REG_T2, 123 TCG_REG_T3, 124 TCG_REG_T4, 125 TCG_REG_T5, 126 TCG_REG_T6, 127 TCG_REG_T7, 128 TCG_REG_T8, 129 130 /* Argument registers, opposite order of allocation. */ 131 TCG_REG_A7, 132 TCG_REG_A6, 133 TCG_REG_A5, 134 TCG_REG_A4, 135 TCG_REG_A3, 136 TCG_REG_A2, 137 TCG_REG_A1, 138 TCG_REG_A0, 139 140 /* Vector registers */ 141 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, 142 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, 143 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, 144 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, 145 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, 146 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, 147 /* V24 - V31 are caller-saved, and skipped. */ 148}; 149 150static const int tcg_target_call_iarg_regs[] = { 151 TCG_REG_A0, 152 TCG_REG_A1, 153 TCG_REG_A2, 154 TCG_REG_A3, 155 TCG_REG_A4, 156 TCG_REG_A5, 157 TCG_REG_A6, 158 TCG_REG_A7, 159}; 160 161static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 162{ 163 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 164 tcg_debug_assert(slot >= 0 && slot <= 1); 165 return TCG_REG_A0 + slot; 166} 167 168#ifndef CONFIG_SOFTMMU 169#define USE_GUEST_BASE (guest_base != 0) 170#define TCG_GUEST_BASE_REG TCG_REG_S1 171#endif 172 173#define TCG_CT_CONST_ZERO 0x100 174#define TCG_CT_CONST_S12 0x200 175#define TCG_CT_CONST_S32 0x400 176#define TCG_CT_CONST_U12 0x800 177#define TCG_CT_CONST_C12 0x1000 178#define TCG_CT_CONST_WSZ 0x2000 179 180#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 181#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) 182 183static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) 184{ 185 return sextract64(val, pos, len); 186} 187 188/* test if a constant matches the constraint */ 189static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) 190{ 191 if (ct & TCG_CT_CONST) { 192 return true; 193 } 194 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 195 return true; 196 } 197 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { 198 return true; 199 } 200 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 201 return true; 202 } 203 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { 204 return true; 205 } 206 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { 207 return true; 208 } 209 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { 210 return true; 211 } 212 return false; 213} 214 215/* 216 * Relocations 217 */ 218 219/* 220 * Relocation records defined in LoongArch ELF psABI v1.00 is way too 221 * complicated; a whopping stack machine is needed to stuff the fields, at 222 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are 223 * needed. 224 * 225 * Hence, define our own simpler relocation types. Numbers are chosen as to 226 * not collide with potential future additions to the true ELF relocation 227 * type enum. 228 */ 229 230/* Field Sk16, shifted right by 2; suitable for conditional jumps */ 231#define R_LOONGARCH_BR_SK16 256 232/* Field Sd10k16, shifted right by 2; suitable for B and BL */ 233#define R_LOONGARCH_BR_SD10K16 257 234 235static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) 236{ 237 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 238 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 239 240 tcg_debug_assert((offset & 3) == 0); 241 offset >>= 2; 242 if (offset == sextreg(offset, 0, 16)) { 243 *src_rw = deposit64(*src_rw, 10, 16, offset); 244 return true; 245 } 246 247 return false; 248} 249 250static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, 251 const tcg_insn_unit *target) 252{ 253 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 254 intptr_t offset = (intptr_t)target - (intptr_t)src_rx; 255 256 tcg_debug_assert((offset & 3) == 0); 257 offset >>= 2; 258 if (offset == sextreg(offset, 0, 26)) { 259 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ 260 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ 261 return true; 262 } 263 264 return false; 265} 266 267static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 268 intptr_t value, intptr_t addend) 269{ 270 tcg_debug_assert(addend == 0); 271 switch (type) { 272 case R_LOONGARCH_BR_SK16: 273 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); 274 case R_LOONGARCH_BR_SD10K16: 275 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); 276 default: 277 g_assert_not_reached(); 278 } 279} 280 281#include "tcg-insn-defs.c.inc" 282 283/* 284 * TCG intrinsics 285 */ 286 287static void tcg_out_mb(TCGContext *s, TCGArg a0) 288{ 289 /* Baseline LoongArch only has the full barrier, unfortunately. */ 290 tcg_out_opc_dbar(s, 0); 291} 292 293static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 294{ 295 if (ret == arg) { 296 return true; 297 } 298 switch (type) { 299 case TCG_TYPE_I32: 300 case TCG_TYPE_I64: 301 /* 302 * Conventional register-register move used in LoongArch is 303 * `or dst, src, zero`. 304 */ 305 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); 306 break; 307 default: 308 g_assert_not_reached(); 309 } 310 return true; 311} 312 313/* Loads a 32-bit immediate into rd, sign-extended. */ 314static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) 315{ 316 tcg_target_long lo = sextreg(val, 0, 12); 317 tcg_target_long hi12 = sextreg(val, 12, 20); 318 319 /* Single-instruction cases. */ 320 if (hi12 == 0) { 321 /* val fits in uimm12: ori rd, zero, val */ 322 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); 323 return; 324 } 325 if (hi12 == sextreg(lo, 12, 20)) { 326 /* val fits in simm12: addi.w rd, zero, val */ 327 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); 328 return; 329 } 330 331 /* High bits must be set; load with lu12i.w + optional ori. */ 332 tcg_out_opc_lu12i_w(s, rd, hi12); 333 if (lo != 0) { 334 tcg_out_opc_ori(s, rd, rd, lo & 0xfff); 335 } 336} 337 338static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, 339 tcg_target_long val) 340{ 341 /* 342 * LoongArch conventionally loads 64-bit immediates in at most 4 steps, 343 * with dedicated instructions for filling the respective bitfields 344 * below: 345 * 346 * 6 5 4 3 347 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 348 * +-----------------------+---------------------------------------+... 349 * | hi52 | hi32 | 350 * +-----------------------+---------------------------------------+... 351 * 3 2 1 352 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 353 * ...+-------------------------------------+-------------------------+ 354 * | hi12 | lo | 355 * ...+-------------------------------------+-------------------------+ 356 * 357 * Check if val belong to one of the several fast cases, before falling 358 * back to the slow path. 359 */ 360 361 intptr_t pc_offset; 362 tcg_target_long val_lo, val_hi, pc_hi, offset_hi; 363 tcg_target_long hi12, hi32, hi52; 364 365 /* Value fits in signed i32. */ 366 if (type == TCG_TYPE_I32 || val == (int32_t)val) { 367 tcg_out_movi_i32(s, rd, val); 368 return; 369 } 370 371 /* PC-relative cases. */ 372 pc_offset = tcg_pcrel_diff(s, (void *)val); 373 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { 374 /* Single pcaddu2i. */ 375 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); 376 return; 377 } 378 379 if (pc_offset == (int32_t)pc_offset) { 380 /* Offset within 32 bits; load with pcalau12i + ori. */ 381 val_lo = sextreg(val, 0, 12); 382 val_hi = val >> 12; 383 pc_hi = (val - pc_offset) >> 12; 384 offset_hi = val_hi - pc_hi; 385 386 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); 387 tcg_out_opc_pcalau12i(s, rd, offset_hi); 388 if (val_lo != 0) { 389 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); 390 } 391 return; 392 } 393 394 hi12 = sextreg(val, 12, 20); 395 hi32 = sextreg(val, 32, 20); 396 hi52 = sextreg(val, 52, 12); 397 398 /* Single cu52i.d case. */ 399 if ((hi52 != 0) && (ctz64(val) >= 52)) { 400 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); 401 return; 402 } 403 404 /* Slow path. Initialize the low 32 bits, then concat high bits. */ 405 tcg_out_movi_i32(s, rd, val); 406 407 /* Load hi32 and hi52 explicitly when they are unexpected values. */ 408 if (hi32 != sextreg(hi12, 20, 20)) { 409 tcg_out_opc_cu32i_d(s, rd, hi32); 410 } 411 412 if (hi52 != sextreg(hi32, 20, 12)) { 413 tcg_out_opc_cu52i_d(s, rd, rd, hi52); 414 } 415} 416 417static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd, 418 TCGReg rs, tcg_target_long imm) 419{ 420 tcg_target_long lo12 = sextreg(imm, 0, 12); 421 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16); 422 423 /* 424 * Note that there's a hole in between hi16 and lo12: 425 * 426 * 3 2 1 0 427 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 428 * ...+-------------------------------+-------+-----------------------+ 429 * | hi16 | | lo12 | 430 * ...+-------------------------------+-------+-----------------------+ 431 * 432 * For bits within that hole, it's more efficient to use LU12I and ADD. 433 */ 434 if (imm == (hi16 << 16) + lo12) { 435 if (hi16) { 436 tcg_out_opc_addu16i_d(s, rd, rs, hi16); 437 rs = rd; 438 } 439 if (type == TCG_TYPE_I32) { 440 tcg_out_opc_addi_w(s, rd, rs, lo12); 441 } else if (lo12) { 442 tcg_out_opc_addi_d(s, rd, rs, lo12); 443 } else { 444 tcg_out_mov(s, type, rd, rs); 445 } 446 } else { 447 tcg_out_movi(s, type, TCG_REG_TMP0, imm); 448 if (type == TCG_TYPE_I32) { 449 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0); 450 } else { 451 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0); 452 } 453 } 454} 455 456static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 457{ 458 return false; 459} 460 461static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 462 tcg_target_long imm) 463{ 464 /* This function is only used for passing structs by reference. */ 465 g_assert_not_reached(); 466} 467 468static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) 469{ 470 tcg_out_opc_andi(s, ret, arg, 0xff); 471} 472 473static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) 474{ 475 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); 476} 477 478static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) 479{ 480 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); 481} 482 483static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 484{ 485 tcg_out_opc_sext_b(s, ret, arg); 486} 487 488static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 489{ 490 tcg_out_opc_sext_h(s, ret, arg); 491} 492 493static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) 494{ 495 tcg_out_opc_addi_w(s, ret, arg, 0); 496} 497 498static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 499{ 500 if (ret != arg) { 501 tcg_out_ext32s(s, ret, arg); 502 } 503} 504 505static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg) 506{ 507 tcg_out_ext32u(s, ret, arg); 508} 509 510static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) 511{ 512 tcg_out_ext32s(s, ret, arg); 513} 514 515static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, 516 TCGReg a0, TCGReg a1, TCGReg a2, 517 bool c2, bool is_32bit) 518{ 519 if (c2) { 520 /* 521 * Fast path: semantics already satisfied due to constraint and 522 * insn behavior, single instruction is enough. 523 */ 524 tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); 525 /* all clz/ctz insns belong to DJ-format */ 526 tcg_out32(s, encode_dj_insn(opc, a0, a1)); 527 return; 528 } 529 530 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); 531 /* a0 = a1 ? REG_TMP0 : a2 */ 532 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); 533 tcg_out_opc_masknez(s, a0, a2, a1); 534 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); 535} 536 537#define SETCOND_INV TCG_TARGET_NB_REGS 538#define SETCOND_NEZ (SETCOND_INV << 1) 539#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) 540 541static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, 542 TCGReg arg1, tcg_target_long arg2, bool c2) 543{ 544 int flags = 0; 545 546 switch (cond) { 547 case TCG_COND_EQ: /* -> NE */ 548 case TCG_COND_GE: /* -> LT */ 549 case TCG_COND_GEU: /* -> LTU */ 550 case TCG_COND_GT: /* -> LE */ 551 case TCG_COND_GTU: /* -> LEU */ 552 cond = tcg_invert_cond(cond); 553 flags ^= SETCOND_INV; 554 break; 555 default: 556 break; 557 } 558 559 switch (cond) { 560 case TCG_COND_LE: 561 case TCG_COND_LEU: 562 /* 563 * If we have a constant input, the most efficient way to implement 564 * LE is by adding 1 and using LT. Watch out for wrap around for LEU. 565 * We don't need to care for this for LE because the constant input 566 * is still constrained to int32_t, and INT32_MAX+1 is representable 567 * in the 64-bit temporary register. 568 */ 569 if (c2) { 570 if (cond == TCG_COND_LEU) { 571 /* unsigned <= -1 is true */ 572 if (arg2 == -1) { 573 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV)); 574 return ret; 575 } 576 cond = TCG_COND_LTU; 577 } else { 578 cond = TCG_COND_LT; 579 } 580 arg2 += 1; 581 } else { 582 TCGReg tmp = arg2; 583 arg2 = arg1; 584 arg1 = tmp; 585 cond = tcg_swap_cond(cond); /* LE -> GE */ 586 cond = tcg_invert_cond(cond); /* GE -> LT */ 587 flags ^= SETCOND_INV; 588 } 589 break; 590 default: 591 break; 592 } 593 594 switch (cond) { 595 case TCG_COND_NE: 596 flags |= SETCOND_NEZ; 597 if (!c2) { 598 tcg_out_opc_xor(s, ret, arg1, arg2); 599 } else if (arg2 == 0) { 600 ret = arg1; 601 } else if (arg2 >= 0 && arg2 <= 0xfff) { 602 tcg_out_opc_xori(s, ret, arg1, arg2); 603 } else { 604 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2); 605 } 606 break; 607 608 case TCG_COND_LT: 609 case TCG_COND_LTU: 610 if (c2) { 611 if (arg2 >= -0x800 && arg2 <= 0x7ff) { 612 if (cond == TCG_COND_LT) { 613 tcg_out_opc_slti(s, ret, arg1, arg2); 614 } else { 615 tcg_out_opc_sltui(s, ret, arg1, arg2); 616 } 617 break; 618 } 619 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2); 620 arg2 = TCG_REG_TMP0; 621 } 622 if (cond == TCG_COND_LT) { 623 tcg_out_opc_slt(s, ret, arg1, arg2); 624 } else { 625 tcg_out_opc_sltu(s, ret, arg1, arg2); 626 } 627 break; 628 629 default: 630 g_assert_not_reached(); 631 break; 632 } 633 634 return ret | flags; 635} 636 637static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, 638 TCGReg arg1, tcg_target_long arg2, bool c2) 639{ 640 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); 641 642 if (tmpflags != ret) { 643 TCGReg tmp = tmpflags & ~SETCOND_FLAGS; 644 645 switch (tmpflags & SETCOND_FLAGS) { 646 case SETCOND_INV: 647 /* Intermediate result is boolean: simply invert. */ 648 tcg_out_opc_xori(s, ret, tmp, 1); 649 break; 650 case SETCOND_NEZ: 651 /* Intermediate result is zero/non-zero: test != 0. */ 652 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); 653 break; 654 case SETCOND_NEZ | SETCOND_INV: 655 /* Intermediate result is zero/non-zero: test == 0. */ 656 tcg_out_opc_sltui(s, ret, tmp, 1); 657 break; 658 default: 659 g_assert_not_reached(); 660 } 661 } 662} 663 664static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, 665 TCGReg c1, tcg_target_long c2, bool const2, 666 TCGReg v1, TCGReg v2) 667{ 668 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); 669 TCGReg t; 670 671 /* Standardize the test below to t != 0. */ 672 if (tmpflags & SETCOND_INV) { 673 t = v1, v1 = v2, v2 = t; 674 } 675 676 t = tmpflags & ~SETCOND_FLAGS; 677 if (v1 == TCG_REG_ZERO) { 678 tcg_out_opc_masknez(s, ret, v2, t); 679 } else if (v2 == TCG_REG_ZERO) { 680 tcg_out_opc_maskeqz(s, ret, v1, t); 681 } else { 682 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */ 683 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */ 684 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2); 685 } 686} 687 688/* 689 * Branch helpers 690 */ 691 692static const struct { 693 LoongArchInsn op; 694 bool swap; 695} tcg_brcond_to_loongarch[] = { 696 [TCG_COND_EQ] = { OPC_BEQ, false }, 697 [TCG_COND_NE] = { OPC_BNE, false }, 698 [TCG_COND_LT] = { OPC_BGT, true }, 699 [TCG_COND_GE] = { OPC_BLE, true }, 700 [TCG_COND_LE] = { OPC_BLE, false }, 701 [TCG_COND_GT] = { OPC_BGT, false }, 702 [TCG_COND_LTU] = { OPC_BGTU, true }, 703 [TCG_COND_GEU] = { OPC_BLEU, true }, 704 [TCG_COND_LEU] = { OPC_BLEU, false }, 705 [TCG_COND_GTU] = { OPC_BGTU, false } 706}; 707 708static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, 709 TCGReg arg2, TCGLabel *l) 710{ 711 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; 712 713 tcg_debug_assert(op != 0); 714 715 if (tcg_brcond_to_loongarch[cond].swap) { 716 TCGReg t = arg1; 717 arg1 = arg2; 718 arg2 = t; 719 } 720 721 /* all conditional branch insns belong to DJSk16-format */ 722 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); 723 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); 724} 725 726static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) 727{ 728 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; 729 ptrdiff_t offset = tcg_pcrel_diff(s, arg); 730 731 tcg_debug_assert((offset & 3) == 0); 732 if (offset == sextreg(offset, 0, 28)) { 733 /* short jump: +/- 256MiB */ 734 if (tail) { 735 tcg_out_opc_b(s, offset >> 2); 736 } else { 737 tcg_out_opc_bl(s, offset >> 2); 738 } 739 } else if (offset == sextreg(offset, 0, 38)) { 740 /* long jump: +/- 256GiB */ 741 tcg_target_long lo = sextreg(offset, 0, 18); 742 tcg_target_long hi = offset - lo; 743 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); 744 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 745 } else { 746 /* far jump: 64-bit */ 747 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); 748 tcg_target_long hi = (tcg_target_long)arg - lo; 749 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); 750 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); 751 } 752} 753 754static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, 755 const TCGHelperInfo *info) 756{ 757 tcg_out_call_int(s, arg, false); 758} 759 760/* 761 * Load/store helpers 762 */ 763 764static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, 765 TCGReg addr, intptr_t offset) 766{ 767 intptr_t imm12 = sextreg(offset, 0, 12); 768 769 if (offset != imm12) { 770 intptr_t diff = tcg_pcrel_diff(s, (void *)offset); 771 772 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { 773 imm12 = sextreg(diff, 0, 12); 774 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); 775 } else { 776 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); 777 if (addr != TCG_REG_ZERO) { 778 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); 779 } 780 } 781 addr = TCG_REG_TMP2; 782 } 783 784 switch (opc) { 785 case OPC_LD_B: 786 case OPC_LD_BU: 787 case OPC_LD_H: 788 case OPC_LD_HU: 789 case OPC_LD_W: 790 case OPC_LD_WU: 791 case OPC_LD_D: 792 case OPC_ST_B: 793 case OPC_ST_H: 794 case OPC_ST_W: 795 case OPC_ST_D: 796 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); 797 break; 798 default: 799 g_assert_not_reached(); 800 } 801} 802 803static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, 804 TCGReg arg1, intptr_t arg2) 805{ 806 bool is_32bit = type == TCG_TYPE_I32; 807 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); 808} 809 810static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 811 TCGReg arg1, intptr_t arg2) 812{ 813 bool is_32bit = type == TCG_TYPE_I32; 814 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); 815} 816 817static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 818 TCGReg base, intptr_t ofs) 819{ 820 if (val == 0) { 821 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); 822 return true; 823 } 824 return false; 825} 826 827/* 828 * Load/store helpers for SoftMMU, and qemu_ld/st implementations 829 */ 830 831static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) 832{ 833 tcg_out_opc_b(s, 0); 834 return reloc_br_sd10k16(s->code_ptr - 1, target); 835} 836 837static const TCGLdstHelperParam ldst_helper_param = { 838 .ntmp = 1, .tmp = { TCG_REG_TMP0 } 839}; 840 841static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 842{ 843 MemOp opc = get_memop(l->oi); 844 845 /* resolve label address */ 846 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 847 return false; 848 } 849 850 tcg_out_ld_helper_args(s, l, &ldst_helper_param); 851 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false); 852 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); 853 return tcg_out_goto(s, l->raddr); 854} 855 856static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 857{ 858 MemOp opc = get_memop(l->oi); 859 860 /* resolve label address */ 861 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { 862 return false; 863 } 864 865 tcg_out_st_helper_args(s, l, &ldst_helper_param); 866 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); 867 return tcg_out_goto(s, l->raddr); 868} 869 870typedef struct { 871 TCGReg base; 872 TCGReg index; 873 TCGAtomAlign aa; 874} HostAddress; 875 876bool tcg_target_has_memory_bswap(MemOp memop) 877{ 878 return false; 879} 880 881/* We expect to use a 12-bit negative offset from ENV. */ 882#define MIN_TLB_MASK_TABLE_OFS -(1 << 11) 883 884/* 885 * For softmmu, perform the TLB load and compare. 886 * For useronly, perform any required alignment tests. 887 * In both cases, return a TCGLabelQemuLdst structure if the slow path 888 * is required and fill in @h with the host address for the fast path. 889 */ 890static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, 891 TCGReg addr_reg, MemOpIdx oi, 892 bool is_ld) 893{ 894 TCGType addr_type = s->addr_type; 895 TCGLabelQemuLdst *ldst = NULL; 896 MemOp opc = get_memop(oi); 897 MemOp a_bits; 898 899 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); 900 a_bits = h->aa.align; 901 902#ifdef CONFIG_SOFTMMU 903 unsigned s_bits = opc & MO_SIZE; 904 int mem_index = get_mmuidx(oi); 905 int fast_ofs = tlb_mask_table_ofs(s, mem_index); 906 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 907 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 908 909 ldst = new_ldst_label(s); 910 ldst->is_ld = is_ld; 911 ldst->oi = oi; 912 ldst->addrlo_reg = addr_reg; 913 914 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); 915 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); 916 917 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, 918 s->page_bits - CPU_TLB_ENTRY_BITS); 919 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); 920 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); 921 922 /* Load the tlb comparator and the addend. */ 923 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); 924 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 925 is_ld ? offsetof(CPUTLBEntry, addr_read) 926 : offsetof(CPUTLBEntry, addr_write)); 927 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, 928 offsetof(CPUTLBEntry, addend)); 929 930 /* 931 * For aligned accesses, we check the first byte and include the alignment 932 * bits within the address. For unaligned access, we check that we don't 933 * cross pages using the address of the last byte of the access. 934 */ 935 if (a_bits < s_bits) { 936 unsigned a_mask = (1u << a_bits) - 1; 937 unsigned s_mask = (1u << s_bits) - 1; 938 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); 939 } else { 940 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); 941 } 942 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, 943 a_bits, s->page_bits - 1); 944 945 /* Compare masked address with the TLB entry. */ 946 ldst->label_ptr[0] = s->code_ptr; 947 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); 948 949 h->index = TCG_REG_TMP2; 950#else 951 if (a_bits) { 952 ldst = new_ldst_label(s); 953 954 ldst->is_ld = is_ld; 955 ldst->oi = oi; 956 ldst->addrlo_reg = addr_reg; 957 958 /* 959 * Without micro-architecture details, we don't know which of 960 * bstrpick or andi is faster, so use bstrpick as it's not 961 * constrained by imm field width. Not to say alignments >= 2^12 962 * are going to happen any time soon. 963 */ 964 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); 965 966 ldst->label_ptr[0] = s->code_ptr; 967 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); 968 } 969 970 h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; 971#endif 972 973 if (addr_type == TCG_TYPE_I32) { 974 h->base = TCG_REG_TMP0; 975 tcg_out_ext32u(s, h->base, addr_reg); 976 } else { 977 h->base = addr_reg; 978 } 979 980 return ldst; 981} 982 983static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, 984 TCGReg rd, HostAddress h) 985{ 986 /* Byte swapping is left to middle-end expansion. */ 987 tcg_debug_assert((opc & MO_BSWAP) == 0); 988 989 switch (opc & MO_SSIZE) { 990 case MO_UB: 991 tcg_out_opc_ldx_bu(s, rd, h.base, h.index); 992 break; 993 case MO_SB: 994 tcg_out_opc_ldx_b(s, rd, h.base, h.index); 995 break; 996 case MO_UW: 997 tcg_out_opc_ldx_hu(s, rd, h.base, h.index); 998 break; 999 case MO_SW: 1000 tcg_out_opc_ldx_h(s, rd, h.base, h.index); 1001 break; 1002 case MO_UL: 1003 if (type == TCG_TYPE_I64) { 1004 tcg_out_opc_ldx_wu(s, rd, h.base, h.index); 1005 break; 1006 } 1007 /* fallthrough */ 1008 case MO_SL: 1009 tcg_out_opc_ldx_w(s, rd, h.base, h.index); 1010 break; 1011 case MO_UQ: 1012 tcg_out_opc_ldx_d(s, rd, h.base, h.index); 1013 break; 1014 default: 1015 g_assert_not_reached(); 1016 } 1017} 1018 1019static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1020 MemOpIdx oi, TCGType data_type) 1021{ 1022 TCGLabelQemuLdst *ldst; 1023 HostAddress h; 1024 1025 ldst = prepare_host_addr(s, &h, addr_reg, oi, true); 1026 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); 1027 1028 if (ldst) { 1029 ldst->type = data_type; 1030 ldst->datalo_reg = data_reg; 1031 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1032 } 1033} 1034 1035static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, 1036 TCGReg rd, HostAddress h) 1037{ 1038 /* Byte swapping is left to middle-end expansion. */ 1039 tcg_debug_assert((opc & MO_BSWAP) == 0); 1040 1041 switch (opc & MO_SIZE) { 1042 case MO_8: 1043 tcg_out_opc_stx_b(s, rd, h.base, h.index); 1044 break; 1045 case MO_16: 1046 tcg_out_opc_stx_h(s, rd, h.base, h.index); 1047 break; 1048 case MO_32: 1049 tcg_out_opc_stx_w(s, rd, h.base, h.index); 1050 break; 1051 case MO_64: 1052 tcg_out_opc_stx_d(s, rd, h.base, h.index); 1053 break; 1054 default: 1055 g_assert_not_reached(); 1056 } 1057} 1058 1059static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, 1060 MemOpIdx oi, TCGType data_type) 1061{ 1062 TCGLabelQemuLdst *ldst; 1063 HostAddress h; 1064 1065 ldst = prepare_host_addr(s, &h, addr_reg, oi, false); 1066 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); 1067 1068 if (ldst) { 1069 ldst->type = data_type; 1070 ldst->datalo_reg = data_reg; 1071 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); 1072 } 1073} 1074 1075/* 1076 * Entry-points 1077 */ 1078 1079static const tcg_insn_unit *tb_ret_addr; 1080 1081static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1082{ 1083 /* Reuse the zeroing that exists for goto_ptr. */ 1084 if (a0 == 0) { 1085 tcg_out_call_int(s, tcg_code_gen_epilogue, true); 1086 } else { 1087 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); 1088 tcg_out_call_int(s, tb_ret_addr, true); 1089 } 1090} 1091 1092static void tcg_out_goto_tb(TCGContext *s, int which) 1093{ 1094 /* 1095 * Direct branch, or load indirect address, to be patched 1096 * by tb_target_set_jmp_target. Check indirect load offset 1097 * in range early, regardless of direct branch distance, 1098 * via assert within tcg_out_opc_pcaddu2i. 1099 */ 1100 uintptr_t i_addr = get_jmp_target_addr(s, which); 1101 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr); 1102 1103 set_jmp_insn_offset(s, which); 1104 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2); 1105 1106 /* Finish the load and indirect branch. */ 1107 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0); 1108 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); 1109 set_jmp_reset_offset(s, which); 1110} 1111 1112void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1113 uintptr_t jmp_rx, uintptr_t jmp_rw) 1114{ 1115 uintptr_t d_addr = tb->jmp_target_addr[n]; 1116 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2; 1117 tcg_insn_unit insn; 1118 1119 /* Either directly branch, or load slot address for indirect branch. */ 1120 if (d_disp == sextreg(d_disp, 0, 26)) { 1121 insn = encode_sd10k16_insn(OPC_B, d_disp); 1122 } else { 1123 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n]; 1124 intptr_t i_disp = i_addr - jmp_rx; 1125 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2); 1126 } 1127 1128 qatomic_set((tcg_insn_unit *)jmp_rw, insn); 1129 flush_idcache_range(jmp_rx, jmp_rw, 4); 1130} 1131 1132static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1133 const TCGArg args[TCG_MAX_OP_ARGS], 1134 const int const_args[TCG_MAX_OP_ARGS]) 1135{ 1136 TCGArg a0 = args[0]; 1137 TCGArg a1 = args[1]; 1138 TCGArg a2 = args[2]; 1139 int c2 = const_args[2]; 1140 1141 switch (opc) { 1142 case INDEX_op_mb: 1143 tcg_out_mb(s, a0); 1144 break; 1145 1146 case INDEX_op_goto_ptr: 1147 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); 1148 break; 1149 1150 case INDEX_op_br: 1151 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), 1152 0); 1153 tcg_out_opc_b(s, 0); 1154 break; 1155 1156 case INDEX_op_brcond_i32: 1157 case INDEX_op_brcond_i64: 1158 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); 1159 break; 1160 1161 case INDEX_op_extrh_i64_i32: 1162 tcg_out_opc_srai_d(s, a0, a1, 32); 1163 break; 1164 1165 case INDEX_op_not_i32: 1166 case INDEX_op_not_i64: 1167 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); 1168 break; 1169 1170 case INDEX_op_nor_i32: 1171 case INDEX_op_nor_i64: 1172 if (c2) { 1173 tcg_out_opc_ori(s, a0, a1, a2); 1174 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); 1175 } else { 1176 tcg_out_opc_nor(s, a0, a1, a2); 1177 } 1178 break; 1179 1180 case INDEX_op_andc_i32: 1181 case INDEX_op_andc_i64: 1182 if (c2) { 1183 /* guaranteed to fit due to constraint */ 1184 tcg_out_opc_andi(s, a0, a1, ~a2); 1185 } else { 1186 tcg_out_opc_andn(s, a0, a1, a2); 1187 } 1188 break; 1189 1190 case INDEX_op_orc_i32: 1191 case INDEX_op_orc_i64: 1192 if (c2) { 1193 /* guaranteed to fit due to constraint */ 1194 tcg_out_opc_ori(s, a0, a1, ~a2); 1195 } else { 1196 tcg_out_opc_orn(s, a0, a1, a2); 1197 } 1198 break; 1199 1200 case INDEX_op_and_i32: 1201 case INDEX_op_and_i64: 1202 if (c2) { 1203 tcg_out_opc_andi(s, a0, a1, a2); 1204 } else { 1205 tcg_out_opc_and(s, a0, a1, a2); 1206 } 1207 break; 1208 1209 case INDEX_op_or_i32: 1210 case INDEX_op_or_i64: 1211 if (c2) { 1212 tcg_out_opc_ori(s, a0, a1, a2); 1213 } else { 1214 tcg_out_opc_or(s, a0, a1, a2); 1215 } 1216 break; 1217 1218 case INDEX_op_xor_i32: 1219 case INDEX_op_xor_i64: 1220 if (c2) { 1221 tcg_out_opc_xori(s, a0, a1, a2); 1222 } else { 1223 tcg_out_opc_xor(s, a0, a1, a2); 1224 } 1225 break; 1226 1227 case INDEX_op_extract_i32: 1228 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); 1229 break; 1230 case INDEX_op_extract_i64: 1231 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); 1232 break; 1233 1234 case INDEX_op_deposit_i32: 1235 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); 1236 break; 1237 case INDEX_op_deposit_i64: 1238 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); 1239 break; 1240 1241 case INDEX_op_bswap16_i32: 1242 case INDEX_op_bswap16_i64: 1243 tcg_out_opc_revb_2h(s, a0, a1); 1244 if (a2 & TCG_BSWAP_OS) { 1245 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); 1246 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1247 tcg_out_ext16u(s, a0, a0); 1248 } 1249 break; 1250 1251 case INDEX_op_bswap32_i32: 1252 /* All 32-bit values are computed sign-extended in the register. */ 1253 a2 = TCG_BSWAP_OS; 1254 /* fallthrough */ 1255 case INDEX_op_bswap32_i64: 1256 tcg_out_opc_revb_2w(s, a0, a1); 1257 if (a2 & TCG_BSWAP_OS) { 1258 tcg_out_ext32s(s, a0, a0); 1259 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { 1260 tcg_out_ext32u(s, a0, a0); 1261 } 1262 break; 1263 1264 case INDEX_op_bswap64_i64: 1265 tcg_out_opc_revb_d(s, a0, a1); 1266 break; 1267 1268 case INDEX_op_clz_i32: 1269 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); 1270 break; 1271 case INDEX_op_clz_i64: 1272 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); 1273 break; 1274 1275 case INDEX_op_ctz_i32: 1276 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); 1277 break; 1278 case INDEX_op_ctz_i64: 1279 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); 1280 break; 1281 1282 case INDEX_op_shl_i32: 1283 if (c2) { 1284 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); 1285 } else { 1286 tcg_out_opc_sll_w(s, a0, a1, a2); 1287 } 1288 break; 1289 case INDEX_op_shl_i64: 1290 if (c2) { 1291 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); 1292 } else { 1293 tcg_out_opc_sll_d(s, a0, a1, a2); 1294 } 1295 break; 1296 1297 case INDEX_op_shr_i32: 1298 if (c2) { 1299 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); 1300 } else { 1301 tcg_out_opc_srl_w(s, a0, a1, a2); 1302 } 1303 break; 1304 case INDEX_op_shr_i64: 1305 if (c2) { 1306 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); 1307 } else { 1308 tcg_out_opc_srl_d(s, a0, a1, a2); 1309 } 1310 break; 1311 1312 case INDEX_op_sar_i32: 1313 if (c2) { 1314 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); 1315 } else { 1316 tcg_out_opc_sra_w(s, a0, a1, a2); 1317 } 1318 break; 1319 case INDEX_op_sar_i64: 1320 if (c2) { 1321 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); 1322 } else { 1323 tcg_out_opc_sra_d(s, a0, a1, a2); 1324 } 1325 break; 1326 1327 case INDEX_op_rotl_i32: 1328 /* transform into equivalent rotr/rotri */ 1329 if (c2) { 1330 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); 1331 } else { 1332 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1333 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); 1334 } 1335 break; 1336 case INDEX_op_rotl_i64: 1337 /* transform into equivalent rotr/rotri */ 1338 if (c2) { 1339 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); 1340 } else { 1341 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); 1342 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); 1343 } 1344 break; 1345 1346 case INDEX_op_rotr_i32: 1347 if (c2) { 1348 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); 1349 } else { 1350 tcg_out_opc_rotr_w(s, a0, a1, a2); 1351 } 1352 break; 1353 case INDEX_op_rotr_i64: 1354 if (c2) { 1355 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); 1356 } else { 1357 tcg_out_opc_rotr_d(s, a0, a1, a2); 1358 } 1359 break; 1360 1361 case INDEX_op_add_i32: 1362 if (c2) { 1363 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); 1364 } else { 1365 tcg_out_opc_add_w(s, a0, a1, a2); 1366 } 1367 break; 1368 case INDEX_op_add_i64: 1369 if (c2) { 1370 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); 1371 } else { 1372 tcg_out_opc_add_d(s, a0, a1, a2); 1373 } 1374 break; 1375 1376 case INDEX_op_sub_i32: 1377 if (c2) { 1378 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); 1379 } else { 1380 tcg_out_opc_sub_w(s, a0, a1, a2); 1381 } 1382 break; 1383 case INDEX_op_sub_i64: 1384 if (c2) { 1385 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); 1386 } else { 1387 tcg_out_opc_sub_d(s, a0, a1, a2); 1388 } 1389 break; 1390 1391 case INDEX_op_mul_i32: 1392 tcg_out_opc_mul_w(s, a0, a1, a2); 1393 break; 1394 case INDEX_op_mul_i64: 1395 tcg_out_opc_mul_d(s, a0, a1, a2); 1396 break; 1397 1398 case INDEX_op_mulsh_i32: 1399 tcg_out_opc_mulh_w(s, a0, a1, a2); 1400 break; 1401 case INDEX_op_mulsh_i64: 1402 tcg_out_opc_mulh_d(s, a0, a1, a2); 1403 break; 1404 1405 case INDEX_op_muluh_i32: 1406 tcg_out_opc_mulh_wu(s, a0, a1, a2); 1407 break; 1408 case INDEX_op_muluh_i64: 1409 tcg_out_opc_mulh_du(s, a0, a1, a2); 1410 break; 1411 1412 case INDEX_op_div_i32: 1413 tcg_out_opc_div_w(s, a0, a1, a2); 1414 break; 1415 case INDEX_op_div_i64: 1416 tcg_out_opc_div_d(s, a0, a1, a2); 1417 break; 1418 1419 case INDEX_op_divu_i32: 1420 tcg_out_opc_div_wu(s, a0, a1, a2); 1421 break; 1422 case INDEX_op_divu_i64: 1423 tcg_out_opc_div_du(s, a0, a1, a2); 1424 break; 1425 1426 case INDEX_op_rem_i32: 1427 tcg_out_opc_mod_w(s, a0, a1, a2); 1428 break; 1429 case INDEX_op_rem_i64: 1430 tcg_out_opc_mod_d(s, a0, a1, a2); 1431 break; 1432 1433 case INDEX_op_remu_i32: 1434 tcg_out_opc_mod_wu(s, a0, a1, a2); 1435 break; 1436 case INDEX_op_remu_i64: 1437 tcg_out_opc_mod_du(s, a0, a1, a2); 1438 break; 1439 1440 case INDEX_op_setcond_i32: 1441 case INDEX_op_setcond_i64: 1442 tcg_out_setcond(s, args[3], a0, a1, a2, c2); 1443 break; 1444 1445 case INDEX_op_movcond_i32: 1446 case INDEX_op_movcond_i64: 1447 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); 1448 break; 1449 1450 case INDEX_op_ld8s_i32: 1451 case INDEX_op_ld8s_i64: 1452 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); 1453 break; 1454 case INDEX_op_ld8u_i32: 1455 case INDEX_op_ld8u_i64: 1456 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); 1457 break; 1458 case INDEX_op_ld16s_i32: 1459 case INDEX_op_ld16s_i64: 1460 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); 1461 break; 1462 case INDEX_op_ld16u_i32: 1463 case INDEX_op_ld16u_i64: 1464 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); 1465 break; 1466 case INDEX_op_ld_i32: 1467 case INDEX_op_ld32s_i64: 1468 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); 1469 break; 1470 case INDEX_op_ld32u_i64: 1471 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); 1472 break; 1473 case INDEX_op_ld_i64: 1474 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); 1475 break; 1476 1477 case INDEX_op_st8_i32: 1478 case INDEX_op_st8_i64: 1479 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); 1480 break; 1481 case INDEX_op_st16_i32: 1482 case INDEX_op_st16_i64: 1483 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); 1484 break; 1485 case INDEX_op_st_i32: 1486 case INDEX_op_st32_i64: 1487 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); 1488 break; 1489 case INDEX_op_st_i64: 1490 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); 1491 break; 1492 1493 case INDEX_op_qemu_ld_a32_i32: 1494 case INDEX_op_qemu_ld_a64_i32: 1495 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); 1496 break; 1497 case INDEX_op_qemu_ld_a32_i64: 1498 case INDEX_op_qemu_ld_a64_i64: 1499 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); 1500 break; 1501 case INDEX_op_qemu_st_a32_i32: 1502 case INDEX_op_qemu_st_a64_i32: 1503 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); 1504 break; 1505 case INDEX_op_qemu_st_a32_i64: 1506 case INDEX_op_qemu_st_a64_i64: 1507 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); 1508 break; 1509 1510 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1511 case INDEX_op_mov_i64: 1512 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1513 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1514 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1515 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ 1516 case INDEX_op_ext8s_i64: 1517 case INDEX_op_ext8u_i32: 1518 case INDEX_op_ext8u_i64: 1519 case INDEX_op_ext16s_i32: 1520 case INDEX_op_ext16s_i64: 1521 case INDEX_op_ext16u_i32: 1522 case INDEX_op_ext16u_i64: 1523 case INDEX_op_ext32s_i64: 1524 case INDEX_op_ext32u_i64: 1525 case INDEX_op_ext_i32_i64: 1526 case INDEX_op_extu_i32_i64: 1527 case INDEX_op_extrl_i64_i32: 1528 default: 1529 g_assert_not_reached(); 1530 } 1531} 1532 1533static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, 1534 TCGReg rd, TCGReg rs) 1535{ 1536 switch (vece) { 1537 case MO_8: 1538 tcg_out_opc_vreplgr2vr_b(s, rd, rs); 1539 break; 1540 case MO_16: 1541 tcg_out_opc_vreplgr2vr_h(s, rd, rs); 1542 break; 1543 case MO_32: 1544 tcg_out_opc_vreplgr2vr_w(s, rd, rs); 1545 break; 1546 case MO_64: 1547 tcg_out_opc_vreplgr2vr_d(s, rd, rs); 1548 break; 1549 default: 1550 g_assert_not_reached(); 1551 } 1552 return true; 1553} 1554 1555static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, 1556 TCGReg r, TCGReg base, intptr_t offset) 1557{ 1558 /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */ 1559 if (offset < -0x800 || offset > 0x7ff || \ 1560 (offset & ((1 << vece) - 1)) != 0) { 1561 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset); 1562 base = TCG_REG_TMP0; 1563 offset = 0; 1564 } 1565 offset >>= vece; 1566 1567 switch (vece) { 1568 case MO_8: 1569 tcg_out_opc_vldrepl_b(s, r, base, offset); 1570 break; 1571 case MO_16: 1572 tcg_out_opc_vldrepl_h(s, r, base, offset); 1573 break; 1574 case MO_32: 1575 tcg_out_opc_vldrepl_w(s, r, base, offset); 1576 break; 1577 case MO_64: 1578 tcg_out_opc_vldrepl_d(s, r, base, offset); 1579 break; 1580 default: 1581 g_assert_not_reached(); 1582 } 1583 return true; 1584} 1585 1586static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, 1587 TCGReg rd, int64_t v64) 1588{ 1589 /* Try vldi if imm can fit */ 1590 int64_t value = sextract64(v64, 0, 8 << vece); 1591 if (-0x200 <= value && value <= 0x1FF) { 1592 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF); 1593 tcg_out_opc_vldi(s, rd, imm); 1594 return; 1595 } 1596 1597 /* TODO: vldi patterns when imm 12 is set */ 1598 1599 /* Fallback to vreplgr2vr */ 1600 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value); 1601 switch (vece) { 1602 case MO_8: 1603 tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0); 1604 break; 1605 case MO_16: 1606 tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0); 1607 break; 1608 case MO_32: 1609 tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0); 1610 break; 1611 case MO_64: 1612 tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0); 1613 break; 1614 default: 1615 g_assert_not_reached(); 1616 } 1617} 1618 1619static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, 1620 unsigned vecl, unsigned vece, 1621 const TCGArg args[TCG_MAX_OP_ARGS], 1622 const int const_args[TCG_MAX_OP_ARGS]) 1623{ 1624 TCGType type = vecl + TCG_TYPE_V64; 1625 TCGArg a0, a1, a2; 1626 TCGReg temp = TCG_REG_TMP0; 1627 1628 a0 = args[0]; 1629 a1 = args[1]; 1630 a2 = args[2]; 1631 1632 /* Currently only supports V128 */ 1633 tcg_debug_assert(type == TCG_TYPE_V128); 1634 1635 switch (opc) { 1636 case INDEX_op_st_vec: 1637 /* Try to fit vst imm */ 1638 if (-0x800 <= a2 && a2 <= 0x7ff) { 1639 tcg_out_opc_vst(s, a0, a1, a2); 1640 } else { 1641 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1642 tcg_out_opc_vstx(s, a0, a1, temp); 1643 } 1644 break; 1645 case INDEX_op_ld_vec: 1646 /* Try to fit vld imm */ 1647 if (-0x800 <= a2 && a2 <= 0x7ff) { 1648 tcg_out_opc_vld(s, a0, a1, a2); 1649 } else { 1650 tcg_out_movi(s, TCG_TYPE_I64, temp, a2); 1651 tcg_out_opc_vldx(s, a0, a1, temp); 1652 } 1653 break; 1654 case INDEX_op_dupm_vec: 1655 tcg_out_dupm_vec(s, type, vece, a0, a1, a2); 1656 break; 1657 default: 1658 g_assert_not_reached(); 1659 } 1660} 1661 1662int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) 1663{ 1664 switch (opc) { 1665 case INDEX_op_ld_vec: 1666 case INDEX_op_st_vec: 1667 case INDEX_op_dup_vec: 1668 case INDEX_op_dupm_vec: 1669 return 1; 1670 default: 1671 return 0; 1672 } 1673} 1674 1675void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, 1676 TCGArg a0, ...) 1677{ 1678 g_assert_not_reached(); 1679} 1680 1681static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1682{ 1683 switch (op) { 1684 case INDEX_op_goto_ptr: 1685 return C_O0_I1(r); 1686 1687 case INDEX_op_st8_i32: 1688 case INDEX_op_st8_i64: 1689 case INDEX_op_st16_i32: 1690 case INDEX_op_st16_i64: 1691 case INDEX_op_st32_i64: 1692 case INDEX_op_st_i32: 1693 case INDEX_op_st_i64: 1694 case INDEX_op_qemu_st_a32_i32: 1695 case INDEX_op_qemu_st_a64_i32: 1696 case INDEX_op_qemu_st_a32_i64: 1697 case INDEX_op_qemu_st_a64_i64: 1698 return C_O0_I2(rZ, r); 1699 1700 case INDEX_op_brcond_i32: 1701 case INDEX_op_brcond_i64: 1702 return C_O0_I2(rZ, rZ); 1703 1704 case INDEX_op_ext8s_i32: 1705 case INDEX_op_ext8s_i64: 1706 case INDEX_op_ext8u_i32: 1707 case INDEX_op_ext8u_i64: 1708 case INDEX_op_ext16s_i32: 1709 case INDEX_op_ext16s_i64: 1710 case INDEX_op_ext16u_i32: 1711 case INDEX_op_ext16u_i64: 1712 case INDEX_op_ext32s_i64: 1713 case INDEX_op_ext32u_i64: 1714 case INDEX_op_extu_i32_i64: 1715 case INDEX_op_extrl_i64_i32: 1716 case INDEX_op_extrh_i64_i32: 1717 case INDEX_op_ext_i32_i64: 1718 case INDEX_op_not_i32: 1719 case INDEX_op_not_i64: 1720 case INDEX_op_extract_i32: 1721 case INDEX_op_extract_i64: 1722 case INDEX_op_bswap16_i32: 1723 case INDEX_op_bswap16_i64: 1724 case INDEX_op_bswap32_i32: 1725 case INDEX_op_bswap32_i64: 1726 case INDEX_op_bswap64_i64: 1727 case INDEX_op_ld8s_i32: 1728 case INDEX_op_ld8s_i64: 1729 case INDEX_op_ld8u_i32: 1730 case INDEX_op_ld8u_i64: 1731 case INDEX_op_ld16s_i32: 1732 case INDEX_op_ld16s_i64: 1733 case INDEX_op_ld16u_i32: 1734 case INDEX_op_ld16u_i64: 1735 case INDEX_op_ld32s_i64: 1736 case INDEX_op_ld32u_i64: 1737 case INDEX_op_ld_i32: 1738 case INDEX_op_ld_i64: 1739 case INDEX_op_qemu_ld_a32_i32: 1740 case INDEX_op_qemu_ld_a64_i32: 1741 case INDEX_op_qemu_ld_a32_i64: 1742 case INDEX_op_qemu_ld_a64_i64: 1743 return C_O1_I1(r, r); 1744 1745 case INDEX_op_andc_i32: 1746 case INDEX_op_andc_i64: 1747 case INDEX_op_orc_i32: 1748 case INDEX_op_orc_i64: 1749 /* 1750 * LoongArch insns for these ops don't have reg-imm forms, but we 1751 * can express using andi/ori if ~constant satisfies 1752 * TCG_CT_CONST_U12. 1753 */ 1754 return C_O1_I2(r, r, rC); 1755 1756 case INDEX_op_shl_i32: 1757 case INDEX_op_shl_i64: 1758 case INDEX_op_shr_i32: 1759 case INDEX_op_shr_i64: 1760 case INDEX_op_sar_i32: 1761 case INDEX_op_sar_i64: 1762 case INDEX_op_rotl_i32: 1763 case INDEX_op_rotl_i64: 1764 case INDEX_op_rotr_i32: 1765 case INDEX_op_rotr_i64: 1766 return C_O1_I2(r, r, ri); 1767 1768 case INDEX_op_add_i32: 1769 return C_O1_I2(r, r, ri); 1770 case INDEX_op_add_i64: 1771 return C_O1_I2(r, r, rJ); 1772 1773 case INDEX_op_and_i32: 1774 case INDEX_op_and_i64: 1775 case INDEX_op_nor_i32: 1776 case INDEX_op_nor_i64: 1777 case INDEX_op_or_i32: 1778 case INDEX_op_or_i64: 1779 case INDEX_op_xor_i32: 1780 case INDEX_op_xor_i64: 1781 /* LoongArch reg-imm bitops have their imms ZERO-extended */ 1782 return C_O1_I2(r, r, rU); 1783 1784 case INDEX_op_clz_i32: 1785 case INDEX_op_clz_i64: 1786 case INDEX_op_ctz_i32: 1787 case INDEX_op_ctz_i64: 1788 return C_O1_I2(r, r, rW); 1789 1790 case INDEX_op_deposit_i32: 1791 case INDEX_op_deposit_i64: 1792 /* Must deposit into the same register as input */ 1793 return C_O1_I2(r, 0, rZ); 1794 1795 case INDEX_op_sub_i32: 1796 case INDEX_op_setcond_i32: 1797 return C_O1_I2(r, rZ, ri); 1798 case INDEX_op_sub_i64: 1799 case INDEX_op_setcond_i64: 1800 return C_O1_I2(r, rZ, rJ); 1801 1802 case INDEX_op_mul_i32: 1803 case INDEX_op_mul_i64: 1804 case INDEX_op_mulsh_i32: 1805 case INDEX_op_mulsh_i64: 1806 case INDEX_op_muluh_i32: 1807 case INDEX_op_muluh_i64: 1808 case INDEX_op_div_i32: 1809 case INDEX_op_div_i64: 1810 case INDEX_op_divu_i32: 1811 case INDEX_op_divu_i64: 1812 case INDEX_op_rem_i32: 1813 case INDEX_op_rem_i64: 1814 case INDEX_op_remu_i32: 1815 case INDEX_op_remu_i64: 1816 return C_O1_I2(r, rZ, rZ); 1817 1818 case INDEX_op_movcond_i32: 1819 case INDEX_op_movcond_i64: 1820 return C_O1_I4(r, rZ, rJ, rZ, rZ); 1821 1822 case INDEX_op_ld_vec: 1823 case INDEX_op_dupm_vec: 1824 case INDEX_op_dup_vec: 1825 return C_O1_I1(w, r); 1826 1827 case INDEX_op_st_vec: 1828 return C_O0_I2(w, r); 1829 1830 default: 1831 g_assert_not_reached(); 1832 } 1833} 1834 1835static const int tcg_target_callee_save_regs[] = { 1836 TCG_REG_S0, /* used for the global env (TCG_AREG0) */ 1837 TCG_REG_S1, 1838 TCG_REG_S2, 1839 TCG_REG_S3, 1840 TCG_REG_S4, 1841 TCG_REG_S5, 1842 TCG_REG_S6, 1843 TCG_REG_S7, 1844 TCG_REG_S8, 1845 TCG_REG_S9, 1846 TCG_REG_RA, /* should be last for ABI compliance */ 1847}; 1848 1849/* Stack frame parameters. */ 1850#define REG_SIZE (TCG_TARGET_REG_BITS / 8) 1851#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) 1852#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) 1853#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ 1854 + TCG_TARGET_STACK_ALIGN - 1) \ 1855 & -TCG_TARGET_STACK_ALIGN) 1856#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) 1857 1858/* We're expecting to be able to use an immediate for frame allocation. */ 1859QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); 1860 1861/* Generate global QEMU prologue and epilogue code */ 1862static void tcg_target_qemu_prologue(TCGContext *s) 1863{ 1864 int i; 1865 1866 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); 1867 1868 /* TB prologue */ 1869 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); 1870 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1871 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1872 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1873 } 1874 1875#if !defined(CONFIG_SOFTMMU) 1876 if (USE_GUEST_BASE) { 1877 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); 1878 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 1879 } 1880#endif 1881 1882 /* Call generated code */ 1883 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); 1884 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); 1885 1886 /* Return path for goto_ptr. Set return value to 0 */ 1887 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 1888 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); 1889 1890 /* TB epilogue */ 1891 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); 1892 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1893 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], 1894 TCG_REG_SP, SAVE_OFS + i * REG_SIZE); 1895 } 1896 1897 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); 1898 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); 1899} 1900 1901static void tcg_target_init(TCGContext *s) 1902{ 1903 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 1904 1905 /* Server and desktop class cpus have UAL; embedded cpus do not. */ 1906 if (!(hwcap & HWCAP_LOONGARCH_UAL)) { 1907 error_report("TCG: unaligned access support required; exiting"); 1908 exit(EXIT_FAILURE); 1909 } 1910 1911 if (hwcap & HWCAP_LOONGARCH_LSX) { 1912 use_lsx_instructions = 1; 1913 } 1914 1915 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 1916 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 1917 1918 tcg_target_call_clobber_regs = ALL_GENERAL_REGS; 1919 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); 1920 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); 1921 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); 1922 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); 1923 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); 1924 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); 1925 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); 1926 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); 1927 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); 1928 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); 1929 1930 if (use_lsx_instructions) { 1931 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; 1932 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24); 1933 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25); 1934 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26); 1935 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27); 1936 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28); 1937 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29); 1938 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30); 1939 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31); 1940 } 1941 1942 s->reserved_regs = 0; 1943 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); 1944 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); 1945 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); 1946 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); 1947 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); 1948 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); 1949 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); 1950 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); 1951} 1952 1953typedef struct { 1954 DebugFrameHeader h; 1955 uint8_t fde_def_cfa[4]; 1956 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; 1957} DebugFrame; 1958 1959#define ELF_HOST_MACHINE EM_LOONGARCH 1960 1961static const DebugFrame debug_frame = { 1962 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ 1963 .h.cie.id = -1, 1964 .h.cie.version = 1, 1965 .h.cie.code_align = 1, 1966 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ 1967 .h.cie.return_column = TCG_REG_RA, 1968 1969 /* Total FDE size does not include the "len" member. */ 1970 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 1971 1972 .fde_def_cfa = { 1973 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ 1974 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ 1975 (FRAME_SIZE >> 7) 1976 }, 1977 .fde_reg_ofs = { 1978 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ 1979 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ 1980 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ 1981 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ 1982 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ 1983 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ 1984 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ 1985 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ 1986 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ 1987 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ 1988 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ 1989 } 1990}; 1991 1992void tcg_register_jit(const void *buf, size_t buf_size) 1993{ 1994 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 1995} 1996