/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

/* Used for function call generation. */
#define TCG_REG_CALL_STACK           TCG_REG_O6
#define TCG_TARGET_STACK_BIAS        2047
#define TCG_TARGET_STACK_ALIGN       16
#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6 * 8 + TCG_TARGET_STACK_BIAS)
#define TCG_TARGET_CALL_ARG_I32      TCG_CALL_ARG_EXTEND
#define TCG_TARGET_CALL_ARG_I64      TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128     TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128     TCG_CALL_RET_NORMAL

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200

#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
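
/*
 * For reference: a signed 13-bit immediate spans -4096..4095, and the
 * signed 11-bit immediate used by the MOVcc forms spans -1024..1023.
 */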

/* Define some temporary registers.  T3 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_G2
#define TCG_REG_T3 TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD    (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC  (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND    (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC  (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN   (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR     (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC   (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN    (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR    (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB    (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC  (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC   (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC   (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL   (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL   (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV   (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV   (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX   (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX  (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX  (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC  (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR   (INSN_OP(2) | INSN_OP3(0x2f))
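
/*
 * Example of assembling an instruction word from the fields above:
 * "add %o1, %o2, %o0" is ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_RS2(10)
 *   = (2 << 30) | (8 << 25) | (9 << 14) | 10 = 0x9002400a.
 */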

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
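
/*
 * E.g. check_fit_i64(-4096, 13) is true, since the low 13 bits (0x1000)
 * sign-extend back to -4096, while check_fit_i64(4096, 13) is false.
 */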

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* A 13-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/* A 32-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
}

/* A 32-bit constant zero-extended to 64 bits. */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}
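
/*
 * Worked example for tcg_out_movi_u32: for arg = 0x12345678, SETHI
 * installs bits 31:10 (ret = 0x12345400) and the OR supplies the low
 * ten bits (0x278), leaving 0x12345678 zero-extended to 64 bits.
 */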

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}
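
/*
 * On SPARC V9 the 32-bit shifts SRL and SRA zero- and sign-extend their
 * 32-bit result into the full 64-bit register, so a shift count of zero
 * above gives ext32u and ext32s without a dedicated extension insn.
 */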

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static const uint8_t tcg_cond_to_bcond[16] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_TSTEQ] = COND_E,
    [TCG_COND_TSTNE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[16] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGCond cond,
                        TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
                   is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}
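
/*
 * Note that tcg_out_cmp emits "subcc %rs1, %rs2, %g0" (andcc for the
 * TST conditions): the result is discarded and only the integer
 * condition codes survive, to be consumed by a following BPcc or MOVcc
 * on %icc or %xcc.
 */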

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, cond, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    int rcond = tcg_cond_to_rcond[cond];
    if (arg2 == 0 && rcond) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(rcond) | off16);
    } else {
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    int rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, rcond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Transform to inequality vs zero. */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
        c1 = TCG_REG_G0;
        c2 = TCG_REG_T1, c2const = 0;
        cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        if (neg) {
            /* 0 - 0 - C = -C = (C ? -1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
        } else {
            /* 0 + 0 + C = C = (C ? 1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        }
    } else {
        if (neg) {
            /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
        } else {
            /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
        }
    }
}
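
/*
 * Worked example of the ADDC/SUBC games above: after the SUBCC emitted
 * by tcg_out_cmp, the carry bit C is set exactly when c1 < c2 unsigned.
 * ARITH_ADDC with both operands zero then computes ret = 0 + 0 + C,
 * i.e. the boolean itself, and ARITH_SUBC computes ret = 0 - 0 - C for
 * the negated form.
 */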

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
{
    int rcond;

    if (use_vis3_instructions && !neg) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, cond, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
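
/*
 * Schematic illustration of the bh == %g0 path above (rh != ah case):
 *   addcc al, bl -> rl            ! sets carry
 *   add   ah, 1  -> rh            ! assume the carry was set
 *   movcc (carry clear) ah -> rh  ! undo the assumption otherwise
 */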

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);
}
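
/*
 * As a concrete illustration (the numbers depend on the build
 * configuration): with CPU_TEMP_BUF_NLONGS = 128 and
 * TCG_STATIC_CALL_ARGS_SIZE = 128, tmp_buf_size is 1024 and
 * frame_size = (2223 - 2047) + 128 + 1024 = 1328, already 16-aligned,
 * so the prologue emits "save %sp, -1328, %sp".
 */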

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* We expect to use a 13-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)
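
/*
 * The signed 13-bit load/store offset reaches down to exactly -4096,
 * which bounds how far below ENV the mask/table pair may sit while
 * remaining addressable without a separate constant load.
 */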

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1. */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addr_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow host SIGBUS to deliver SIGBUS to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != memop_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }
    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do so in the delay slot. */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB] = LDUB,
        [MO_SB] = LDSB,
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_BEUW] = LDUH,
        [MO_BESW] = LDSH,
        [MO_BEUL] = LDUW,
        [MO_BESL] = LDSW,
        [MO_BEUQ] = LDX,
        [MO_BESQ] = LDX,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,
        [MO_LEUQ] = LDX_LE,
        [MO_LESQ] = LDX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
        [MO_UB] = STB,

        [MO_BEUW] = STH,
        [MO_BEUL] = STW,
        [MO_BEUQ] = STX,

        [MO_LEUW] = STH_LE,
        [MO_LEUL] = STW_LE,
        [MO_LEUQ] = STX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}
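
/*
 * In the RETURN sequences above (and in the epilogue), the delay slot
 * executes with the register window already restored, so writing %o0
 * there sets the value seen by the caller of the prologue; the same
 * unwinding is why TCG_REG_TB (%i1) reappears as %o1.
 */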

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
}

static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_AND);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the sign extension of a1 to 64-bits. */
        tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
        insn = ARITH_SDIV;
    } else {
        insn = ARITH_SDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divsi(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divs,
    .out_rri = tgen_divsi,
};
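
/*
 * For the 32-bit SDIV/UDIV forms, the Y register supplies the high 32
 * bits of the 64-bit dividend; e.g. for a1 = -7 the SRA by 31 above
 * writes the sign word 0xffffffff into Y before the divide.
 */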

static void tgen_divu_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the zero extension to 64-bits. */
        tcg_out_sety(s, TCG_REG_G0);
        insn = ARITH_UDIV;
    } else {
        insn = ARITH_UDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divui(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divu,
    .out_rri = tgen_divui,
};

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_muli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arithi(s, a0, a1, a2, insn);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_mul,
    .out_rri = tgen_muli,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_muluh(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
}
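
/*
 * UMULXHI (VIS3) produces bits 127..64 of the unsigned 128-bit product
 * of two 64-bit registers, which is exactly TCG's muluh; hence the
 * availability check in cset_muluh below.
 */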

static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
{
    return (type == TCG_TYPE_I64 && use_vis3_instructions
            ? C_O1_I2(r, r, r) : C_NotImplemented);
}

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_muluh,
    .out_rrr = tgen_muluh,
};

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_OR);
}

static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
}

static void tgen_xori(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_xor,
    .out_rri = tgen_xori,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_G0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_orc(s, type, a0, TCG_REG_G0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
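
/*
 * For reference, the constraint letters used in this file (mapped in
 * tcg-target-con-str.h): 'r' is any general register, 'J' a signed
 * 13-bit immediate, 'I' a signed 11-bit immediate (the MOVcc range),
 * and 'z' folds constant zero onto %g0.
 */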

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_extract_i64:
        tcg_debug_assert(a2 + args[3] == 32);
        tcg_out_arithi(s, a0, a1, a2, SHIFT_SRL);
        break;
    case INDEX_op_sextract_i64:
        tcg_debug_assert(a2 + args[3] == 32);
        tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
        break;

    case INDEX_op_call:        /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:     /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:     /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_extu_i32_i64:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i64:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rz, r);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, rz, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rz, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rz, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rz, rz, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rz, rJ);

    default:
        return C_NotImplemented;
    }
}
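
/*
 * Reading the constraint sets above: C_O0_I2(rz, rJ) for brcond, for
 * example, declares no outputs and two inputs, the first a register
 * (with constant zero folded to %g0) and the second a register or
 * 13-bit immediate.
 */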

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}