/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
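/*
 * T1 (%g1) is the general scratch register (large ldst offsets, slow paths);
 * T2 (%o7) doubles as the link register, so tcg_out_movi asserts that its
 * destination is not T2 before using T2 as the constant-generation scratch.
 */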
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))
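
/* ADDXC and UMULXHI below are VIS3 instructions; all uses are gated on
   use_vis3_instructions, probed at runtime in tcg_target_init().  */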

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
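    /*
     * At this point the value is negative and equal to the sign-extension
     * of its low 32 bits: SETHI of ~arg leaves bits 63..32 and 9..0 zero,
     * and XOR with the sign-extended 13-bit immediate ((arg & 0x3ff) | -0x400)
     * complements bits 63..10 (recovering arg's upper bits) while inserting
     * the low 10 bits, yielding the constant in two insns.
     */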
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference.  */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ...  */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call.  */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[MO_SSIZE + 1];
static const tcg_insn_unit *qemu_st_trampoline[MO_SIZE + 1];

static void build_trampolines(TCGContext *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
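    /*
     * The fast-path mask is pre-scaled by sizeof(CPUTLBEntry), so shifting
     * the address right by (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS) and
     * AND-ing with it yields the byte offset of the TLB entry directly.
     */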
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, r0, addr);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[MO_UL];
    } else {
        func = qemu_ld_trampoline[memop & MO_SSIZE];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        tcg_out_ext32s(s, data, TCG_REG_O0);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
                   TCG_REG_O2, data_type, memop & MO_SIZE, data);

    func = qemu_st_trampoline[memop & MO_SIZE];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address.  */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
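            /*
             * RETURN has the register-window semantics of RESTORE, and its
             * delay slot executes in the restored (caller's) window, so the
             * value that was in %i1 (TCG_REG_TB) is addressed there as %o1.
             */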
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                           /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                 /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },        /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}