1/* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25/* We only support generating code for 64-bit mode. */ 26#ifndef __arch64__ 27#error "unsupported code generation mode" 28#endif 29 30#include "../tcg-pool.c.inc" 31 32#ifdef CONFIG_DEBUG_TCG 33static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 34 "%g0", 35 "%g1", 36 "%g2", 37 "%g3", 38 "%g4", 39 "%g5", 40 "%g6", 41 "%g7", 42 "%o0", 43 "%o1", 44 "%o2", 45 "%o3", 46 "%o4", 47 "%o5", 48 "%o6", 49 "%o7", 50 "%l0", 51 "%l1", 52 "%l2", 53 "%l3", 54 "%l4", 55 "%l5", 56 "%l6", 57 "%l7", 58 "%i0", 59 "%i1", 60 "%i2", 61 "%i3", 62 "%i4", 63 "%i5", 64 "%i6", 65 "%i7", 66}; 67#endif 68 69#define TCG_CT_CONST_S11 0x100 70#define TCG_CT_CONST_S13 0x200 71#define TCG_CT_CONST_ZERO 0x400 72 73/* 74 * For softmmu, we need to avoid conflicts with the first 3 75 * argument registers to perform the tlb lookup, and to call 76 * the helper function. 77 */ 78#ifdef CONFIG_SOFTMMU 79#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3) 80#else 81#define SOFTMMU_RESERVE_REGS 0 82#endif 83#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) 84#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) 85 86/* Define some temporary registers. T2 is used for constant generation. 
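   T1 is the general scratch register, used e.g. for load/store offsets that
   do not fit in a 13-bit immediate (tcg_out_ldst) and for the sign extension
   written to %y in tcg_out_div32.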
*/ 87#define TCG_REG_T1 TCG_REG_G1 88#define TCG_REG_T2 TCG_REG_O7 89 90#ifndef CONFIG_SOFTMMU 91# define TCG_GUEST_BASE_REG TCG_REG_I5 92#endif 93 94#define TCG_REG_TB TCG_REG_I1 95 96static const int tcg_target_reg_alloc_order[] = { 97 TCG_REG_L0, 98 TCG_REG_L1, 99 TCG_REG_L2, 100 TCG_REG_L3, 101 TCG_REG_L4, 102 TCG_REG_L5, 103 TCG_REG_L6, 104 TCG_REG_L7, 105 106 TCG_REG_I0, 107 TCG_REG_I1, 108 TCG_REG_I2, 109 TCG_REG_I3, 110 TCG_REG_I4, 111 TCG_REG_I5, 112 113 TCG_REG_G2, 114 TCG_REG_G3, 115 TCG_REG_G4, 116 TCG_REG_G5, 117 118 TCG_REG_O0, 119 TCG_REG_O1, 120 TCG_REG_O2, 121 TCG_REG_O3, 122 TCG_REG_O4, 123 TCG_REG_O5, 124}; 125 126static const int tcg_target_call_iarg_regs[6] = { 127 TCG_REG_O0, 128 TCG_REG_O1, 129 TCG_REG_O2, 130 TCG_REG_O3, 131 TCG_REG_O4, 132 TCG_REG_O5, 133}; 134 135static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 136{ 137 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 138 tcg_debug_assert(slot >= 0 && slot <= 3); 139 return TCG_REG_O0 + slot; 140} 141 142#define INSN_OP(x) ((x) << 30) 143#define INSN_OP2(x) ((x) << 22) 144#define INSN_OP3(x) ((x) << 19) 145#define INSN_OPF(x) ((x) << 5) 146#define INSN_RD(x) ((x) << 25) 147#define INSN_RS1(x) ((x) << 14) 148#define INSN_RS2(x) (x) 149#define INSN_ASI(x) ((x) << 5) 150 151#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff)) 152#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff)) 153#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff)) 154#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20)) 155#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff) 156#define INSN_COND(x) ((x) << 25) 157 158#define COND_N 0x0 159#define COND_E 0x1 160#define COND_LE 0x2 161#define COND_L 0x3 162#define COND_LEU 0x4 163#define COND_CS 0x5 164#define COND_NEG 0x6 165#define COND_VS 0x7 166#define COND_A 0x8 167#define COND_NE 0x9 168#define COND_G 0xa 169#define COND_GE 0xb 170#define COND_GU 0xc 171#define COND_CC 0xd 172#define COND_POS 0xe 173#define COND_VC 0xf 174#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2)) 175 176#define RCOND_Z 1 177#define RCOND_LEZ 2 178#define RCOND_LZ 3 179#define RCOND_NZ 5 180#define RCOND_GZ 6 181#define RCOND_GEZ 7 182 183#define MOVCC_ICC (1 << 18) 184#define MOVCC_XCC (1 << 18 | 1 << 12) 185 186#define BPCC_ICC 0 187#define BPCC_XCC (2 << 20) 188#define BPCC_PT (1 << 19) 189#define BPCC_PN 0 190#define BPCC_A (1 << 29) 191 192#define BPR_PT BPCC_PT 193 194#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) 195#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10)) 196#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) 197#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11)) 198#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05)) 199#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) 200#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12)) 201#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06)) 202#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03)) 203#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04)) 204#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14)) 205#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08)) 206#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c)) 207#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) 208#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b)) 209#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) 210#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f)) 211#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09)) 212#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d)) 213#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d)) 214#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c)) 215#define ARITH_MOVR (INSN_OP(2) | 
INSN_OP3(0x2f)) 216 217#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11)) 218#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16)) 219 220#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) 221#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26)) 222#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27)) 223 224#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12)) 225#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12)) 226#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12)) 227 228#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0)) 229#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0)) 230#define JMPL (INSN_OP(2) | INSN_OP3(0x38)) 231#define RETURN (INSN_OP(2) | INSN_OP3(0x39)) 232#define SAVE (INSN_OP(2) | INSN_OP3(0x3c)) 233#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d)) 234#define SETHI (INSN_OP(0) | INSN_OP2(0x4)) 235#define CALL INSN_OP(1) 236#define LDUB (INSN_OP(3) | INSN_OP3(0x01)) 237#define LDSB (INSN_OP(3) | INSN_OP3(0x09)) 238#define LDUH (INSN_OP(3) | INSN_OP3(0x02)) 239#define LDSH (INSN_OP(3) | INSN_OP3(0x0a)) 240#define LDUW (INSN_OP(3) | INSN_OP3(0x00)) 241#define LDSW (INSN_OP(3) | INSN_OP3(0x08)) 242#define LDX (INSN_OP(3) | INSN_OP3(0x0b)) 243#define STB (INSN_OP(3) | INSN_OP3(0x05)) 244#define STH (INSN_OP(3) | INSN_OP3(0x06)) 245#define STW (INSN_OP(3) | INSN_OP3(0x04)) 246#define STX (INSN_OP(3) | INSN_OP3(0x0e)) 247#define LDUBA (INSN_OP(3) | INSN_OP3(0x11)) 248#define LDSBA (INSN_OP(3) | INSN_OP3(0x19)) 249#define LDUHA (INSN_OP(3) | INSN_OP3(0x12)) 250#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a)) 251#define LDUWA (INSN_OP(3) | INSN_OP3(0x10)) 252#define LDSWA (INSN_OP(3) | INSN_OP3(0x18)) 253#define LDXA (INSN_OP(3) | INSN_OP3(0x1b)) 254#define STBA (INSN_OP(3) | INSN_OP3(0x15)) 255#define STHA (INSN_OP(3) | INSN_OP3(0x16)) 256#define STWA (INSN_OP(3) | INSN_OP3(0x14)) 257#define STXA (INSN_OP(3) | INSN_OP3(0x1e)) 258 259#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13)) 260 261#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0) 262 263#ifndef ASI_PRIMARY_LITTLE 264#define ASI_PRIMARY_LITTLE 0x88 265#endif 266 267#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE)) 268#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE)) 269#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE)) 270#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE)) 271#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE)) 272 273#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE)) 274#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE)) 275#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE)) 276 277#ifndef use_vis3_instructions 278bool use_vis3_instructions; 279#endif 280 281static bool check_fit_i64(int64_t val, unsigned int bits) 282{ 283 return val == sextract64(val, 0, bits); 284} 285 286static bool check_fit_i32(int32_t val, unsigned int bits) 287{ 288 return val == sextract32(val, 0, bits); 289} 290 291#define check_fit_tl check_fit_i64 292#define check_fit_ptr check_fit_i64 293 294static bool patch_reloc(tcg_insn_unit *src_rw, int type, 295 intptr_t value, intptr_t addend) 296{ 297 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); 298 uint32_t insn = *src_rw; 299 intptr_t pcrel; 300 301 value += addend; 302 pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx); 303 304 switch (type) { 305 case R_SPARC_WDISP16: 306 if (!check_fit_ptr(pcrel >> 2, 16)) { 307 return false; 308 } 309 insn &= ~INSN_OFF16(-1); 310 insn |= INSN_OFF16(pcrel); 311 break; 312 case R_SPARC_WDISP19: 313 if (!check_fit_ptr(pcrel >> 2, 19)) { 314 return 
false; 315 } 316 insn &= ~INSN_OFF19(-1); 317 insn |= INSN_OFF19(pcrel); 318 break; 319 case R_SPARC_13: 320 if (!check_fit_ptr(value, 13)) { 321 return false; 322 } 323 insn &= ~INSN_IMM13(-1); 324 insn |= INSN_IMM13(value); 325 break; 326 default: 327 g_assert_not_reached(); 328 } 329 330 *src_rw = insn; 331 return true; 332} 333 334/* test if a constant matches the constraint */ 335static bool tcg_target_const_match(int64_t val, TCGType type, int ct) 336{ 337 if (ct & TCG_CT_CONST) { 338 return 1; 339 } 340 341 if (type == TCG_TYPE_I32) { 342 val = (int32_t)val; 343 } 344 345 if ((ct & TCG_CT_CONST_ZERO) && val == 0) { 346 return 1; 347 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) { 348 return 1; 349 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) { 350 return 1; 351 } else { 352 return 0; 353 } 354} 355 356static void tcg_out_nop(TCGContext *s) 357{ 358 tcg_out32(s, NOP); 359} 360 361static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, 362 TCGReg rs2, int op) 363{ 364 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)); 365} 366 367static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, 368 int32_t offset, int op) 369{ 370 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset)); 371} 372 373static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, 374 int32_t val2, int val2const, int op) 375{ 376 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) 377 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2))); 378} 379 380static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 381{ 382 if (ret != arg) { 383 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); 384 } 385 return true; 386} 387 388static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg) 389{ 390 if (ret != arg) { 391 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); 392 } else { 393 tcg_out_nop(s); 394 } 395} 396 397static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) 398{ 399 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); 400} 401 402static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) 403{ 404 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); 405} 406 407static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) 408{ 409 if (check_fit_i32(arg, 13)) { 410 /* A 13-bit constant sign-extended to 64-bits. */ 411 tcg_out_movi_imm13(s, ret, arg); 412 } else { 413 /* A 32-bit constant zero-extended to 64 bits. */ 414 tcg_out_sethi(s, ret, arg); 415 if (arg & 0x3ff) { 416 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); 417 } 418 } 419} 420 421static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, 422 tcg_target_long arg, bool in_prologue, 423 TCGReg scratch) 424{ 425 tcg_target_long hi, lo = (int32_t)arg; 426 tcg_target_long test, lsb; 427 428 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ 429 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { 430 tcg_out_movi_imm32(s, ret, arg); 431 return; 432 } 433 434 /* A 13-bit constant sign-extended to 64-bits. */ 435 if (check_fit_tl(arg, 13)) { 436 tcg_out_movi_imm13(s, ret, arg); 437 return; 438 } 439 440 /* A 13-bit constant relative to the TB. */ 441 if (!in_prologue) { 442 test = tcg_tbrel_diff(s, (void *)arg); 443 if (check_fit_ptr(test, 13)) { 444 tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD); 445 return; 446 } 447 } 448 449 /* A 32-bit constant sign-extended to 64-bits. 
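       ARG is negative here (the zero-extended case was handled above), so
       SETHI of the complement followed by XOR with a sign-extended 13-bit
       immediate both fills in the low 10 bits and flips the upper bits back,
       yielding the sign-extended 64-bit value in two insns.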
*/ 450 if (arg == lo) { 451 tcg_out_sethi(s, ret, ~arg); 452 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); 453 return; 454 } 455 456 /* A 32-bit constant, shifted. */ 457 lsb = ctz64(arg); 458 test = (tcg_target_long)arg >> lsb; 459 if (lsb > 10 && test == extract64(test, 0, 21)) { 460 tcg_out_sethi(s, ret, test << 10); 461 tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX); 462 return; 463 } else if (test == (uint32_t)test || test == (int32_t)test) { 464 tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch); 465 tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); 466 return; 467 } 468 469 /* Use the constant pool, if possible. */ 470 if (!in_prologue) { 471 new_pool_label(s, arg, R_SPARC_13, s->code_ptr, 472 tcg_tbrel_diff(s, NULL)); 473 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB)); 474 return; 475 } 476 477 /* A 64-bit constant decomposed into 2 32-bit pieces. */ 478 if (check_fit_i32(lo, 13)) { 479 hi = (arg - lo) >> 32; 480 tcg_out_movi_imm32(s, ret, hi); 481 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); 482 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); 483 } else { 484 hi = arg >> 32; 485 tcg_out_movi_imm32(s, ret, hi); 486 tcg_out_movi_imm32(s, scratch, lo); 487 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); 488 tcg_out_arith(s, ret, ret, scratch, ARITH_OR); 489 } 490} 491 492static void tcg_out_movi(TCGContext *s, TCGType type, 493 TCGReg ret, tcg_target_long arg) 494{ 495 tcg_debug_assert(ret != TCG_REG_T2); 496 tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2); 497} 498 499static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 500 tcg_target_long imm) 501{ 502 /* This function is only used for passing structs by reference. */ 503 g_assert_not_reached(); 504} 505 506static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, 507 TCGReg a2, int op) 508{ 509 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2)); 510} 511 512static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr, 513 intptr_t offset, int op) 514{ 515 if (check_fit_ptr(offset, 13)) { 516 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) | 517 INSN_IMM13(offset)); 518 } else { 519 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset); 520 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op); 521 } 522} 523 524static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, 525 TCGReg arg1, intptr_t arg2) 526{ 527 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX)); 528} 529 530static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, 531 TCGReg arg1, intptr_t arg2) 532{ 533 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); 534} 535 536static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 537 TCGReg base, intptr_t ofs) 538{ 539 if (val == 0) { 540 tcg_out_st(s, type, TCG_REG_G0, base, ofs); 541 return true; 542 } 543 return false; 544} 545 546static void tcg_out_sety(TCGContext *s, TCGReg rs) 547{ 548 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); 549} 550 551static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, 552 int32_t val2, int val2const, int uns) 553{ 554 /* Load Y with the sign/zero extension of RS1 to 64-bits. */ 555 if (uns) { 556 tcg_out_sety(s, TCG_REG_G0); 557 } else { 558 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA); 559 tcg_out_sety(s, TCG_REG_T1); 560 } 561 562 tcg_out_arithc(s, rd, rs1, val2, val2const, 563 uns ? 
ARITH_UDIV : ARITH_SDIV); 564} 565 566static const uint8_t tcg_cond_to_bcond[] = { 567 [TCG_COND_EQ] = COND_E, 568 [TCG_COND_NE] = COND_NE, 569 [TCG_COND_LT] = COND_L, 570 [TCG_COND_GE] = COND_GE, 571 [TCG_COND_LE] = COND_LE, 572 [TCG_COND_GT] = COND_G, 573 [TCG_COND_LTU] = COND_CS, 574 [TCG_COND_GEU] = COND_CC, 575 [TCG_COND_LEU] = COND_LEU, 576 [TCG_COND_GTU] = COND_GU, 577}; 578 579static const uint8_t tcg_cond_to_rcond[] = { 580 [TCG_COND_EQ] = RCOND_Z, 581 [TCG_COND_NE] = RCOND_NZ, 582 [TCG_COND_LT] = RCOND_LZ, 583 [TCG_COND_GT] = RCOND_GZ, 584 [TCG_COND_LE] = RCOND_LEZ, 585 [TCG_COND_GE] = RCOND_GEZ 586}; 587 588static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19) 589{ 590 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19); 591} 592 593static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l) 594{ 595 int off19 = 0; 596 597 if (l->has_value) { 598 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr)); 599 } else { 600 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0); 601 } 602 tcg_out_bpcc0(s, scond, flags, off19); 603} 604 605static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const) 606{ 607 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC); 608} 609 610static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, 611 int32_t arg2, int const_arg2, TCGLabel *l) 612{ 613 tcg_out_cmp(s, arg1, arg2, const_arg2); 614 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l); 615 tcg_out_nop(s); 616} 617 618static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret, 619 int32_t v1, int v1const) 620{ 621 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) 622 | INSN_RS1(tcg_cond_to_bcond[cond]) 623 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1))); 624} 625 626static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, 627 TCGReg c1, int32_t c2, int c2const, 628 int32_t v1, int v1const) 629{ 630 tcg_out_cmp(s, c1, c2, c2const); 631 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const); 632} 633 634static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, 635 int32_t arg2, int const_arg2, TCGLabel *l) 636{ 637 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ 638 if (arg2 == 0 && !is_unsigned_cond(cond)) { 639 int off16 = 0; 640 641 if (l->has_value) { 642 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr)); 643 } else { 644 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0); 645 } 646 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) 647 | INSN_COND(tcg_cond_to_rcond[cond]) | off16); 648 } else { 649 tcg_out_cmp(s, arg1, arg2, const_arg2); 650 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l); 651 } 652 tcg_out_nop(s); 653} 654 655static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, 656 int32_t v1, int v1const) 657{ 658 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) 659 | (tcg_cond_to_rcond[cond] << 10) 660 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1))); 661} 662 663static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, 664 TCGReg c1, int32_t c2, int c2const, 665 int32_t v1, int v1const) 666{ 667 /* For 64-bit signed comparisons vs zero, we can avoid the compare. 668 Note that the immediate range is one bit smaller, so we must check 669 for that as well. 
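       (MOVR takes a 10-bit immediate where MOVCC takes 11 bits, hence the
       check_fit_i32(v1, 10) test below.)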
*/ 670 if (c2 == 0 && !is_unsigned_cond(cond) 671 && (!v1const || check_fit_i32(v1, 10))) { 672 tcg_out_movr(s, cond, ret, c1, v1, v1const); 673 } else { 674 tcg_out_cmp(s, c1, c2, c2const); 675 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const); 676 } 677} 678 679static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, 680 TCGReg c1, int32_t c2, int c2const) 681{ 682 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */ 683 switch (cond) { 684 case TCG_COND_LTU: 685 case TCG_COND_GEU: 686 /* The result of the comparison is in the carry bit. */ 687 break; 688 689 case TCG_COND_EQ: 690 case TCG_COND_NE: 691 /* For equality, we can transform to inequality vs zero. */ 692 if (c2 != 0) { 693 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR); 694 c2 = TCG_REG_T1; 695 } else { 696 c2 = c1; 697 } 698 c1 = TCG_REG_G0, c2const = 0; 699 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); 700 break; 701 702 case TCG_COND_GTU: 703 case TCG_COND_LEU: 704 /* If we don't need to load a constant into a register, we can 705 swap the operands on GTU/LEU. There's no benefit to loading 706 the constant into a temporary register. */ 707 if (!c2const || c2 == 0) { 708 TCGReg t = c1; 709 c1 = c2; 710 c2 = t; 711 c2const = 0; 712 cond = tcg_swap_cond(cond); 713 break; 714 } 715 /* FALLTHRU */ 716 717 default: 718 tcg_out_cmp(s, c1, c2, c2const); 719 tcg_out_movi_imm13(s, ret, 0); 720 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1); 721 return; 722 } 723 724 tcg_out_cmp(s, c1, c2, c2const); 725 if (cond == TCG_COND_LTU) { 726 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC); 727 } else { 728 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC); 729 } 730} 731 732static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, 733 TCGReg c1, int32_t c2, int c2const) 734{ 735 if (use_vis3_instructions) { 736 switch (cond) { 737 case TCG_COND_NE: 738 if (c2 != 0) { 739 break; 740 } 741 c2 = c1, c2const = 0, c1 = TCG_REG_G0; 742 /* FALLTHRU */ 743 case TCG_COND_LTU: 744 tcg_out_cmp(s, c1, c2, c2const); 745 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); 746 return; 747 default: 748 break; 749 } 750 } 751 752 /* For 64-bit signed comparisons vs zero, we can avoid the compare 753 if the input does not overlap the output. */ 754 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { 755 tcg_out_movi_imm13(s, ret, 0); 756 tcg_out_movr(s, cond, ret, c1, 1, 1); 757 } else { 758 tcg_out_cmp(s, c1, c2, c2const); 759 tcg_out_movi_imm13(s, ret, 0); 760 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1); 761 } 762} 763 764static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh, 765 TCGReg al, TCGReg ah, int32_t bl, int blconst, 766 int32_t bh, int bhconst, int opl, int oph) 767{ 768 TCGReg tmp = TCG_REG_T1; 769 770 /* Note that the low parts are fully consumed before tmp is set. */ 771 if (rl != ah && (bhconst || rl != bh)) { 772 tmp = rl; 773 } 774 775 tcg_out_arithc(s, tmp, al, bl, blconst, opl); 776 tcg_out_arithc(s, rh, ah, bh, bhconst, oph); 777 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp); 778} 779 780static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, 781 TCGReg al, TCGReg ah, int32_t bl, int blconst, 782 int32_t bh, int bhconst, bool is_sub) 783{ 784 TCGReg tmp = TCG_REG_T1; 785 786 /* Note that the low parts are fully consumed before tmp is set. */ 787 if (rl != ah && (bhconst || rl != bh)) { 788 tmp = rl; 789 } 790 791 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? 
ARITH_SUBCC : ARITH_ADDCC); 792 793 if (use_vis3_instructions && !is_sub) { 794 /* Note that ADDXC doesn't accept immediates. */ 795 if (bhconst && bh != 0) { 796 tcg_out_movi_imm13(s, TCG_REG_T2, bh); 797 bh = TCG_REG_T2; 798 } 799 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); 800 } else if (bh == TCG_REG_G0) { 801 /* If we have a zero, we can perform the operation in two insns, 802 with the arithmetic first, and a conditional move into place. */ 803 if (rh == ah) { 804 tcg_out_arithi(s, TCG_REG_T2, ah, 1, 805 is_sub ? ARITH_SUB : ARITH_ADD); 806 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); 807 } else { 808 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); 809 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); 810 } 811 } else { 812 /* 813 * Otherwise adjust BH as if there is carry into T2. 814 * Note that constant BH is constrained to 11 bits for the MOVCC, 815 * so the adjustment fits 12 bits. 816 */ 817 if (bhconst) { 818 tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); 819 } else { 820 tcg_out_arithi(s, TCG_REG_T2, bh, 1, 821 is_sub ? ARITH_SUB : ARITH_ADD); 822 } 823 /* ... smoosh T2 back to original BH if carry is clear ... */ 824 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); 825 /* ... and finally perform the arithmetic with the new operand. */ 826 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD); 827 } 828 829 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); 830} 831 832static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, 833 bool in_prologue, bool tail_call) 834{ 835 uintptr_t desti = (uintptr_t)dest; 836 837 /* Be careful not to clobber %o7 for a tail call. */ 838 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, 839 desti & ~0xfff, in_prologue, 840 tail_call ? TCG_REG_G2 : TCG_REG_O7); 841 tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7, 842 TCG_REG_T1, desti & 0xfff, JMPL); 843} 844 845static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, 846 bool in_prologue) 847{ 848 ptrdiff_t disp = tcg_pcrel_diff(s, dest); 849 850 if (disp == (int32_t)disp) { 851 tcg_out32(s, CALL | (uint32_t)disp >> 2); 852 } else { 853 tcg_out_jmpl_const(s, dest, in_prologue, false); 854 } 855} 856 857static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest, 858 const TCGHelperInfo *info) 859{ 860 tcg_out_call_nodelay(s, dest, false); 861 tcg_out_nop(s); 862} 863 864static void tcg_out_mb(TCGContext *s, TCGArg a0) 865{ 866 /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */ 867 tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); 868} 869 870#ifdef CONFIG_SOFTMMU 871static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1]; 872static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1]; 873 874static void emit_extend(TCGContext *s, TCGReg r, int op) 875{ 876 /* Emit zero extend of 8, 16 or 32 bit data as 877 * required by the MO_* value op; do nothing for 64 bit. 
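     * E.g. a 16-bit value is zero-extended with an SLL/SRL pair by 16, and a
     * 32-bit value with a single 32-bit SRL by zero, which clears bits 63..32.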
878 */ 879 switch (op & MO_SIZE) { 880 case MO_8: 881 tcg_out_arithi(s, r, r, 0xff, ARITH_AND); 882 break; 883 case MO_16: 884 tcg_out_arithi(s, r, r, 16, SHIFT_SLL); 885 tcg_out_arithi(s, r, r, 16, SHIFT_SRL); 886 break; 887 case MO_32: 888 tcg_out_arith(s, r, r, 0, SHIFT_SRL); 889 break; 890 case MO_64: 891 break; 892 } 893} 894 895static void build_trampolines(TCGContext *s) 896{ 897 static void * const qemu_ld_helpers[] = { 898 [MO_UB] = helper_ret_ldub_mmu, 899 [MO_SB] = helper_ret_ldsb_mmu, 900 [MO_LEUW] = helper_le_lduw_mmu, 901 [MO_LESW] = helper_le_ldsw_mmu, 902 [MO_LEUL] = helper_le_ldul_mmu, 903 [MO_LEUQ] = helper_le_ldq_mmu, 904 [MO_BEUW] = helper_be_lduw_mmu, 905 [MO_BESW] = helper_be_ldsw_mmu, 906 [MO_BEUL] = helper_be_ldul_mmu, 907 [MO_BEUQ] = helper_be_ldq_mmu, 908 }; 909 static void * const qemu_st_helpers[] = { 910 [MO_UB] = helper_ret_stb_mmu, 911 [MO_LEUW] = helper_le_stw_mmu, 912 [MO_LEUL] = helper_le_stl_mmu, 913 [MO_LEUQ] = helper_le_stq_mmu, 914 [MO_BEUW] = helper_be_stw_mmu, 915 [MO_BEUL] = helper_be_stl_mmu, 916 [MO_BEUQ] = helper_be_stq_mmu, 917 }; 918 919 int i; 920 921 for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { 922 if (qemu_ld_helpers[i] == NULL) { 923 continue; 924 } 925 926 /* May as well align the trampoline. */ 927 while ((uintptr_t)s->code_ptr & 15) { 928 tcg_out_nop(s); 929 } 930 qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); 931 932 /* Set the retaddr operand. */ 933 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7); 934 /* Tail call. */ 935 tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true); 936 /* delay slot -- set the env argument */ 937 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); 938 } 939 940 for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { 941 if (qemu_st_helpers[i] == NULL) { 942 continue; 943 } 944 945 /* May as well align the trampoline. */ 946 while ((uintptr_t)s->code_ptr & 15) { 947 tcg_out_nop(s); 948 } 949 qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); 950 951 emit_extend(s, TCG_REG_O2, i); 952 953 /* Set the retaddr operand. */ 954 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7); 955 956 /* Tail call. */ 957 tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true); 958 /* delay slot -- set the env argument */ 959 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); 960 } 961} 962#else 963static const tcg_insn_unit *qemu_unalign_ld_trampoline; 964static const tcg_insn_unit *qemu_unalign_st_trampoline; 965 966static void build_trampolines(TCGContext *s) 967{ 968 for (int ld = 0; ld < 2; ++ld) { 969 void *helper; 970 971 while ((uintptr_t)s->code_ptr & 15) { 972 tcg_out_nop(s); 973 } 974 975 if (ld) { 976 helper = helper_unaligned_ld; 977 qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr); 978 } else { 979 helper = helper_unaligned_st; 980 qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr); 981 } 982 983 /* Tail call. */ 984 tcg_out_jmpl_const(s, helper, true, true); 985 /* delay slot -- set the env argument */ 986 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); 987 } 988} 989#endif 990 991/* Generate global QEMU prologue and epilogue code */ 992static void tcg_target_qemu_prologue(TCGContext *s) 993{ 994 int tmp_buf_size, frame_size; 995 996 /* 997 * The TCG temp buffer is at the top of the frame, immediately 998 * below the frame pointer. Use the logical (aligned) offset here; 999 * the stack bias is applied in temp_allocate_frame(). 
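     * (The SPARC V9 ABI biases %sp and %fp by 2047 bytes; that bias is
     * TCG_TARGET_STACK_BIAS below and the 2047 in the debug_frame CFA at the
     * end of this file.)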
1000 */ 1001 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long); 1002 tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size); 1003 1004 /* 1005 * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is 1006 * otherwise the minimal frame usable by callees. 1007 */ 1008 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS; 1009 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size; 1010 frame_size += TCG_TARGET_STACK_ALIGN - 1; 1011 frame_size &= -TCG_TARGET_STACK_ALIGN; 1012 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) | 1013 INSN_IMM13(-frame_size)); 1014 1015#ifndef CONFIG_SOFTMMU 1016 if (guest_base != 0) { 1017 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, 1018 guest_base, true, TCG_REG_T1); 1019 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); 1020 } 1021#endif 1022 1023 /* We choose TCG_REG_TB such that no move is required. */ 1024 QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1); 1025 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); 1026 1027 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL); 1028 /* delay slot */ 1029 tcg_out_nop(s); 1030 1031 /* Epilogue for goto_ptr. */ 1032 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); 1033 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); 1034 /* delay slot */ 1035 tcg_out_movi_imm13(s, TCG_REG_O0, 0); 1036 1037 build_trampolines(s); 1038} 1039 1040static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 1041{ 1042 int i; 1043 for (i = 0; i < count; ++i) { 1044 p[i] = NOP; 1045 } 1046} 1047 1048#if defined(CONFIG_SOFTMMU) 1049 1050/* We expect to use a 13-bit negative offset from ENV. */ 1051QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1052QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); 1053 1054/* Perform the TLB load and compare. 1055 1056 Inputs: 1057 ADDRLO and ADDRHI contain the possible two parts of the address. 1058 1059 MEM_INDEX and S_BITS are the memory context and log2 size of the load. 1060 1061 WHICH is the offset into the CPUTLBEntry structure of the slot to read. 1062 This should be offsetof addr_read or addr_write. 1063 1064 The result of the TLB comparison is in %[ix]cc. The sanitized address 1065 is in the returned register, maybe %o0. The TLB addend is in %o1. */ 1066 1067static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, 1068 MemOp opc, int which) 1069{ 1070 int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1071 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1072 int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1073 const TCGReg r0 = TCG_REG_O0; 1074 const TCGReg r1 = TCG_REG_O1; 1075 const TCGReg r2 = TCG_REG_O2; 1076 unsigned s_bits = opc & MO_SIZE; 1077 unsigned a_bits = get_alignment_bits(opc); 1078 tcg_target_long compare_mask; 1079 1080 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ 1081 tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); 1082 tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); 1083 1084 /* Extract the page index, shifted into place for tlb index. */ 1085 tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, 1086 SHIFT_SRL); 1087 tcg_out_arith(s, r2, r2, r0, ARITH_AND); 1088 1089 /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */ 1090 tcg_out_arith(s, r2, r2, r1, ARITH_ADD); 1091 1092 /* Load the tlb comparator and the addend. */ 1093 tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which); 1094 tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend)); 1095 1096 /* Mask out the page offset, except for the required alignment. 
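       E.g. for an aligned 4-byte access a_bits is 2 and compare_mask is
       TARGET_PAGE_MASK | 3, so a misaligned address can never match the
       TLB comparator.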
1097 We don't support unaligned accesses. */ 1098 if (a_bits < s_bits) { 1099 a_bits = s_bits; 1100 } 1101 compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); 1102 if (check_fit_tl(compare_mask, 13)) { 1103 tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND); 1104 } else { 1105 tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask); 1106 tcg_out_arith(s, r2, addr, r2, ARITH_AND); 1107 } 1108 tcg_out_cmp(s, r0, r2, 0); 1109 1110 /* If the guest address must be zero-extended, do so now. */ 1111 if (TARGET_LONG_BITS == 32) { 1112 tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); 1113 return r0; 1114 } 1115 return addr; 1116} 1117#endif /* CONFIG_SOFTMMU */ 1118 1119static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { 1120 [MO_UB] = LDUB, 1121 [MO_SB] = LDSB, 1122 [MO_UB | MO_LE] = LDUB, 1123 [MO_SB | MO_LE] = LDSB, 1124 1125 [MO_BEUW] = LDUH, 1126 [MO_BESW] = LDSH, 1127 [MO_BEUL] = LDUW, 1128 [MO_BESL] = LDSW, 1129 [MO_BEUQ] = LDX, 1130 [MO_BESQ] = LDX, 1131 1132 [MO_LEUW] = LDUH_LE, 1133 [MO_LESW] = LDSH_LE, 1134 [MO_LEUL] = LDUW_LE, 1135 [MO_LESL] = LDSW_LE, 1136 [MO_LEUQ] = LDX_LE, 1137 [MO_LESQ] = LDX_LE, 1138}; 1139 1140static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { 1141 [MO_UB] = STB, 1142 1143 [MO_BEUW] = STH, 1144 [MO_BEUL] = STW, 1145 [MO_BEUQ] = STX, 1146 1147 [MO_LEUW] = STH_LE, 1148 [MO_LEUL] = STW_LE, 1149 [MO_LEUQ] = STX_LE, 1150}; 1151 1152static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, 1153 MemOpIdx oi, bool is_64) 1154{ 1155 MemOp memop = get_memop(oi); 1156 tcg_insn_unit *label_ptr; 1157 1158#ifdef CONFIG_SOFTMMU 1159 unsigned memi = get_mmuidx(oi); 1160 TCGReg addrz; 1161 const tcg_insn_unit *func; 1162 1163 addrz = tcg_out_tlb_load(s, addr, memi, memop, 1164 offsetof(CPUTLBEntry, addr_read)); 1165 1166 /* The fast path is exactly one insn. Thus we can perform the 1167 entire TLB Hit in the (annulled) delay slot of the branch 1168 over the TLB Miss case. */ 1169 1170 /* beq,a,pt %[xi]cc, label0 */ 1171 label_ptr = s->code_ptr; 1172 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT 1173 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); 1174 /* delay slot */ 1175 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, 1176 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); 1177 1178 /* TLB Miss. */ 1179 1180 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); 1181 1182 /* We use the helpers to extend SB and SW data, leaving the case 1183 of SL needing explicit extending below. */ 1184 if ((memop & MO_SSIZE) == MO_SL) { 1185 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; 1186 } else { 1187 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; 1188 } 1189 tcg_debug_assert(func != NULL); 1190 tcg_out_call_nodelay(s, func, false); 1191 /* delay slot */ 1192 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi); 1193 1194 /* We let the helper sign-extend SB and SW, but leave SL for here. */ 1195 if (is_64 && (memop & MO_SSIZE) == MO_SL) { 1196 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); 1197 } else { 1198 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); 1199 } 1200 1201 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); 1202#else 1203 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); 1204 unsigned a_bits = get_alignment_bits(memop); 1205 unsigned s_bits = memop & MO_SIZE; 1206 unsigned t_bits; 1207 1208 if (TARGET_LONG_BITS == 32) { 1209 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); 1210 addr = TCG_REG_T1; 1211 } 1212 1213 /* 1214 * Normal case: alignment equal to access size. 
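     * A single indexed load through [addr + index] suffices; guest_base, if
     * any, is already in TCG_GUEST_BASE_REG, otherwise index is %g0.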
1215 */ 1216 if (a_bits == s_bits) { 1217 tcg_out_ldst_rr(s, data, addr, index, 1218 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); 1219 return; 1220 } 1221 1222 /* 1223 * Test for at least natural alignment, and assume most accesses 1224 * will be aligned -- perform a straight load in the delay slot. 1225 * This is required to preserve atomicity for aligned accesses. 1226 */ 1227 t_bits = MAX(a_bits, s_bits); 1228 tcg_debug_assert(t_bits < 13); 1229 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); 1230 1231 /* beq,a,pt %icc, label */ 1232 label_ptr = s->code_ptr; 1233 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); 1234 /* delay slot */ 1235 tcg_out_ldst_rr(s, data, addr, index, 1236 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); 1237 1238 if (a_bits >= s_bits) { 1239 /* 1240 * Overalignment: A successful alignment test will perform the memory 1241 * operation in the delay slot, and failure need only invoke the 1242 * handler for SIGBUS. 1243 */ 1244 tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false); 1245 /* delay slot -- move to low part of argument reg */ 1246 tcg_out_mov_delay(s, TCG_REG_O1, addr); 1247 } else { 1248 /* Underalignment: load by pieces of minimum alignment. */ 1249 int ld_opc, a_size, s_size, i; 1250 1251 /* 1252 * Force full address into T1 early; avoids problems with 1253 * overlap between @addr and @data. 1254 */ 1255 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); 1256 1257 a_size = 1 << a_bits; 1258 s_size = 1 << s_bits; 1259 if ((memop & MO_BSWAP) == MO_BE) { 1260 ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)]; 1261 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); 1262 ld_opc = qemu_ld_opc[a_bits | MO_BE]; 1263 for (i = a_size; i < s_size; i += a_size) { 1264 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); 1265 tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX); 1266 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); 1267 } 1268 } else if (a_bits == 0) { 1269 ld_opc = LDUB; 1270 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); 1271 for (i = a_size; i < s_size; i += a_size) { 1272 if ((memop & MO_SIGN) && i == s_size - a_size) { 1273 ld_opc = LDSB; 1274 } 1275 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); 1276 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); 1277 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); 1278 } 1279 } else { 1280 ld_opc = qemu_ld_opc[a_bits | MO_LE]; 1281 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc); 1282 for (i = a_size; i < s_size; i += a_size) { 1283 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); 1284 if ((memop & MO_SIGN) && i == s_size - a_size) { 1285 ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN]; 1286 } 1287 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc); 1288 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); 1289 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); 1290 } 1291 } 1292 } 1293 1294 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); 1295#endif /* CONFIG_SOFTMMU */ 1296} 1297 1298static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, 1299 MemOpIdx oi) 1300{ 1301 MemOp memop = get_memop(oi); 1302 tcg_insn_unit *label_ptr; 1303 1304#ifdef CONFIG_SOFTMMU 1305 unsigned memi = get_mmuidx(oi); 1306 TCGReg addrz; 1307 const tcg_insn_unit *func; 1308 1309 addrz = tcg_out_tlb_load(s, addr, memi, memop, 1310 offsetof(CPUTLBEntry, addr_write)); 1311 1312 /* The fast path is exactly one insn. 
Thus we can perform the entire 1313 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ 1314 /* beq,a,pt %[xi]cc, label0 */ 1315 label_ptr = s->code_ptr; 1316 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT 1317 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); 1318 /* delay slot */ 1319 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, 1320 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); 1321 1322 /* TLB Miss. */ 1323 1324 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); 1325 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data); 1326 1327 func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; 1328 tcg_debug_assert(func != NULL); 1329 tcg_out_call_nodelay(s, func, false); 1330 /* delay slot */ 1331 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi); 1332 1333 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); 1334#else 1335 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); 1336 unsigned a_bits = get_alignment_bits(memop); 1337 unsigned s_bits = memop & MO_SIZE; 1338 unsigned t_bits; 1339 1340 if (TARGET_LONG_BITS == 32) { 1341 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); 1342 addr = TCG_REG_T1; 1343 } 1344 1345 /* 1346 * Normal case: alignment equal to access size. 1347 */ 1348 if (a_bits == s_bits) { 1349 tcg_out_ldst_rr(s, data, addr, index, 1350 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); 1351 return; 1352 } 1353 1354 /* 1355 * Test for at least natural alignment, and assume most accesses 1356 * will be aligned -- perform a straight store in the delay slot. 1357 * This is required to preserve atomicity for aligned accesses. 1358 */ 1359 t_bits = MAX(a_bits, s_bits); 1360 tcg_debug_assert(t_bits < 13); 1361 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); 1362 1363 /* beq,a,pt %icc, label */ 1364 label_ptr = s->code_ptr; 1365 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); 1366 /* delay slot */ 1367 tcg_out_ldst_rr(s, data, addr, index, 1368 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); 1369 1370 if (a_bits >= s_bits) { 1371 /* 1372 * Overalignment: A successful alignment test will perform the memory 1373 * operation in the delay slot, and failure need only invoke the 1374 * handler for SIGBUS. 1375 */ 1376 tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false); 1377 /* delay slot -- move to low part of argument reg */ 1378 tcg_out_mov_delay(s, TCG_REG_O1, addr); 1379 } else { 1380 /* Underalignment: store by pieces of minimum alignment. */ 1381 int st_opc, a_size, s_size, i; 1382 1383 /* 1384 * Force full address into T1 early; avoids problems with 1385 * overlap between @addr and @data. 1386 */ 1387 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); 1388 1389 a_size = 1 << a_bits; 1390 s_size = 1 << s_bits; 1391 if ((memop & MO_BSWAP) == MO_BE) { 1392 st_opc = qemu_st_opc[a_bits | MO_BE]; 1393 for (i = 0; i < s_size; i += a_size) { 1394 TCGReg d = data; 1395 int shift = (s_size - a_size - i) * 8; 1396 if (shift) { 1397 d = TCG_REG_T2; 1398 tcg_out_arithi(s, d, data, shift, SHIFT_SRLX); 1399 } 1400 tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc); 1401 } 1402 } else if (a_bits == 0) { 1403 tcg_out_ldst(s, data, TCG_REG_T1, 0, STB); 1404 for (i = 1; i < s_size; i++) { 1405 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); 1406 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB); 1407 } 1408 } else { 1409 /* Note that ST*A with immediate asi must use indexed address. 
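               Hence the register+register (T1 + %g0) addressing via
               tcg_out_ldst_rr below.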
*/ 1410 st_opc = qemu_st_opc[a_bits + MO_LE]; 1411 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc); 1412 for (i = a_size; i < s_size; i += a_size) { 1413 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); 1414 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); 1415 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc); 1416 } 1417 } 1418 } 1419 1420 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); 1421#endif /* CONFIG_SOFTMMU */ 1422} 1423 1424static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) 1425{ 1426 if (check_fit_ptr(a0, 13)) { 1427 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); 1428 tcg_out_movi_imm13(s, TCG_REG_O0, a0); 1429 return; 1430 } else { 1431 intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0); 1432 if (check_fit_ptr(tb_diff, 13)) { 1433 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); 1434 /* Note that TCG_REG_TB has been unwound to O1. */ 1435 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD); 1436 return; 1437 } 1438 } 1439 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff); 1440 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); 1441 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR); 1442} 1443 1444static void tcg_out_goto_tb(TCGContext *s, int which) 1445{ 1446 ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which)); 1447 1448 /* Direct branch will be patched by tb_target_set_jmp_target. */ 1449 set_jmp_insn_offset(s, which); 1450 tcg_out32(s, CALL); 1451 /* delay slot */ 1452 tcg_debug_assert(check_fit_ptr(off, 13)); 1453 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off); 1454 set_jmp_reset_offset(s, which); 1455 1456 /* 1457 * For the unlinked path of goto_tb, we need to reset TCG_REG_TB 1458 * to the beginning of this TB. 1459 */ 1460 off = -tcg_current_code_size(s); 1461 if (check_fit_i32(off, 13)) { 1462 tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD); 1463 } else { 1464 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off); 1465 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD); 1466 } 1467} 1468 1469void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 1470 uintptr_t jmp_rx, uintptr_t jmp_rw) 1471{ 1472 uintptr_t addr = tb->jmp_target_addr[n]; 1473 intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2; 1474 tcg_insn_unit insn; 1475 1476 br_disp >>= 2; 1477 if (check_fit_ptr(br_disp, 19)) { 1478 /* ba,pt %icc, addr */ 1479 insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A) 1480 | BPCC_ICC | BPCC_PT, 0, 19, br_disp); 1481 } else if (check_fit_ptr(br_disp, 22)) { 1482 /* ba addr */ 1483 insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A), 1484 0, 22, br_disp); 1485 } else { 1486 /* The code_gen_buffer can't be larger than 2GB. */ 1487 tcg_debug_assert(check_fit_ptr(br_disp, 30)); 1488 /* call addr */ 1489 insn = deposit32(CALL, 0, 30, br_disp); 1490 } 1491 1492 qatomic_set((uint32_t *)jmp_rw, insn); 1493 flush_idcache_range(jmp_rx, jmp_rw, 4); 1494} 1495 1496static void tcg_out_op(TCGContext *s, TCGOpcode opc, 1497 const TCGArg args[TCG_MAX_OP_ARGS], 1498 const int const_args[TCG_MAX_OP_ARGS]) 1499{ 1500 TCGArg a0, a1, a2; 1501 int c, c2; 1502 1503 /* Hoist the loads of the most common arguments. 
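       Not every opcode uses all of a0/a1/a2/c2, but loading them up front
       keeps the switch below compact.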
*/ 1504 a0 = args[0]; 1505 a1 = args[1]; 1506 a2 = args[2]; 1507 c2 = const_args[2]; 1508 1509 switch (opc) { 1510 case INDEX_op_goto_ptr: 1511 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); 1512 tcg_out_mov_delay(s, TCG_REG_TB, a0); 1513 break; 1514 case INDEX_op_br: 1515 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0)); 1516 tcg_out_nop(s); 1517 break; 1518 1519#define OP_32_64(x) \ 1520 glue(glue(case INDEX_op_, x), _i32): \ 1521 glue(glue(case INDEX_op_, x), _i64) 1522 1523 OP_32_64(ld8u): 1524 tcg_out_ldst(s, a0, a1, a2, LDUB); 1525 break; 1526 OP_32_64(ld8s): 1527 tcg_out_ldst(s, a0, a1, a2, LDSB); 1528 break; 1529 OP_32_64(ld16u): 1530 tcg_out_ldst(s, a0, a1, a2, LDUH); 1531 break; 1532 OP_32_64(ld16s): 1533 tcg_out_ldst(s, a0, a1, a2, LDSH); 1534 break; 1535 case INDEX_op_ld_i32: 1536 case INDEX_op_ld32u_i64: 1537 tcg_out_ldst(s, a0, a1, a2, LDUW); 1538 break; 1539 OP_32_64(st8): 1540 tcg_out_ldst(s, a0, a1, a2, STB); 1541 break; 1542 OP_32_64(st16): 1543 tcg_out_ldst(s, a0, a1, a2, STH); 1544 break; 1545 case INDEX_op_st_i32: 1546 case INDEX_op_st32_i64: 1547 tcg_out_ldst(s, a0, a1, a2, STW); 1548 break; 1549 OP_32_64(add): 1550 c = ARITH_ADD; 1551 goto gen_arith; 1552 OP_32_64(sub): 1553 c = ARITH_SUB; 1554 goto gen_arith; 1555 OP_32_64(and): 1556 c = ARITH_AND; 1557 goto gen_arith; 1558 OP_32_64(andc): 1559 c = ARITH_ANDN; 1560 goto gen_arith; 1561 OP_32_64(or): 1562 c = ARITH_OR; 1563 goto gen_arith; 1564 OP_32_64(orc): 1565 c = ARITH_ORN; 1566 goto gen_arith; 1567 OP_32_64(xor): 1568 c = ARITH_XOR; 1569 goto gen_arith; 1570 case INDEX_op_shl_i32: 1571 c = SHIFT_SLL; 1572 do_shift32: 1573 /* Limit immediate shift count lest we create an illegal insn. */ 1574 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c); 1575 break; 1576 case INDEX_op_shr_i32: 1577 c = SHIFT_SRL; 1578 goto do_shift32; 1579 case INDEX_op_sar_i32: 1580 c = SHIFT_SRA; 1581 goto do_shift32; 1582 case INDEX_op_mul_i32: 1583 c = ARITH_UMUL; 1584 goto gen_arith; 1585 1586 OP_32_64(neg): 1587 c = ARITH_SUB; 1588 goto gen_arith1; 1589 OP_32_64(not): 1590 c = ARITH_ORN; 1591 goto gen_arith1; 1592 1593 case INDEX_op_div_i32: 1594 tcg_out_div32(s, a0, a1, a2, c2, 0); 1595 break; 1596 case INDEX_op_divu_i32: 1597 tcg_out_div32(s, a0, a1, a2, c2, 1); 1598 break; 1599 1600 case INDEX_op_brcond_i32: 1601 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3])); 1602 break; 1603 case INDEX_op_setcond_i32: 1604 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2); 1605 break; 1606 case INDEX_op_movcond_i32: 1607 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); 1608 break; 1609 1610 case INDEX_op_add2_i32: 1611 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], 1612 args[4], const_args[4], args[5], const_args[5], 1613 ARITH_ADDCC, ARITH_ADDC); 1614 break; 1615 case INDEX_op_sub2_i32: 1616 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], 1617 args[4], const_args[4], args[5], const_args[5], 1618 ARITH_SUBCC, ARITH_SUBC); 1619 break; 1620 case INDEX_op_mulu2_i32: 1621 c = ARITH_UMUL; 1622 goto do_mul2; 1623 case INDEX_op_muls2_i32: 1624 c = ARITH_SMUL; 1625 do_mul2: 1626 /* The 32-bit multiply insns produce a full 64-bit result. 
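           The full product lands in a0, whose low 32 bits are the low half of
           the result; the high half is then extracted into a1 with SRLX 32.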
*/ 1627 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c); 1628 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); 1629 break; 1630 1631 case INDEX_op_qemu_ld_i32: 1632 tcg_out_qemu_ld(s, a0, a1, a2, false); 1633 break; 1634 case INDEX_op_qemu_ld_i64: 1635 tcg_out_qemu_ld(s, a0, a1, a2, true); 1636 break; 1637 case INDEX_op_qemu_st_i32: 1638 case INDEX_op_qemu_st_i64: 1639 tcg_out_qemu_st(s, a0, a1, a2); 1640 break; 1641 1642 case INDEX_op_ld32s_i64: 1643 tcg_out_ldst(s, a0, a1, a2, LDSW); 1644 break; 1645 case INDEX_op_ld_i64: 1646 tcg_out_ldst(s, a0, a1, a2, LDX); 1647 break; 1648 case INDEX_op_st_i64: 1649 tcg_out_ldst(s, a0, a1, a2, STX); 1650 break; 1651 case INDEX_op_shl_i64: 1652 c = SHIFT_SLLX; 1653 do_shift64: 1654 /* Limit immediate shift count lest we create an illegal insn. */ 1655 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c); 1656 break; 1657 case INDEX_op_shr_i64: 1658 c = SHIFT_SRLX; 1659 goto do_shift64; 1660 case INDEX_op_sar_i64: 1661 c = SHIFT_SRAX; 1662 goto do_shift64; 1663 case INDEX_op_mul_i64: 1664 c = ARITH_MULX; 1665 goto gen_arith; 1666 case INDEX_op_div_i64: 1667 c = ARITH_SDIVX; 1668 goto gen_arith; 1669 case INDEX_op_divu_i64: 1670 c = ARITH_UDIVX; 1671 goto gen_arith; 1672 case INDEX_op_ext_i32_i64: 1673 case INDEX_op_ext32s_i64: 1674 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA); 1675 break; 1676 case INDEX_op_extu_i32_i64: 1677 case INDEX_op_ext32u_i64: 1678 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL); 1679 break; 1680 case INDEX_op_extrl_i64_i32: 1681 tcg_out_mov(s, TCG_TYPE_I32, a0, a1); 1682 break; 1683 case INDEX_op_extrh_i64_i32: 1684 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX); 1685 break; 1686 1687 case INDEX_op_brcond_i64: 1688 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3])); 1689 break; 1690 case INDEX_op_setcond_i64: 1691 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2); 1692 break; 1693 case INDEX_op_movcond_i64: 1694 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); 1695 break; 1696 case INDEX_op_add2_i64: 1697 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], 1698 const_args[4], args[5], const_args[5], false); 1699 break; 1700 case INDEX_op_sub2_i64: 1701 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], 1702 const_args[4], args[5], const_args[5], true); 1703 break; 1704 case INDEX_op_muluh_i64: 1705 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI); 1706 break; 1707 1708 gen_arith: 1709 tcg_out_arithc(s, a0, a1, a2, c2, c); 1710 break; 1711 1712 gen_arith1: 1713 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); 1714 break; 1715 1716 case INDEX_op_mb: 1717 tcg_out_mb(s, a0); 1718 break; 1719 1720 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ 1721 case INDEX_op_mov_i64: 1722 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1723 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1724 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. 
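                                   They should never appear here, so they
                                   share the default abort below.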
*/ 1725 default: 1726 tcg_abort(); 1727 } 1728} 1729 1730static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) 1731{ 1732 switch (op) { 1733 case INDEX_op_goto_ptr: 1734 return C_O0_I1(r); 1735 1736 case INDEX_op_ld8u_i32: 1737 case INDEX_op_ld8u_i64: 1738 case INDEX_op_ld8s_i32: 1739 case INDEX_op_ld8s_i64: 1740 case INDEX_op_ld16u_i32: 1741 case INDEX_op_ld16u_i64: 1742 case INDEX_op_ld16s_i32: 1743 case INDEX_op_ld16s_i64: 1744 case INDEX_op_ld_i32: 1745 case INDEX_op_ld32u_i64: 1746 case INDEX_op_ld32s_i64: 1747 case INDEX_op_ld_i64: 1748 case INDEX_op_neg_i32: 1749 case INDEX_op_neg_i64: 1750 case INDEX_op_not_i32: 1751 case INDEX_op_not_i64: 1752 case INDEX_op_ext32s_i64: 1753 case INDEX_op_ext32u_i64: 1754 case INDEX_op_ext_i32_i64: 1755 case INDEX_op_extu_i32_i64: 1756 case INDEX_op_extrl_i64_i32: 1757 case INDEX_op_extrh_i64_i32: 1758 return C_O1_I1(r, r); 1759 1760 case INDEX_op_st8_i32: 1761 case INDEX_op_st8_i64: 1762 case INDEX_op_st16_i32: 1763 case INDEX_op_st16_i64: 1764 case INDEX_op_st_i32: 1765 case INDEX_op_st32_i64: 1766 case INDEX_op_st_i64: 1767 return C_O0_I2(rZ, r); 1768 1769 case INDEX_op_add_i32: 1770 case INDEX_op_add_i64: 1771 case INDEX_op_mul_i32: 1772 case INDEX_op_mul_i64: 1773 case INDEX_op_div_i32: 1774 case INDEX_op_div_i64: 1775 case INDEX_op_divu_i32: 1776 case INDEX_op_divu_i64: 1777 case INDEX_op_sub_i32: 1778 case INDEX_op_sub_i64: 1779 case INDEX_op_and_i32: 1780 case INDEX_op_and_i64: 1781 case INDEX_op_andc_i32: 1782 case INDEX_op_andc_i64: 1783 case INDEX_op_or_i32: 1784 case INDEX_op_or_i64: 1785 case INDEX_op_orc_i32: 1786 case INDEX_op_orc_i64: 1787 case INDEX_op_xor_i32: 1788 case INDEX_op_xor_i64: 1789 case INDEX_op_shl_i32: 1790 case INDEX_op_shl_i64: 1791 case INDEX_op_shr_i32: 1792 case INDEX_op_shr_i64: 1793 case INDEX_op_sar_i32: 1794 case INDEX_op_sar_i64: 1795 case INDEX_op_setcond_i32: 1796 case INDEX_op_setcond_i64: 1797 return C_O1_I2(r, rZ, rJ); 1798 1799 case INDEX_op_brcond_i32: 1800 case INDEX_op_brcond_i64: 1801 return C_O0_I2(rZ, rJ); 1802 case INDEX_op_movcond_i32: 1803 case INDEX_op_movcond_i64: 1804 return C_O1_I4(r, rZ, rJ, rI, 0); 1805 case INDEX_op_add2_i32: 1806 case INDEX_op_add2_i64: 1807 case INDEX_op_sub2_i32: 1808 case INDEX_op_sub2_i64: 1809 return C_O2_I4(r, r, rZ, rZ, rJ, rJ); 1810 case INDEX_op_mulu2_i32: 1811 case INDEX_op_muls2_i32: 1812 return C_O2_I2(r, r, rZ, rJ); 1813 case INDEX_op_muluh_i64: 1814 return C_O1_I2(r, r, r); 1815 1816 case INDEX_op_qemu_ld_i32: 1817 case INDEX_op_qemu_ld_i64: 1818 return C_O1_I1(r, s); 1819 case INDEX_op_qemu_st_i32: 1820 case INDEX_op_qemu_st_i64: 1821 return C_O0_I2(sZ, s); 1822 1823 default: 1824 g_assert_not_reached(); 1825 } 1826} 1827 1828static void tcg_target_init(TCGContext *s) 1829{ 1830 /* 1831 * Only probe for the platform and capabilities if we haven't already 1832 * determined maximum values at compile time. 
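     * (Presumably tcg-target.h defines use_vis3_instructions to a constant
     * when the compiler itself targets a VIS3-capable CPU; the #ifndef guards
     * here and above then omit both the variable and this AT_HWCAP probe.)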
1833 */ 1834#ifndef use_vis3_instructions 1835 { 1836 unsigned long hwcap = qemu_getauxval(AT_HWCAP); 1837 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; 1838 } 1839#endif 1840 1841 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; 1842 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; 1843 1844 tcg_target_call_clobber_regs = 0; 1845 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1); 1846 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2); 1847 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3); 1848 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4); 1849 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5); 1850 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6); 1851 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7); 1852 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0); 1853 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1); 1854 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2); 1855 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3); 1856 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4); 1857 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5); 1858 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6); 1859 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7); 1860 1861 s->reserved_regs = 0; 1862 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ 1863 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */ 1864 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */ 1865 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */ 1866 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */ 1867 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ 1868 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ 1869 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ 1870} 1871 1872#define ELF_HOST_MACHINE EM_SPARCV9 1873 1874typedef struct { 1875 DebugFrameHeader h; 1876 uint8_t fde_def_cfa[4]; 1877 uint8_t fde_win_save; 1878 uint8_t fde_ret_save[3]; 1879} DebugFrame; 1880 1881static const DebugFrame debug_frame = { 1882 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ 1883 .h.cie.id = -1, 1884 .h.cie.version = 1, 1885 .h.cie.code_align = 1, 1886 .h.cie.data_align = -sizeof(void *) & 0x7f, 1887 .h.cie.return_column = 15, /* o7 */ 1888 1889 /* Total FDE size does not include the "len" member. */ 1890 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), 1891 1892 .fde_def_cfa = { 1893 12, 30, /* DW_CFA_def_cfa i6, 2047 */ 1894 (2047 & 0x7f) | 0x80, (2047 >> 7) 1895 }, 1896 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */ 1897 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */ 1898}; 1899 1900void tcg_register_jit(const void *buf, size_t buf_size) 1901{ 1902 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); 1903} 1904