/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-pool.c.inc"
#include "../tcg-ldst.c.inc"

/*
 * Standardize on the _CALL_FOO symbols used by GCC:
 * Apple XCode does not define _CALL_DARWIN.
 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
 */
#if !defined(_CALL_SYSV) && \
    !defined(_CALL_DARWIN) && \
    !defined(_CALL_AIX) && \
    !defined(_CALL_ELF)
# if defined(__APPLE__)
#  define _CALL_DARWIN
# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
#  define _CALL_SYSV
# else
#  error "Unknown ABI"
# endif
#endif

#if TCG_TARGET_REG_BITS == 64
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
#else
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
#endif

/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif

#define TCG_VEC_TMP1    TCG_REG_V0
#define TCG_VEC_TMP2    TCG_REG_V1

#define TCG_REG_TB     TCG_REG_R31
#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64)

/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)

#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ  0x4000

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull

#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & \
     ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
                          (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
#else
#define ALL_QLOAD_REGS  (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
#define ALL_QSTORE_REGS ALL_QLOAD_REGS
#endif

TCGPowerISA have_isa;
static bool have_isel;
bool have_altivec;
bool have_vsx;

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG 30
#endif

#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R11,
    TCG_REG_R2,
    TCG_REG_R13,
    TCG_REG_R10,  /* call clobbered, arguments */
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,

    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
    TCG_REG_V2,   /* call clobbered, vectors */
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V8,
    TCG_REG_V9,
    TCG_REG_V10,
    TCG_REG_V11,
    TCG_REG_V12,
    TCG_REG_V13,
    TCG_REG_V14,
    TCG_REG_V15,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4
};

static const int tcg_target_callee_save_regs[] = {
#ifdef _CALL_DARWIN
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27, /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};

static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(in_range_b(disp));
    return disp & 0x3fffffc;
}

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (in_range_b(disp)) {
        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
        return true;
    }
    return false;
}

static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (disp == (int16_t) disp) {
        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
        return true;
    }
    return false;
}

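/*
 * A note on the relocations above: R_PPC_REL24 patches the 24-bit LI
 * field of an I-form branch (B), giving a reach of +/-32MB, while
 * R_PPC_REL14 patches the 14-bit BD field of a B-form conditional
 * branch (BC), giving a reach of +/-32KB.  Both displacements are byte
 * offsets with the low two bits implied zero, hence the 0x3fffffc and
 * 0xfffc masks.
 */
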
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    } else if ((ct & TCG_CT_CONST_WSZ)
               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}

#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))
#define VX4(opc)  (OPCD(4)|(opc))

#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define LWZUX  XO31( 55)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LDU    XO58(  1)
#define LDUX   XO31( 53)
#define LWA    XO58(  2)
#define LWAX   XO31(341)

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR     XO31(444)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define MODSW  XO31(779)
#define MODUW  XO31(267)
#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define CNTTZW XO31(538)
#define CNTTZD XO31(570)
#define CNTPOPW XO31(378)
#define CNTPOPD XO31(506)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)
#define MODSD  XO31(777)
#define MODUD  XO31(265)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define EIEIO  XO31(854)
#define HWSYNC XO31(598)
#define LWSYNC (HWSYNC | (1u << 21))

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define BRH    XO31(219)
#define BRW    XO31(155)
#define BRD    XO31(187)

#define TW     XO31( 4)
#define TRAP   (TW | TO(31))

#define NOP    ORI  /* ori 0,0,0 */

#define LVX        XO31(103)
#define LVEBX      XO31(7)
#define LVEHX      XO31(39)
#define LVEWX      XO31(71)
#define LXSDX      (XO31(588) | 1)    /* v2.06, force tx=1 */
#define LXVDSX     (XO31(332) | 1)    /* v2.06, force tx=1 */
#define LXSIWZX    (XO31(12) | 1)     /* v2.07, force tx=1 */
#define LXV        (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
#define LXSD       (OPCD(57) | 2)     /* v3.00 */
#define LXVWSX     (XO31(364) | 1)    /* v3.00, force tx=1 */

#define STVX       XO31(231)
#define STVEWX     XO31(199)
#define STXSDX     (XO31(716) | 1)    /* v2.06, force sx=1 */
#define STXSIWX    (XO31(140) | 1)    /* v2.07, force sx=1 */
#define STXV       (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
#define STXSD      (OPCD(61) | 2)     /* v3.00 */

#define VADDSBS    VX4(768)
#define VADDUBS    VX4(512)
#define VADDUBM    VX4(0)
#define VADDSHS    VX4(832)
#define VADDUHS    VX4(576)
#define VADDUHM    VX4(64)
#define VADDSWS    VX4(896)
#define VADDUWS    VX4(640)
#define VADDUWM    VX4(128)
#define VADDUDM    VX4(192)      /* v2.07 */

#define VSUBSBS    VX4(1792)
#define VSUBUBS    VX4(1536)
#define VSUBUBM    VX4(1024)
#define VSUBSHS    VX4(1856)
#define VSUBUHS    VX4(1600)
#define VSUBUHM    VX4(1088)
#define VSUBSWS    VX4(1920)
#define VSUBUWS    VX4(1664)
#define VSUBUWM    VX4(1152)
#define VSUBUDM    VX4(1216)     /* v2.07 */

#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */

#define VMAXSB     VX4(258)
#define VMAXSH     VX4(322)
#define VMAXSW     VX4(386)
#define VMAXSD     VX4(450)      /* v2.07 */
#define VMAXUB     VX4(2)
#define VMAXUH     VX4(66)
#define VMAXUW     VX4(130)
#define VMAXUD     VX4(194)      /* v2.07 */
#define VMINSB     VX4(770)
#define VMINSH     VX4(834)
#define VMINSW     VX4(898)
#define VMINSD     VX4(962)      /* v2.07 */
#define VMINUB     VX4(514)
#define VMINUH     VX4(578)
#define VMINUW     VX4(642)
#define VMINUD     VX4(706)      /* v2.07 */

#define VCMPEQUB   VX4(6)
#define VCMPEQUH   VX4(70)
#define VCMPEQUW   VX4(134)
#define VCMPEQUD   VX4(199)      /* v2.07 */
#define VCMPGTSB   VX4(774)
#define VCMPGTSH   VX4(838)
#define VCMPGTSW   VX4(902)
#define VCMPGTSD   VX4(967)      /* v2.07 */
#define VCMPGTUB   VX4(518)
#define VCMPGTUH   VX4(582)
#define VCMPGTUW   VX4(646)
#define VCMPGTUD   VX4(711)      /* v2.07 */
#define VCMPNEB    VX4(7)        /* v3.00 */
#define VCMPNEH    VX4(71)       /* v3.00 */
#define VCMPNEW    VX4(135)      /* v3.00 */

#define VSLB       VX4(260)
#define VSLH       VX4(324)
#define VSLW       VX4(388)
#define VSLD       VX4(1476)     /* v2.07 */
#define VSRB       VX4(516)
#define VSRH       VX4(580)
#define VSRW       VX4(644)
#define VSRD       VX4(1732)     /* v2.07 */
#define VSRAB      VX4(772)
#define VSRAH      VX4(836)
#define VSRAW      VX4(900)
#define VSRAD      VX4(964)      /* v2.07 */
#define VRLB       VX4(4)
#define VRLH       VX4(68)
#define VRLW       VX4(132)
#define VRLD       VX4(196)      /* v2.07 */

#define VMULEUB    VX4(520)
#define VMULEUH    VX4(584)
#define VMULEUW    VX4(648)      /* v2.07 */
#define VMULOUB    VX4(8)
#define VMULOUH    VX4(72)
#define VMULOUW    VX4(136)      /* v2.07 */
#define VMULUWM    VX4(137)      /* v2.07 */
#define VMULLD     VX4(457)      /* v3.10 */
#define VMSUMUHM   VX4(38)

#define VMRGHB     VX4(12)
#define VMRGHH     VX4(76)
#define VMRGHW     VX4(140)
#define VMRGLB     VX4(268)
#define VMRGLH     VX4(332)
#define VMRGLW     VX4(396)

#define VPKUHUM    VX4(14)
#define VPKUWUM    VX4(78)

#define VAND       VX4(1028)
#define VANDC      VX4(1092)
#define VNOR       VX4(1284)
#define VOR        VX4(1156)
#define VXOR       VX4(1220)
#define VEQV       VX4(1668)     /* v2.07 */
#define VNAND      VX4(1412)     /* v2.07 */
#define VORC       VX4(1348)     /* v2.07 */

#define VSPLTB     VX4(524)
#define VSPLTH     VX4(588)
#define VSPLTW     VX4(652)
#define VSPLTISB   VX4(780)
#define VSPLTISH   VX4(844)
#define VSPLTISW   VX4(908)

#define VSLDOI     VX4(44)

#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */

#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */

#define RT(r) ((r)<<21)
#define RS(r) ((r)<<21)
#define RA(r) ((r)<<16)
#define RB(r) ((r)<<11)
#define TO(t) ((t)<<21)
#define SH(s) ((s)<<11)
#define MB(b) ((b)<<6)
#define ME(e) ((e)<<1)
#define BO(o) ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b) (1 << (19 - (b)))

#define VRT(r)  (((r) & 31) << 21)
#define VRA(r)  (((r) & 31) << 16)
#define VRB(r)  (((r) & 31) << 11)
#define VRC(r)  (((r) & 31) <<  6)

#define LK    1

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)    ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};

static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

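/*
 * As an example, tcg_to_bc[TCG_COND_LT] is the encoding of
 * "blt cr7, target": BO(12) branches when the selected CR bit is set,
 * BO(4) when it is clear, and BI(7, CR_LT) picks the LT bit of CR
 * field 7, where this backend keeps its comparison results.
 */
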
/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *target;
    int16_t lo;
    int32_t hi;

    value += addend;
    target = (const tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        return reloc_pc14(code_ptr, target);
    case R_PPC_REL24:
        return reloc_pc24(code_ptr, target);
    case R_PPC_ADDR16:
        /*
         * We are (slightly) abusing this relocation type.  In particular,
         * assert that the low 2 bits are zero, and do not modify them.
         * That way we can use this with LD et al that have opcode bits
         * in the low 2 bits of the insn.
         */
        if ((value & 3) || value != (int16_t)value) {
            return false;
        }
        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
        break;
    case R_PPC_ADDR32:
        /*
         * We are abusing this relocation type.  Again, this points to
         * a pair of insns, lis + load.  This is an absolute address
         * relocation for PPC32 so the lis cannot be removed.
         */
        lo = value;
        hi = value - lo;
        if (hi + lo != value) {
            return false;
        }
        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);

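/*
 * Register moves: a gpr-to-gpr move is "or rd,rs,rs", and a move in
 * either direction between the general and vector register files
 * requires the ISA 2.07 mfvsrd/mfvsrwz or mtvsrd/mtvsrwz instructions.
 * Without ISA 2.07 we return false below, and the caller performs the
 * cross-file move through a memory temporary instead.
 */
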
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                tcg_out32(s, OR | SAB(arg, ret, arg));
                break;
            } else if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
                          | VRT(arg) | RA(ret));
                break;
            } else {
                /* Altivec does not support vector->integer moves.  */
                return false;
            }
        } else if (arg < TCG_REG_V0) {
            if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
                          | VRT(ret) | RA(arg));
                break;
            } else {
                /* Altivec does not support integer->vector moves.  */
                return false;
            }
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}

static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSB | RA(dst) | RS(src));
}

static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSH | RA(dst) | RS(src));
}

static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
}

static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSW | RA(dst) | RS(src));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    /* Limit immediate shift count lest we create an illegal insn.  */
    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}

static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
}

static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRH | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, dst, dst);
        }
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                      src = xxxxabcd
     */
    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext16s(s, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}

static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRW | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, dst, dst);
        }
        return;
    }

    /*
     * Stolen from gcc's builtin_bswap32.
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                      src = xxxxabcd
     */
    /* tmp = rol32(src, 8) & 0xffffffff             = 0000bcda */
    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)   = 0000dcda */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)   = 0000dcba */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext32s(s, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}

static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
{
    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
    TCGReg t1 = dst == src ? dst : TCG_REG_R0;

    if (have_isa_3_10) {
        tcg_out32(s, BRD | RA(dst) | RS(src));
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                      src = abcdefgh
     */
    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);

    /* t0 = rol64(t0, 32)                           = hgfe0000 */
    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
    /* t1 = rol64(src, 32)                          = efghabcd */
    tcg_out_rld(s, RLDICL, t1, src, 32, 0);

    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);

    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
}

/* Emit a move into ret of arg, if it can be done in one insn.  */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        return true;
    }
    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        return true;
    }
    return false;
}

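/*
 * tcg_out_movi_int below builds progressively longer sequences: one
 * insn for 16-bit immediates, two for 32-bit values (addis+ori or
 * addi+oris) and for shifted or masked 16-bit patterns, and at worst
 * the full five-insn 64-bit sequence addis+ori+sldi+oris+ori, unless
 * the value is within reach of TCG_REG_TB or the constant pool.
 */
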
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    intptr_t tb_diff;
    tcg_target_long tmp;
    int shift;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        arg = (int32_t)arg;
    }

    /* Load 16-bit immediates with one insn.  */
    if (tcg_out_movi_one(s, ret, arg)) {
        return;
    }

    /* Load addresses within the TB with one insn.  */
    tb_diff = tcg_tbrel_diff(s, (void *)arg);
    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
        return;
    }

    /* Load 32-bit immediates with two insns.  Note that we've already
       eliminated bare ADDIS, so we know both insns are required.  */
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
        return;
    }
    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        return;
    }

    /* Load masked 16-bit value.  */
    if (arg > 0 && (arg & 0x8000)) {
        tmp = arg | 0x7fff;
        if ((tmp & (tmp + 1)) == 0) {
            int mb = clz64(tmp + 1) + 1;
            tcg_out32(s, ADDI | TAI(ret, 0, arg));
            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
            return;
        }
    }

    /* Load common masks with 2 insns.  */
    shift = ctz64(arg);
    tmp = arg >> shift;
    if (tmp == (int16_t)tmp) {
        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
        tcg_out_shli64(s, ret, ret, shift);
        return;
    }
    shift = clz64(arg);
    if (tcg_out_movi_one(s, ret, arg << shift)) {
        tcg_out_shri64(s, ret, ret, shift);
        return;
    }

    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns.  */
    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
        return;
    }

    tmp = arg >> 31 >> 1;
    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
    if (tmp) {
        tcg_out_shli64(s, ret, ret, 32);
    }
    if (arg & 0xffff0000) {
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    }
    if (arg & 0xffff) {
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    }
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg ret, int64_t val)
{
    uint32_t load_insn;
    int rel, low;
    intptr_t add;

    switch (vece) {
    case MO_8:
        low = (int8_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
            return;
        }
        if (have_isa_3_00) {
            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
            return;
        }
        break;

    case MO_16:
        low = (int16_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;

    case MO_32:
        low = (int32_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;
    }

    /*
     * Otherwise we must load the value from the constant pool.
     */
    if (USE_REG_TB) {
        rel = R_PPC_ADDR16;
        add = tcg_tbrel_diff(s, NULL);
    } else {
        rel = R_PPC_ADDR32;
        add = 0;
    }

    if (have_vsx) {
        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, val, rel, s->code_ptr, add);
        } else {
            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
        }
    } else {
        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
        } else {
            new_pool_l4(s, rel, s->code_ptr, add,
                        val >> 32, val, val >> 32, val);
        }
    }

    if (USE_REG_TB) {
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
        load_insn |= RA(TCG_REG_TB);
    } else {
        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    }
    tcg_out32(s, load_insn);
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(ret < TCG_REG_V0);
        tcg_out_movi_int(s, type, ret, arg, false);
        break;

    default:
        g_assert_not_reached();
    }
}

static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

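/*
 * For example, mask_operand(0x00ffff00) yields mb = 8, me = 23, which
 * is the RLWINM mask specification for those 16 contiguous ones
 * (PowerPC numbers bits from the left, so bit 8 is 0x00800000).
 */
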
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}

static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}

static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}

static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
            rs = rt;
            break;
        }
        break;
    case LXSD:
    case STXSD:
        align = 3;
        break;
    case LXV:
    case STXV:
        align = 15;
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_int_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
    }
}

static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
{
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
                             ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
        if (offset & 8) {
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        }
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
                         LVX, ret, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        break;
    case TCG_TYPE_I64:
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(arg >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
                             STXSDX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        if (offset & 8) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(arg >= TCG_REG_V0);
        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
                         STVX, arg, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}

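/*
 * The two helpers below materialize a comparison result without a
 * branch.  For EQ0, cntlzw returns 32 only for a zero input, so a
 * right shift by 5 leaves exactly the 0/1 answer.  For NE0,
 * "addic r,src,-1" sets the carry iff src != 0, and subfe computes
 * src - (src - 1) - 1 + CA, which is just CA.
 */
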
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}

static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}

static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}

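/*
 * Without isel, the ordered cases of tcg_out_setcond read the result
 * straight out of the condition register: mfocrf with FXM(7) copies
 * CR field 7 into the low nibble of R0, and rlwinm rotates the wanted
 * LT/GT/EQ bit down to bit 31 to produce the 0/1 value.
 */
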
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Ignore high bits of a potential constant arg2.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_shri32(s, arg0, arg1, 31);
            } else {
                tcg_out_shri64(s, arg0, arg1, 63);
            }
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (have_isel) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
{
    if (l->has_value) {
        bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
    }
    tcg_out32(s, bc);
}

static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], l);
}

static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (have_isel) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}

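/*
 * Count leading/trailing zeros with a defined result at zero: A2 is
 * the value to substitute when A1 == 0.  The hardware cntlz/cnttz
 * insns already return the operand width for a zero input, so that
 * particular constant needs no fixup; any other A2 is selected with
 * isel or a short forward branch.
 */
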
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
        tcg_out32(s, opc | RA(a0) | RS(a1));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
        /* Note that the only other valid constant for a2 is 0.  */
        if (have_isel) {
            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
        } else if (!const_a2 && a0 == a2) {
            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
            tcg_out32(s, opc | RA(a0) | RS(a1));
        } else {
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
            if (const_a2) {
                tcg_out_movi(s, type, a0, 0);
            } else {
                tcg_out_mov(s, type, a0, a2);
            }
        }
    }
}

static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
{
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    };

    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    int op, bit1, bit2;

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];
    blconst = const_args[2];
    bhconst = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
        op = CRAND;
        goto do_equality;
    case TCG_COND_NE:
        op = CRNAND;
    do_equality:
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        break;

    case TCG_COND_LT:
    case TCG_COND_LE:
    case TCG_COND_GT:
    case TCG_COND_GE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        cond2 = tcg_unsigned_cond(cond);

        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
}

static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    uint32_t insn;

    if (a0 & TCG_MO_ST_LD) {
        insn = HWSYNC;
    } else {
        insn = LWSYNC;
    }

    tcg_out32(s, insn);
}

static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
{
    if (HOST_BIG_ENDIAN) {
        return (uint64_t)i1 << 32 | i2;
    }
    return (uint64_t)i2 << 32 | i1;
}

static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
                                  tcg_insn_unit i0, tcg_insn_unit i1)
{
#if TCG_TARGET_REG_BITS == 64
    qatomic_set((uint64_t *)rw, make_pair(i0, i1));
    flush_idcache_range(rx, rw, 8);
#else
    qemu_build_not_reached();
#endif
}

static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
                                  tcg_insn_unit i0, tcg_insn_unit i1,
                                  tcg_insn_unit i2, tcg_insn_unit i3)
{
    uint64_t p[2];

    p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
    p[HOST_BIG_ENDIAN] = make_pair(i2, i3);

    /*
     * There's no convenient way to get the compiler to allocate a pair
     * of registers at an even index, so copy into r6/r7 and clobber.
     */
    asm("mr %%r6, %1\n\t"
        "mr %%r7, %2\n\t"
        "stq %%r6, %0"
        : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
    flush_idcache_range(rx, rw, 16);
}

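/*
 * Patching a goto_tb jump must be atomic with respect to a cpu that
 * may be executing it: ppc64_replace2 rewrites an insn pair with a
 * single 64-bit store, and ppc64_replace4 rewrites four insns at once
 * with stq (ISA 2.07); both then flush the icache for that range.
 */
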
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    tcg_insn_unit i0, i1, i2, i3;
    intptr_t tb_diff = addr - tc_ptr;
    intptr_t br_diff = addr - (jmp_rx + 4);
    intptr_t lo, hi;

    if (TCG_TARGET_REG_BITS == 32) {
        intptr_t diff = addr - jmp_rx;
        tcg_debug_assert(in_range_b(diff));
        qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /*
     * For 16-bit displacements, we can use a single add + branch.
     * This happens quite often.
     */
    if (tb_diff == (int16_t)tb_diff) {
        i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
        i1 = B | (br_diff & 0x3fffffc);
        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
        return;
    }

    lo = (int16_t)tb_diff;
    hi = (int32_t)(tb_diff - lo);
    assert(tb_diff == hi + lo);
    i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
    i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);

    /*
     * Without stq from 2.07, we can only update two insns,
     * and those must be the ones that load the target address.
     */
    if (!have_isa_2_07) {
        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
        return;
    }

    /*
     * For 26-bit displacements, we can use a direct branch.
     * Otherwise we still need the indirect branch, which we
     * must restore after a potential direct branch write.
     */
    br_diff -= 4;
    if (in_range_b(br_diff)) {
        i2 = B | (br_diff & 0x3fffffc);
        i3 = NOP;
    } else {
        i2 = MTSPR | RS(TCG_REG_TB) | CTR;
        i3 = BCCTR | BO_ALWAYS;
    }
    ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
}

static void tcg_out_call_int(TCGContext *s, int lk,
                             const tcg_insn_unit *target)
{
#ifdef _CALL_AIX
    /* Look through the descriptor.  If the branch is in range, and we
       don't have to spend too much effort on building the toc, then we
       can branch there directly.  */
    const void *tgt = ((const void * const *)target)[0];
    uintptr_t toc = ((const uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, lk, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    intptr_t diff;

    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address.  */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);

    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, lk, target);
    } else {
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#else
    tcg_out_b(s, lk, target);
#endif
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, LK, target);
}

static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_UQ] = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_UQ] = LDBRX,
};

static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_UQ] = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_UQ] = STDBRX,
};

static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};

#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEUQ] = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEUQ] = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEUQ] = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEUQ] = helper_be_stq_mmu,
};

/* We expect to use a 16-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);

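/*
 * Rough shape of the fast path emitted below, assuming a 64-bit guest,
 * an aligned access, and cmp_off == 0 (i.e. a read):
 *   srdi   tmp1, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
 *   and    r3, r3, tmp1       # apply mask; r3 = offset of tlb entry
 *   ldux   tmp1, r3, r4       # load comparator; r3 = &tlb entry
 *   rldicr r0, addr, 0, 63 - TARGET_PAGE_BITS
 *   cmpd   cr7, r0, tmp1
 * with the addend then loaded from the entry for the actual access.
 */
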
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    }
    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));

    /* Load the TLB comparator.  */
    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
                        ? LWZUX : LDUX);
        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
    } else {
        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
        } else {
            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
        }
    }

    /* Load the TLB addend for use on the fast path.  Do this as early as
       possible to minimize any load-use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
               offsetof(CPUTLBEntry, addend));

    /* Clear the non-page, non-alignment bits from the address.  */
    if (TCG_TARGET_REG_BITS == 32) {
        /* We don't support unaligned accesses on 32-bits.
         * Preserve the bottom bits and thus trigger a comparison
         * failure on unaligned accesses.
         */
        if (a_bits < s_bits) {
            a_bits = s_bits;
        }
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else {
        TCGReg t = addrlo;

        /* If the access is unaligned, we need to make sure we fail if we
         * cross a page boundary.  The trick is to add the access size-1
         * to the address before masking the low bits.  That will make the
         * address overflow to the next page if we cross a page boundary,
         * which will then force a mismatch of the TLB compare.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1 << a_bits) - 1;
            unsigned s_mask = (1 << s_bits) - 1;
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
            t = TCG_REG_R0;
        }

        /* Mask the address for the requested alignment.  */
        if (TARGET_LONG_BITS == 32) {
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
            /* Zero-extend the address for use in the final address.  */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else if (a_bits == 0) {
            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
        } else {
            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
        }
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_TL);
    }

    return addrlo;
}
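/*
 * A note on the RLDICL pair in the a_bits != 0 path above: the first
 * RLDICL rotates the page offset into the top of the register and clears
 * the (TARGET_PAGE_BITS - a_bits) offset bits that lie above the
 * alignment bits; the second rotates everything back into place.  The
 * low a_bits alignment bits therefore survive into the comparison, so
 * an unaligned address can never equal the page-aligned TLB comparator.
 */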

/* Record the context of a call to the out-of-line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo_reg, TCGReg datahi_reg,
                                TCGReg addrlo_reg, TCGReg addrhi_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo_reg;
    label->datahi_reg = datahi_reg;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = lptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
    } else if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.
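           (tcg_out_tlb_read extends a 32-bit guest address into R4, which
           is exactly the argument slot following env in R3, so in that
           case the move below collapses to a no-op.)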
         */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32) {
        switch (s_bits) {
        case MO_64:
            arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
            /* FALLTHRU */
        case MO_32:
            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
            break;
        default:
            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
            break;
        }
    } else {
        if (s_bits == MO_64) {
            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
        } else {
            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
        }
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
                                   TCGReg addrhi, unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;

    /* We are expecting a_bits to max out at 7, well below the 16-bit
       immediate limit of ANDI.  */
    tcg_debug_assert(a_bits < 16);
    tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));

    label->label_ptr[0] = s->code_ptr;
    tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);

    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        TCGReg arg = TCG_REG_R4;

        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        if (l->addrlo_reg != arg) {
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
        } else if (l->addrhi_reg != arg + 1) {
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
        }
    } else {
        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
    }
    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
                                          : helper_unaligned_st));
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* SOFTMMU */

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);

    /* Record the location of the conditional branch-and-link to the slow
       path; its target is patched once the slow path is generated.  */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
    }
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
        } else if (addrlo == datahi) {
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
        } else {
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
        if (!have_isa_2_06 && insn == LDBRX) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
        } else if (insn) {
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
        } else {
            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_exts_opc[s_bits];
            tcg_out32(s, insn | RA(datalo) | RS(datalo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);

    /* Record the location of the conditional branch-and-link to the slow
       path; its target is patched once the slow path is generated.  */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
    }
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif
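    /*
     * Note on the store paths below: a 64-bit store on a 32-bit host is
     * split into two 32-bit stores (big-endian host, so the high word
     * goes at the lower address), and a byte-reversed 64-bit store
     * without ISA 2.06 STDBRX is likewise emulated with two STWBRX.
     */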

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
        } else {
            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
        if (!have_isa_2_06 && insn == STDBRX) {
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
        } else {
            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

/* Parameters for function call generation, used in tcg.c.  */
#define TCG_TARGET_STACK_ALIGN  16

#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled abi"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
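
/*
 * Sketch of the frame built by the prologue below, derived from the
 * macros above, lowest addresses first:
 *
 *   [sp + 0]                             ABI link area (LINK_AREA_SIZE)
 *   [sp + TCG_TARGET_CALL_STACK_OFFSET]  TCG_STATIC_CALL_ARGS_SIZE bytes
 *                                        of outgoing call arguments
 *   [sp + REG_SAVE_BOT
 *       - CPU_TEMP_BUF_SIZE]             CPU temp buffer (tcg_set_frame)
 *   [sp + REG_SAVE_BOT]                  callee-saved registers
 *   [sp + FRAME_SIZE]                    caller's frame; the saved LR
 *                                        lives there at offset LR_OFFSET
 */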

static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    const void **desc = (const void **)s->code_ptr;
    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
    desc[1] = 0;                            /* environment pointer */
    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tcg_code_gen_epilogue);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8 or 16-byte aligned. */
                while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB.
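               tcg_current_code_size() is the byte offset of this point
               within the TB, so the subtraction rebases TCG_REG_TB
               (which the unlinked branch sequence left pointing here)
               back to the start of the TB.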
             */
            tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
                             -tcg_current_code_size(s));
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
                                       l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;

    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_rem_i32:
        tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_remu_i32:
        tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari32(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                      | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                      | MB(0) | ME(31));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn.  */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            tcg_out_sari64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_rem_i64:
        tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_remu_i64:
        tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_bswap16(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0], args[1]);
        break;

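    /*
     * deposit is implemented with rotate-and-insert-under-mask.  As a
     * worked example (a sketch, not emitted verbatim): deposit_i32 with
     * ofs = args[3] = 8 and len = args[4] = 16 becomes
     *     rlwimi a0, a2, 8, 8, 23
     * i.e. rotate a2 left by 8 and insert bits 8..23 (big-endian bit
     * numbering) into a0.  A constant a2 is restricted to 0 by the rZ
     * constraint, so depositing it reduces to clearing the field.
     */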
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00.  */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup.  */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
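     * That is why the VSPLT element indices below are 7, 3 and 1 for
     * MO_8, MO_16 and MO_32 respectively: they select the last byte,
     * halfword or word of that left double-word.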
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#if !HOST_BIG_ENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#if !HOST_BIG_ENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#if !HOST_BIG_ENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#if !HOST_BIG_ENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;
    case INDEX_op_nand_vec:
        insn = VNAND;
        break;
    case INDEX_op_nor_vec:
        insn = VNOR;
        break;
    case INDEX_op_eqv_vec:
        insn = VEQV;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx  */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;
    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}

static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1;

    if (vece == MO_32) {
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using negative numbers gets us the 4th bit easily.
         */
        imm = sextract32(imm, 0, 5);
    } else {
        imm &= (8 << vece) - 1;
    }

    /* Splat w/bytes for xxspltib when 2.07 allows MO_64.  */
    t1 = tcg_constant_vec(type, MO_8, imm);
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}

static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}

static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec c0, c16;

    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using -16 is a quick way to represent 16.
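         *
         * The expansion below computes, per 32-bit lane (roughly):
         *     t1 = rotl(v2, 16)        -- swap v2's halfwords
         *     t2 = mulou(v1, v2)       -- product of the low halfwords
         *     t1 = msum(v1, t1, 0)     -- v1.lo * v2.hi + v1.hi * v2.lo
         *     v0 = (t1 << 16) + t2     -- assemble the low 32 result bits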
         */
        c16 = tcg_constant_vec(type, MO_8, -16);
        c0 = tcg_constant_vec(type, MO_8, 0);

        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
        tcg_gen_add_vec(MO_32, v0, t1, t2);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rT);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rZW);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, ri, rZ, rZ);
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, ri, ri);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, ri, ri);
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rI, rZM);
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rZM, r, r);

    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O1_I1(r, L)
                : C_O1_I2(r, L, L));

    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O0_I2(S, S)
                : C_O0_I3(S, S, S));

    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
                : C_O2_I2(L, L, L, L));

    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
                : C_O0_I4(S, S, S, S));

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return C_O1_I2(v, v, v);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(v, v);

    case INDEX_op_dup_vec:
        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);

    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec.
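           This backend only ever allocates the 32 Altivec registers, so
           VSX merely adds instructions that operate on those registers;
           the remainder of the VSX register file goes unused.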
         */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);  /* tb->tc_ptr */
    }
}

#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),      /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */

#undef VMULEUB
#undef VMULEUH
#undef VMULEUW
#undef VMULOUB
#undef VMULOUH
#undef VMULOUW
#undef VMSUMUHM