xref: /openbmc/qemu/tcg/ppc/tcg-target.c.inc (revision 12fde9bcdb52118495d10c32ed375679f23e323c)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25#include "elf.h"
26#include "../tcg-pool.c.inc"
27#include "../tcg-ldst.c.inc"
28
29/*
30 * Standardize on the _CALL_FOO symbols used by GCC:
31 * Apple Xcode does not define _CALL_DARWIN.
32 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
33 */
34#if !defined(_CALL_SYSV) && \
35    !defined(_CALL_DARWIN) && \
36    !defined(_CALL_AIX) && \
37    !defined(_CALL_ELF)
38# if defined(__APPLE__)
39#  define _CALL_DARWIN
40# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
41#  define _CALL_SYSV
42# else
43#  error "Unknown ABI"
44# endif
45#endif
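
/*
 * For reference (illustrative note, not from the original file): GCC
 * typically predefines _CALL_ELF (1 for ELFv1, 2 for ELFv2) on 64-bit ELF
 * hosts, _CALL_SYSV on 32-bit Linux, _CALL_AIX on AIX and _CALL_DARWIN on
 * Darwin, so the fallback above mainly covers clang and Xcode as noted.
 */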
46
47#if TCG_TARGET_REG_BITS == 64
48# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
49# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_NORMAL
50#else
51# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
52# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_BY_REF
53#endif
54#ifdef _CALL_SYSV
55# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
56# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_BY_REF
57#else
58# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
59# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_NORMAL
60#endif
61
62/* For some memory operations, we need a scratch that isn't R0.  For the AIX
63   calling convention, we can re-use the TOC register since we'll be reloading
64   it at every call.  Otherwise R12 will do nicely as neither a call-saved
65   register nor a parameter register.  */
66#ifdef _CALL_AIX
67# define TCG_REG_TMP1   TCG_REG_R2
68#else
69# define TCG_REG_TMP1   TCG_REG_R12
70#endif
71#define TCG_REG_TMP2    TCG_REG_R11
72
73#define TCG_VEC_TMP1    TCG_REG_V0
74#define TCG_VEC_TMP2    TCG_REG_V1
75
76#define TCG_REG_TB     TCG_REG_R31
77#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64)
78
79/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
80#define SZP  ((int)sizeof(void *))
81
82/* Shorthand for size of a register.  */
83#define SZR  (TCG_TARGET_REG_BITS / 8)
84
85#define TCG_CT_CONST_S16  0x100
86#define TCG_CT_CONST_S32  0x400
87#define TCG_CT_CONST_U32  0x800
88#define TCG_CT_CONST_ZERO 0x1000
89#define TCG_CT_CONST_MONE 0x2000
90#define TCG_CT_CONST_WSZ  0x4000
91
92#define ALL_GENERAL_REGS  0xffffffffu
93#define ALL_VECTOR_REGS   0xffffffff00000000ull
94
95TCGPowerISA have_isa;
96static bool have_isel;
97bool have_altivec;
98bool have_vsx;
99
100#ifndef CONFIG_SOFTMMU
101#define TCG_GUEST_BASE_REG 30
102#endif
103
104#ifdef CONFIG_DEBUG_TCG
105static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
106    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
107    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
108    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
109    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
110    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
111    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
112    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
113    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
114};
115#endif
116
117static const int tcg_target_reg_alloc_order[] = {
118    TCG_REG_R14,  /* call saved registers */
119    TCG_REG_R15,
120    TCG_REG_R16,
121    TCG_REG_R17,
122    TCG_REG_R18,
123    TCG_REG_R19,
124    TCG_REG_R20,
125    TCG_REG_R21,
126    TCG_REG_R22,
127    TCG_REG_R23,
128    TCG_REG_R24,
129    TCG_REG_R25,
130    TCG_REG_R26,
131    TCG_REG_R27,
132    TCG_REG_R28,
133    TCG_REG_R29,
134    TCG_REG_R30,
135    TCG_REG_R31,
136    TCG_REG_R12,  /* call clobbered, non-arguments */
137    TCG_REG_R11,
138    TCG_REG_R2,
139    TCG_REG_R13,
140    TCG_REG_R10,  /* call clobbered, arguments */
141    TCG_REG_R9,
142    TCG_REG_R8,
143    TCG_REG_R7,
144    TCG_REG_R6,
145    TCG_REG_R5,
146    TCG_REG_R4,
147    TCG_REG_R3,
148
149    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
150    TCG_REG_V2,   /* call clobbered, vectors */
151    TCG_REG_V3,
152    TCG_REG_V4,
153    TCG_REG_V5,
154    TCG_REG_V6,
155    TCG_REG_V7,
156    TCG_REG_V8,
157    TCG_REG_V9,
158    TCG_REG_V10,
159    TCG_REG_V11,
160    TCG_REG_V12,
161    TCG_REG_V13,
162    TCG_REG_V14,
163    TCG_REG_V15,
164    TCG_REG_V16,
165    TCG_REG_V17,
166    TCG_REG_V18,
167    TCG_REG_V19,
168};
169
170static const int tcg_target_call_iarg_regs[] = {
171    TCG_REG_R3,
172    TCG_REG_R4,
173    TCG_REG_R5,
174    TCG_REG_R6,
175    TCG_REG_R7,
176    TCG_REG_R8,
177    TCG_REG_R9,
178    TCG_REG_R10
179};
180
181static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
182{
183    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
184    tcg_debug_assert(slot >= 0 && slot <= 1);
185    return TCG_REG_R3 + slot;
186}
187
188static const int tcg_target_callee_save_regs[] = {
189#ifdef _CALL_DARWIN
190    TCG_REG_R11,
191#endif
192    TCG_REG_R14,
193    TCG_REG_R15,
194    TCG_REG_R16,
195    TCG_REG_R17,
196    TCG_REG_R18,
197    TCG_REG_R19,
198    TCG_REG_R20,
199    TCG_REG_R21,
200    TCG_REG_R22,
201    TCG_REG_R23,
202    TCG_REG_R24,
203    TCG_REG_R25,
204    TCG_REG_R26,
205    TCG_REG_R27, /* currently used for the global env */
206    TCG_REG_R28,
207    TCG_REG_R29,
208    TCG_REG_R30,
209    TCG_REG_R31
210};
211
212static inline bool in_range_b(tcg_target_long target)
213{
214    return target == sextract64(target, 0, 26);
215}
216
217static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
218                               const tcg_insn_unit *target)
219{
220    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
221    tcg_debug_assert(in_range_b(disp));
222    return disp & 0x3fffffc;
223}
224
225static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
226{
227    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
228    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
229
230    if (in_range_b(disp)) {
231        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
232        return true;
233    }
234    return false;
235}
236
237static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
238                               const tcg_insn_unit *target)
239{
240    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
241    tcg_debug_assert(disp == (int16_t) disp);
242    return disp & 0xfffc;
243}
244
245static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
246{
247    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
248    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
249
250    if (disp == (int16_t) disp) {
251        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
252        return true;
253    }
254    return false;
255}
256
257/* test if a constant matches the constraint */
258static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
259{
260    if (ct & TCG_CT_CONST) {
261        return 1;
262    }
263
264    /* The only 32-bit constraint we use aside from
265       TCG_CT_CONST is TCG_CT_CONST_S16.  */
266    if (type == TCG_TYPE_I32) {
267        val = (int32_t)val;
268    }
269
270    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
271        return 1;
272    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
273        return 1;
274    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
275        return 1;
276    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
277        return 1;
278    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
279        return 1;
280    } else if ((ct & TCG_CT_CONST_WSZ)
281               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
282        return 1;
283    }
284    return 0;
285}
286
287#define OPCD(opc) ((opc)<<26)
288#define XO19(opc) (OPCD(19)|((opc)<<1))
289#define MD30(opc) (OPCD(30)|((opc)<<2))
290#define MDS30(opc) (OPCD(30)|((opc)<<1))
291#define XO31(opc) (OPCD(31)|((opc)<<1))
292#define XO58(opc) (OPCD(58)|(opc))
293#define XO62(opc) (OPCD(62)|(opc))
294#define VX4(opc)  (OPCD(4)|(opc))
295
296#define B      OPCD( 18)
297#define BC     OPCD( 16)
298#define LBZ    OPCD( 34)
299#define LHZ    OPCD( 40)
300#define LHA    OPCD( 42)
301#define LWZ    OPCD( 32)
302#define LWZUX  XO31( 55)
303#define STB    OPCD( 38)
304#define STH    OPCD( 44)
305#define STW    OPCD( 36)
306
307#define STD    XO62(  0)
308#define STDU   XO62(  1)
309#define STDX   XO31(149)
310
311#define LD     XO58(  0)
312#define LDX    XO31( 21)
313#define LDU    XO58(  1)
314#define LDUX   XO31( 53)
315#define LWA    XO58(  2)
316#define LWAX   XO31(341)
317
318#define ADDIC  OPCD( 12)
319#define ADDI   OPCD( 14)
320#define ADDIS  OPCD( 15)
321#define ORI    OPCD( 24)
322#define ORIS   OPCD( 25)
323#define XORI   OPCD( 26)
324#define XORIS  OPCD( 27)
325#define ANDI   OPCD( 28)
326#define ANDIS  OPCD( 29)
327#define MULLI  OPCD(  7)
328#define CMPLI  OPCD( 10)
329#define CMPI   OPCD( 11)
330#define SUBFIC OPCD( 8)
331
332#define LWZU   OPCD( 33)
333#define STWU   OPCD( 37)
334
335#define RLWIMI OPCD( 20)
336#define RLWINM OPCD( 21)
337#define RLWNM  OPCD( 23)
338
339#define RLDICL MD30(  0)
340#define RLDICR MD30(  1)
341#define RLDIMI MD30(  3)
342#define RLDCL  MDS30( 8)
343
344#define BCLR   XO19( 16)
345#define BCCTR  XO19(528)
346#define CRAND  XO19(257)
347#define CRANDC XO19(129)
348#define CRNAND XO19(225)
349#define CROR   XO19(449)
350#define CRNOR  XO19( 33)
351
352#define EXTSB  XO31(954)
353#define EXTSH  XO31(922)
354#define EXTSW  XO31(986)
355#define ADD    XO31(266)
356#define ADDE   XO31(138)
357#define ADDME  XO31(234)
358#define ADDZE  XO31(202)
359#define ADDC   XO31( 10)
360#define AND    XO31( 28)
361#define SUBF   XO31( 40)
362#define SUBFC  XO31(  8)
363#define SUBFE  XO31(136)
364#define SUBFME XO31(232)
365#define SUBFZE XO31(200)
366#define OR     XO31(444)
367#define XOR    XO31(316)
368#define MULLW  XO31(235)
369#define MULHW  XO31( 75)
370#define MULHWU XO31( 11)
371#define DIVW   XO31(491)
372#define DIVWU  XO31(459)
373#define MODSW  XO31(779)
374#define MODUW  XO31(267)
375#define CMP    XO31(  0)
376#define CMPL   XO31( 32)
377#define LHBRX  XO31(790)
378#define LWBRX  XO31(534)
379#define LDBRX  XO31(532)
380#define STHBRX XO31(918)
381#define STWBRX XO31(662)
382#define STDBRX XO31(660)
383#define MFSPR  XO31(339)
384#define MTSPR  XO31(467)
385#define SRAWI  XO31(824)
386#define NEG    XO31(104)
387#define MFCR   XO31( 19)
388#define MFOCRF (MFCR | (1u << 20))
389#define NOR    XO31(124)
390#define CNTLZW XO31( 26)
391#define CNTLZD XO31( 58)
392#define CNTTZW XO31(538)
393#define CNTTZD XO31(570)
394#define CNTPOPW XO31(378)
395#define CNTPOPD XO31(506)
396#define ANDC   XO31( 60)
397#define ORC    XO31(412)
398#define EQV    XO31(284)
399#define NAND   XO31(476)
400#define ISEL   XO31( 15)
401
402#define MULLD  XO31(233)
403#define MULHD  XO31( 73)
404#define MULHDU XO31(  9)
405#define DIVD   XO31(489)
406#define DIVDU  XO31(457)
407#define MODSD  XO31(777)
408#define MODUD  XO31(265)
409
410#define LBZX   XO31( 87)
411#define LHZX   XO31(279)
412#define LHAX   XO31(343)
413#define LWZX   XO31( 23)
414#define STBX   XO31(215)
415#define STHX   XO31(407)
416#define STWX   XO31(151)
417
418#define EIEIO  XO31(854)
419#define HWSYNC XO31(598)
420#define LWSYNC (HWSYNC | (1u << 21))
421
422#define SPR(a, b) ((((a)<<5)|(b))<<11)
423#define LR     SPR(8, 0)
424#define CTR    SPR(9, 0)
425
426#define SLW    XO31( 24)
427#define SRW    XO31(536)
428#define SRAW   XO31(792)
429
430#define SLD    XO31( 27)
431#define SRD    XO31(539)
432#define SRAD   XO31(794)
433#define SRADI  XO31(413<<1)
434
435#define BRH    XO31(219)
436#define BRW    XO31(155)
437#define BRD    XO31(187)
438
439#define TW     XO31( 4)
440#define TRAP   (TW | TO(31))
441
442#define NOP    ORI  /* ori 0,0,0 */
443
444#define LVX        XO31(103)
445#define LVEBX      XO31(7)
446#define LVEHX      XO31(39)
447#define LVEWX      XO31(71)
448#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
449#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
450#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
451#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
452#define LXSD       (OPCD(57) | 2)   /* v3.00 */
453#define LXVWSX     (XO31(364) | 1)  /* v3.00, force tx=1 */
454
455#define STVX       XO31(231)
456#define STVEWX     XO31(199)
457#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
458#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
459#define STXV       (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
460#define STXSD      (OPCD(61) | 2)   /* v3.00 */
461
462#define VADDSBS    VX4(768)
463#define VADDUBS    VX4(512)
464#define VADDUBM    VX4(0)
465#define VADDSHS    VX4(832)
466#define VADDUHS    VX4(576)
467#define VADDUHM    VX4(64)
468#define VADDSWS    VX4(896)
469#define VADDUWS    VX4(640)
470#define VADDUWM    VX4(128)
471#define VADDUDM    VX4(192)       /* v2.07 */
472
473#define VSUBSBS    VX4(1792)
474#define VSUBUBS    VX4(1536)
475#define VSUBUBM    VX4(1024)
476#define VSUBSHS    VX4(1856)
477#define VSUBUHS    VX4(1600)
478#define VSUBUHM    VX4(1088)
479#define VSUBSWS    VX4(1920)
480#define VSUBUWS    VX4(1664)
481#define VSUBUWM    VX4(1152)
482#define VSUBUDM    VX4(1216)      /* v2.07 */
483
484#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
485#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */
486
487#define VMAXSB     VX4(258)
488#define VMAXSH     VX4(322)
489#define VMAXSW     VX4(386)
490#define VMAXSD     VX4(450)       /* v2.07 */
491#define VMAXUB     VX4(2)
492#define VMAXUH     VX4(66)
493#define VMAXUW     VX4(130)
494#define VMAXUD     VX4(194)       /* v2.07 */
495#define VMINSB     VX4(770)
496#define VMINSH     VX4(834)
497#define VMINSW     VX4(898)
498#define VMINSD     VX4(962)       /* v2.07 */
499#define VMINUB     VX4(514)
500#define VMINUH     VX4(578)
501#define VMINUW     VX4(642)
502#define VMINUD     VX4(706)       /* v2.07 */
503
504#define VCMPEQUB   VX4(6)
505#define VCMPEQUH   VX4(70)
506#define VCMPEQUW   VX4(134)
507#define VCMPEQUD   VX4(199)       /* v2.07 */
508#define VCMPGTSB   VX4(774)
509#define VCMPGTSH   VX4(838)
510#define VCMPGTSW   VX4(902)
511#define VCMPGTSD   VX4(967)       /* v2.07 */
512#define VCMPGTUB   VX4(518)
513#define VCMPGTUH   VX4(582)
514#define VCMPGTUW   VX4(646)
515#define VCMPGTUD   VX4(711)       /* v2.07 */
516#define VCMPNEB    VX4(7)         /* v3.00 */
517#define VCMPNEH    VX4(71)        /* v3.00 */
518#define VCMPNEW    VX4(135)       /* v3.00 */
519
520#define VSLB       VX4(260)
521#define VSLH       VX4(324)
522#define VSLW       VX4(388)
523#define VSLD       VX4(1476)      /* v2.07 */
524#define VSRB       VX4(516)
525#define VSRH       VX4(580)
526#define VSRW       VX4(644)
527#define VSRD       VX4(1732)      /* v2.07 */
528#define VSRAB      VX4(772)
529#define VSRAH      VX4(836)
530#define VSRAW      VX4(900)
531#define VSRAD      VX4(964)       /* v2.07 */
532#define VRLB       VX4(4)
533#define VRLH       VX4(68)
534#define VRLW       VX4(132)
535#define VRLD       VX4(196)       /* v2.07 */
536
537#define VMULEUB    VX4(520)
538#define VMULEUH    VX4(584)
539#define VMULEUW    VX4(648)       /* v2.07 */
540#define VMULOUB    VX4(8)
541#define VMULOUH    VX4(72)
542#define VMULOUW    VX4(136)       /* v2.07 */
543#define VMULUWM    VX4(137)       /* v2.07 */
544#define VMULLD     VX4(457)       /* v3.10 */
545#define VMSUMUHM   VX4(38)
546
547#define VMRGHB     VX4(12)
548#define VMRGHH     VX4(76)
549#define VMRGHW     VX4(140)
550#define VMRGLB     VX4(268)
551#define VMRGLH     VX4(332)
552#define VMRGLW     VX4(396)
553
554#define VPKUHUM    VX4(14)
555#define VPKUWUM    VX4(78)
556
557#define VAND       VX4(1028)
558#define VANDC      VX4(1092)
559#define VNOR       VX4(1284)
560#define VOR        VX4(1156)
561#define VXOR       VX4(1220)
562#define VEQV       VX4(1668)      /* v2.07 */
563#define VNAND      VX4(1412)      /* v2.07 */
564#define VORC       VX4(1348)      /* v2.07 */
565
566#define VSPLTB     VX4(524)
567#define VSPLTH     VX4(588)
568#define VSPLTW     VX4(652)
569#define VSPLTISB   VX4(780)
570#define VSPLTISH   VX4(844)
571#define VSPLTISW   VX4(908)
572
573#define VSLDOI     VX4(44)
574
575#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
576#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
577#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
578
579#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
580#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
581#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
582#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
583#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
584#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */
585
586#define RT(r) ((r)<<21)
587#define RS(r) ((r)<<21)
588#define RA(r) ((r)<<16)
589#define RB(r) ((r)<<11)
590#define TO(t) ((t)<<21)
591#define SH(s) ((s)<<11)
592#define MB(b) ((b)<<6)
593#define ME(e) ((e)<<1)
594#define BO(o) ((o)<<21)
595#define MB64(b) ((b)<<5)
596#define FXM(b) (1 << (19 - (b)))
597
598#define VRT(r)  (((r) & 31) << 21)
599#define VRA(r)  (((r) & 31) << 16)
600#define VRB(r)  (((r) & 31) << 11)
601#define VRC(r)  (((r) & 31) <<  6)
602
603#define LK    1
604
605#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
606#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
607#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
608#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
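
/*
 * Worked example (illustrative): an instruction word is assembled by OR'ing
 * an opcode macro with the field macros above, e.g.
 *   tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R4, TCG_REG_R5));
 * expands to (31 << 26) | (266 << 1) | (3 << 21) | (4 << 16) | (5 << 11)
 * == 0x7c642a14, i.e. "add r3, r4, r5".
 */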
609
610#define BF(n)    ((n)<<23)
611#define BI(n, c) (((c)+((n)*4))<<16)
612#define BT(n, c) (((c)+((n)*4))<<21)
613#define BA(n, c) (((c)+((n)*4))<<16)
614#define BB(n, c) (((c)+((n)*4))<<11)
615#define BC_(n, c) (((c)+((n)*4))<<6)
616
617#define BO_COND_TRUE  BO(12)
618#define BO_COND_FALSE BO( 4)
619#define BO_ALWAYS     BO(20)
620
621enum {
622    CR_LT,
623    CR_GT,
624    CR_EQ,
625    CR_SO
626};
627
628static const uint32_t tcg_to_bc[] = {
629    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
630    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
631    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
632    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
633    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
634    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
635    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
636    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
637    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
638    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
639};
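
/*
 * For illustration: the comparison helpers below put their result in
 * condition register field 7 (field 6 is also used for double-word
 * compares), so each entry here branches on one bit of cr7; e.g. the
 * TCG_COND_EQ entry becomes "beq cr7, <target>" once tcg_out_bc() ORs in
 * the 14-bit displacement.
 */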
640
641/* The low bit here is set if the RA and RB fields must be inverted.  */
642static const uint32_t tcg_to_isel[] = {
643    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
644    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
645    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
646    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
647    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
648    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
649    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
650    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
651    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
652    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
653};
654
655static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
656                        intptr_t value, intptr_t addend)
657{
658    const tcg_insn_unit *target;
659    int16_t lo;
660    int32_t hi;
661
662    value += addend;
663    target = (const tcg_insn_unit *)value;
664
665    switch (type) {
666    case R_PPC_REL14:
667        return reloc_pc14(code_ptr, target);
668    case R_PPC_REL24:
669        return reloc_pc24(code_ptr, target);
670    case R_PPC_ADDR16:
671        /*
672         * We are (slightly) abusing this relocation type.  In particular,
673         * assert that the low 2 bits are zero, and do not modify them.
674         * That way we can use this with LD et al that have opcode bits
675         * in the low 2 bits of the insn.
676         */
677        if ((value & 3) || value != (int16_t)value) {
678            return false;
679        }
680        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
681        break;
682    case R_PPC_ADDR32:
683        /*
684         * We are abusing this relocation type.  Again, this points to
685         * a pair of insns, lis + load.  This is an absolute address
686         * relocation for PPC32 so the lis cannot be removed.
687         */
688        lo = value;
689        hi = value - lo;
690        if (hi + lo != value) {
691            return false;
692        }
693        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
694        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
695        break;
696    default:
697        g_assert_not_reached();
698    }
699    return true;
700}
701
702static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
703                             TCGReg base, tcg_target_long offset);
704
705static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
706{
707    if (ret == arg) {
708        return true;
709    }
710    switch (type) {
711    case TCG_TYPE_I64:
712        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
713        /* fallthru */
714    case TCG_TYPE_I32:
715        if (ret < TCG_REG_V0) {
716            if (arg < TCG_REG_V0) {
717                tcg_out32(s, OR | SAB(arg, ret, arg));
718                break;
719            } else if (have_isa_2_07) {
720                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
721                          | VRT(arg) | RA(ret));
722                break;
723            } else {
724                /* Altivec does not support vector->integer moves.  */
725                return false;
726            }
727        } else if (arg < TCG_REG_V0) {
728            if (have_isa_2_07) {
729                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
730                          | VRT(ret) | RA(arg));
731                break;
732            } else {
733                /* Altivec does not support integer->vector moves.  */
734                return false;
735            }
736        }
737        /* fallthru */
738    case TCG_TYPE_V64:
739    case TCG_TYPE_V128:
740        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
741        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
742        break;
743    default:
744        g_assert_not_reached();
745    }
746    return true;
747}
748
749static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
750                               int sh, int mb)
751{
752    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
753    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
754    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
755    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
756}
757
758static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
759                               int sh, int mb, int me)
760{
761    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
762}
763
764static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
765{
766    tcg_out32(s, EXTSB | RA(dst) | RS(src));
767}
768
769static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
770{
771    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
772}
773
774static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
775{
776    tcg_out32(s, EXTSH | RA(dst) | RS(src));
777}
778
779static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
780{
781    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
782}
783
784static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
785{
786    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
787    tcg_out32(s, EXTSW | RA(dst) | RS(src));
788}
789
790static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
791{
792    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
793    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
794}
795
796static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
797{
798    tcg_out_ext32s(s, dst, src);
799}
800
801static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
802{
803    tcg_out_ext32u(s, dst, src);
804}
805
806static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
807{
808    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
809    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
810}
811
812static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
813{
814    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
815}
816
817static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
818{
819    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
820}
821
822static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
823{
824    /* Limit immediate shift count lest we create an illegal insn.  */
825    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
826}
827
828static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
829{
830    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
831}
832
833static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
834{
835    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
836}
837
838static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
839{
840    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
841}
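
/*
 * Note (illustrative): sradi's 6-bit shift count is split by the encoding --
 * the low five bits go in the SH field and bit 5 of the count lands in
 * instruction bit 1.  That is why SRADI is defined with a pre-shifted
 * extended opcode (XO31(413 << 1)) and why the code above ORs in
 * ((c >> 4) & 2).
 */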
842
843static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
844{
845    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
846
847    if (have_isa_3_10) {
848        tcg_out32(s, BRH | RA(dst) | RS(src));
849        if (flags & TCG_BSWAP_OS) {
850            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
851        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
852            tcg_out_ext16u(s, dst, dst);
853        }
854        return;
855    }
856
857    /*
858     * In the following,
859     *   dep(a, b, m) -> (a & ~m) | (b & m)
860     *
861     * Begin with:                              src = xxxxabcd
862     */
863    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
864    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
865    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
866    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
867
868    if (flags & TCG_BSWAP_OS) {
869        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
870    } else {
871        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
872    }
873}
874
875static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
876{
877    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
878
879    if (have_isa_3_10) {
880        tcg_out32(s, BRW | RA(dst) | RS(src));
881        if (flags & TCG_BSWAP_OS) {
882            tcg_out_ext32s(s, dst, dst);
883        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
884            tcg_out_ext32u(s, dst, dst);
885        }
886        return;
887    }
888
889    /*
890     * Stolen from gcc's builtin_bswap32.
891     * In the following,
892     *   dep(a, b, m) -> (a & ~m) | (b & m)
893     *
894     * Begin with:                              src = xxxxabcd
895     */
896    /* tmp = rol32(src, 8) & 0xffffffff             = 0000bcda */
897    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
898    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)   = 0000dcda */
899    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
900    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)   = 0000dcba */
901    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
902
903    if (flags & TCG_BSWAP_OS) {
904        tcg_out_ext32s(s, dst, tmp);
905    } else {
906        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
907    }
908}
909
910static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
911{
912    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
913    TCGReg t1 = dst == src ? dst : TCG_REG_R0;
914
915    if (have_isa_3_10) {
916        tcg_out32(s, BRD | RA(dst) | RS(src));
917        return;
918    }
919
920    /*
921     * In the following,
922     *   dep(a, b, m) -> (a & ~m) | (b & m)
923     *
924     * Begin with:                              src = abcdefgh
925     */
926    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
927    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
928    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
929    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
930    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
931    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
932
933    /* t0 = rol64(t0, 32)                           = hgfe0000 */
934    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
935    /* t1 = rol64(src, 32)                          = efghabcd */
936    tcg_out_rld(s, RLDICL, t1, src, 32, 0);
937
938    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
939    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
940    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
941    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
942    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
943    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
944
945    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
946}
947
948/* Emit a move into ret of arg, if it can be done in one insn.  */
949static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
950{
951    if (arg == (int16_t)arg) {
952        tcg_out32(s, ADDI | TAI(ret, 0, arg));
953        return true;
954    }
955    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
956        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
957        return true;
958    }
959    return false;
960}
961
962static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
963                             tcg_target_long arg, bool in_prologue)
964{
965    intptr_t tb_diff;
966    tcg_target_long tmp;
967    int shift;
968
969    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
970
971    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
972        arg = (int32_t)arg;
973    }
974
975    /* Load 16-bit immediates with one insn.  */
976    if (tcg_out_movi_one(s, ret, arg)) {
977        return;
978    }
979
980    /* Load addresses within the TB with one insn.  */
981    tb_diff = tcg_tbrel_diff(s, (void *)arg);
982    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
983        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
984        return;
985    }
986
987    /* Load 32-bit immediates with two insns.  Note that we've already
988       eliminated bare ADDIS, so we know both insns are required.  */
989    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
990        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
991        tcg_out32(s, ORI | SAI(ret, ret, arg));
992        return;
993    }
994    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
995        tcg_out32(s, ADDI | TAI(ret, 0, arg));
996        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
997        return;
998    }
999
1000    /* Load masked 16-bit value.  */
1001    if (arg > 0 && (arg & 0x8000)) {
1002        tmp = arg | 0x7fff;
1003        if ((tmp & (tmp + 1)) == 0) {
1004            int mb = clz64(tmp + 1) + 1;
1005            tcg_out32(s, ADDI | TAI(ret, 0, arg));
1006            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
1007            return;
1008        }
1009    }
1010
1011    /* Load common masks with 2 insns.  */
1012    shift = ctz64(arg);
1013    tmp = arg >> shift;
1014    if (tmp == (int16_t)tmp) {
1015        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
1016        tcg_out_shli64(s, ret, ret, shift);
1017        return;
1018    }
1019    shift = clz64(arg);
1020    if (tcg_out_movi_one(s, ret, arg << shift)) {
1021        tcg_out_shri64(s, ret, ret, shift);
1022        return;
1023    }
1024
1025    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns.  */
1026    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
1027        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
1028        return;
1029    }
1030
1031    /* Use the constant pool, if possible.  */
1032    if (!in_prologue && USE_REG_TB) {
1033        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1034                       tcg_tbrel_diff(s, NULL));
1035        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
1036        return;
1037    }
1038
1039    tmp = arg >> 31 >> 1;
1040    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1041    if (tmp) {
1042        tcg_out_shli64(s, ret, ret, 32);
1043    }
1044    if (arg & 0xffff0000) {
1045        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1046    }
1047    if (arg & 0xffff) {
1048        tcg_out32(s, ORI | SAI(ret, ret, arg));
1049    }
1050}
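
/*
 * Worked example (illustrative): when none of the shortcuts above apply
 * (e.g. in the prologue, where neither TCG_REG_TB nor the constant pool is
 * used), loading 0x123456789abcdef0 falls through to the final sequence:
 * a 32-bit movi of the high half (addis 0x1234 + ori 0x5678), a shift left
 * by 32, then oris 0x9abc and ori 0xdef0 -- five instructions in total.
 */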
1051
1052static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1053                             TCGReg ret, int64_t val)
1054{
1055    uint32_t load_insn;
1056    int rel, low;
1057    intptr_t add;
1058
1059    switch (vece) {
1060    case MO_8:
1061        low = (int8_t)val;
1062        if (low >= -16 && low < 16) {
1063            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1064            return;
1065        }
1066        if (have_isa_3_00) {
1067            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1068            return;
1069        }
1070        break;
1071
1072    case MO_16:
1073        low = (int16_t)val;
1074        if (low >= -16 && low < 16) {
1075            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1076            return;
1077        }
1078        break;
1079
1080    case MO_32:
1081        low = (int32_t)val;
1082        if (low >= -16 && low < 16) {
1083            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1084            return;
1085        }
1086        break;
1087    }
1088
1089    /*
1090     * Otherwise we must load the value from the constant pool.
1091     */
1092    if (USE_REG_TB) {
1093        rel = R_PPC_ADDR16;
1094        add = tcg_tbrel_diff(s, NULL);
1095    } else {
1096        rel = R_PPC_ADDR32;
1097        add = 0;
1098    }
1099
1100    if (have_vsx) {
1101        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1102        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1103        if (TCG_TARGET_REG_BITS == 64) {
1104            new_pool_label(s, val, rel, s->code_ptr, add);
1105        } else {
1106            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1107        }
1108    } else {
1109        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1110        if (TCG_TARGET_REG_BITS == 64) {
1111            new_pool_l2(s, rel, s->code_ptr, add, val, val);
1112        } else {
1113            new_pool_l4(s, rel, s->code_ptr, add,
1114                        val >> 32, val, val >> 32, val);
1115        }
1116    }
1117
1118    if (USE_REG_TB) {
1119        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1120        load_insn |= RA(TCG_REG_TB);
1121    } else {
1122        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1123        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1124    }
1125    tcg_out32(s, load_insn);
1126}
1127
1128static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1129                         tcg_target_long arg)
1130{
1131    switch (type) {
1132    case TCG_TYPE_I32:
1133    case TCG_TYPE_I64:
1134        tcg_debug_assert(ret < TCG_REG_V0);
1135        tcg_out_movi_int(s, type, ret, arg, false);
1136        break;
1137
1138    default:
1139        g_assert_not_reached();
1140    }
1141}
1142
1143static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1144{
1145    return false;
1146}
1147
1148static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1149                             tcg_target_long imm)
1150{
1151    /* This function is only used for passing structs by reference. */
1152    g_assert_not_reached();
1153}
1154
1155static bool mask_operand(uint32_t c, int *mb, int *me)
1156{
1157    uint32_t lsb, test;
1158
1159    /* Accept a bit pattern like:
1160           0....01....1
1161           1....10....0
1162           0..01..10..0
1163       Keep track of the transitions.  */
1164    if (c == 0 || c == -1) {
1165        return false;
1166    }
1167    test = c;
1168    lsb = test & -test;
1169    test += lsb;
1170    if (test & (test - 1)) {
1171        return false;
1172    }
1173
1174    *me = clz32(lsb);
1175    *mb = test ? clz32(test & -test) + 1 : 0;
1176    return true;
1177}
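
/*
 * Worked example (illustrative): c = 0x00ffff00 has lsb = 0x100, and
 * c + lsb = 0x01000000 is a power of two, so the mask is accepted with
 * mb = 8 and me = 23 -- exactly the rlwinm mask covering big-endian bits
 * 8..23, i.e. 0x00ffff00.
 */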
1178
1179static bool mask64_operand(uint64_t c, int *mb, int *me)
1180{
1181    uint64_t lsb;
1182
1183    if (c == 0) {
1184        return false;
1185    }
1186
1187    lsb = c & -c;
1188    /* Accept 1..10..0.  */
1189    if (c == -lsb) {
1190        *mb = 0;
1191        *me = clz64(lsb);
1192        return true;
1193    }
1194    /* Accept 0..01..1.  */
1195    if (lsb == 1 && (c & (c + 1)) == 0) {
1196        *mb = clz64(c + 1) + 1;
1197        *me = 63;
1198        return true;
1199    }
1200    return false;
1201}
1202
1203static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1204{
1205    int mb, me;
1206
1207    if (mask_operand(c, &mb, &me)) {
1208        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1209    } else if ((c & 0xffff) == c) {
1210        tcg_out32(s, ANDI | SAI(src, dst, c));
1211        return;
1212    } else if ((c & 0xffff0000) == c) {
1213        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1214        return;
1215    } else {
1216        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1217        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1218    }
1219}
1220
1221static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1222{
1223    int mb, me;
1224
1225    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1226    if (mask64_operand(c, &mb, &me)) {
1227        if (mb == 0) {
1228            tcg_out_rld(s, RLDICR, dst, src, 0, me);
1229        } else {
1230            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1231        }
1232    } else if ((c & 0xffff) == c) {
1233        tcg_out32(s, ANDI | SAI(src, dst, c));
1234        return;
1235    } else if ((c & 0xffff0000) == c) {
1236        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1237        return;
1238    } else {
1239        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1240        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1241    }
1242}
1243
1244static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1245                           int op_lo, int op_hi)
1246{
1247    if (c >> 16) {
1248        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1249        src = dst;
1250    }
1251    if (c & 0xffff) {
1252        tcg_out32(s, op_lo | SAI(src, dst, c));
1253        src = dst;
1254    }
1255}
1256
1257static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1258{
1259    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1260}
1261
1262static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1263{
1264    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1265}
1266
1267static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1268{
1269    ptrdiff_t disp = tcg_pcrel_diff(s, target);
1270    if (in_range_b(disp)) {
1271        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1272    } else {
1273        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1274        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1275        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1276    }
1277}
1278
1279static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1280                             TCGReg base, tcg_target_long offset)
1281{
1282    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1283    bool is_int_store = false;
1284    TCGReg rs = TCG_REG_TMP1;
1285
1286    switch (opi) {
1287    case LD: case LWA:
1288        align = 3;
1289        /* FALLTHRU */
1290    default:
1291        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1292            rs = rt;
1293            break;
1294        }
1295        break;
1296    case LXSD:
1297    case STXSD:
1298        align = 3;
1299        break;
1300    case LXV:
1301    case STXV:
1302        align = 15;
1303        break;
1304    case STD:
1305        align = 3;
1306        /* FALLTHRU */
1307    case STB: case STH: case STW:
1308        is_int_store = true;
1309        break;
1310    }
1311
1312    /* For unaligned, or very large offsets, use the indexed form.  */
1313    if (offset & align || offset != (int32_t)offset || opi == 0) {
1314        if (rs == base) {
1315            rs = TCG_REG_R0;
1316        }
1317        tcg_debug_assert(!is_int_store || rs != rt);
1318        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1319        tcg_out32(s, opx | TAB(rt & 31, base, rs));
1320        return;
1321    }
1322
1323    l0 = (int16_t)offset;
1324    offset = (offset - l0) >> 16;
1325    l1 = (int16_t)offset;
1326
1327    if (l1 < 0 && orig >= 0) {
1328        extra = 0x4000;
1329        l1 = (int16_t)(offset - 0x4000);
1330    }
1331    if (l1) {
1332        tcg_out32(s, ADDIS | TAI(rs, base, l1));
1333        base = rs;
1334    }
1335    if (extra) {
1336        tcg_out32(s, ADDIS | TAI(rs, base, extra));
1337        base = rs;
1338    }
1339    if (opi != ADDI || base != rt || l0 != 0) {
1340        tcg_out32(s, opi | TAI(rt & 31, base, l0));
1341    }
1342}
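
/*
 * Worked example (illustrative): an aligned offset of 0x12348000 splits
 * into l0 = -0x8000 and a high part of 0x1235, producing
 *   addis tmp, base, 0x1235
 *   <op>  rt, -0x8000(tmp)
 * The extra 0x4000 step only kicks in when the adjusted high part would
 * come out negative for a non-negative original offset.
 */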
1343
1344static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1345                           TCGReg va, TCGReg vb, int shb)
1346{
1347    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1348}
1349
1350static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1351                       TCGReg base, intptr_t offset)
1352{
1353    int shift;
1354
1355    switch (type) {
1356    case TCG_TYPE_I32:
1357        if (ret < TCG_REG_V0) {
1358            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1359            break;
1360        }
1361        if (have_isa_2_07 && have_vsx) {
1362            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1363            break;
1364        }
1365        tcg_debug_assert((offset & 3) == 0);
1366        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1367        shift = (offset - 4) & 0xc;
1368        if (shift) {
1369            tcg_out_vsldoi(s, ret, ret, ret, shift);
1370        }
1371        break;
1372    case TCG_TYPE_I64:
1373        if (ret < TCG_REG_V0) {
1374            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1375            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1376            break;
1377        }
1378        /* fallthru */
1379    case TCG_TYPE_V64:
1380        tcg_debug_assert(ret >= TCG_REG_V0);
1381        if (have_vsx) {
1382            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1383                             ret, base, offset);
1384            break;
1385        }
1386        tcg_debug_assert((offset & 7) == 0);
1387        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1388        if (offset & 8) {
1389            tcg_out_vsldoi(s, ret, ret, ret, 8);
1390        }
1391        break;
1392    case TCG_TYPE_V128:
1393        tcg_debug_assert(ret >= TCG_REG_V0);
1394        tcg_debug_assert((offset & 15) == 0);
1395        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1396                         LVX, ret, base, offset);
1397        break;
1398    default:
1399        g_assert_not_reached();
1400    }
1401}
1402
1403static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1404                              TCGReg base, intptr_t offset)
1405{
1406    int shift;
1407
1408    switch (type) {
1409    case TCG_TYPE_I32:
1410        if (arg < TCG_REG_V0) {
1411            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1412            break;
1413        }
1414        if (have_isa_2_07 && have_vsx) {
1415            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1416            break;
1417        }
1418        tcg_debug_assert((offset & 3) == 0);
1420        shift = (offset - 4) & 0xc;
1421        if (shift) {
1422            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1423            arg = TCG_VEC_TMP1;
1424        }
1425        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1426        break;
1427    case TCG_TYPE_I64:
1428        if (arg < TCG_REG_V0) {
1429            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1430            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1431            break;
1432        }
1433        /* fallthru */
1434    case TCG_TYPE_V64:
1435        tcg_debug_assert(arg >= TCG_REG_V0);
1436        if (have_vsx) {
1437            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1438                             STXSDX, arg, base, offset);
1439            break;
1440        }
1441        tcg_debug_assert((offset & 7) == 0);
1442        if (offset & 8) {
1443            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1444            arg = TCG_VEC_TMP1;
1445        }
1446        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1447        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1448        break;
1449    case TCG_TYPE_V128:
1450        tcg_debug_assert(arg >= TCG_REG_V0);
1451        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1452                         STVX, arg, base, offset);
1453        break;
1454    default:
1455        g_assert_not_reached();
1456    }
1457}
1458
1459static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1460                               TCGReg base, intptr_t ofs)
1461{
1462    return false;
1463}
1464
1465static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1466                        int const_arg2, int cr, TCGType type)
1467{
1468    int imm;
1469    uint32_t op;
1470
1471    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1472
1473    /* Simplify the comparisons below wrt CMPI.  */
1474    if (type == TCG_TYPE_I32) {
1475        arg2 = (int32_t)arg2;
1476    }
1477
1478    switch (cond) {
1479    case TCG_COND_EQ:
1480    case TCG_COND_NE:
1481        if (const_arg2) {
1482            if ((int16_t) arg2 == arg2) {
1483                op = CMPI;
1484                imm = 1;
1485                break;
1486            } else if ((uint16_t) arg2 == arg2) {
1487                op = CMPLI;
1488                imm = 1;
1489                break;
1490            }
1491        }
1492        op = CMPL;
1493        imm = 0;
1494        break;
1495
1496    case TCG_COND_LT:
1497    case TCG_COND_GE:
1498    case TCG_COND_LE:
1499    case TCG_COND_GT:
1500        if (const_arg2) {
1501            if ((int16_t) arg2 == arg2) {
1502                op = CMPI;
1503                imm = 1;
1504                break;
1505            }
1506        }
1507        op = CMP;
1508        imm = 0;
1509        break;
1510
1511    case TCG_COND_LTU:
1512    case TCG_COND_GEU:
1513    case TCG_COND_LEU:
1514    case TCG_COND_GTU:
1515        if (const_arg2) {
1516            if ((uint16_t) arg2 == arg2) {
1517                op = CMPLI;
1518                imm = 1;
1519                break;
1520            }
1521        }
1522        op = CMPL;
1523        imm = 0;
1524        break;
1525
1526    default:
1527        g_assert_not_reached();
1528    }
1529    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1530
1531    if (imm) {
1532        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1533    } else {
1534        if (const_arg2) {
1535            tcg_out_movi(s, type, TCG_REG_R0, arg2);
1536            arg2 = TCG_REG_R0;
1537        }
1538        tcg_out32(s, op | RA(arg1) | RB(arg2));
1539    }
1540}
1541
1542static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1543                                TCGReg dst, TCGReg src)
1544{
1545    if (type == TCG_TYPE_I32) {
1546        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1547        tcg_out_shri32(s, dst, dst, 5);
1548    } else {
1549        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1550        tcg_out_shri64(s, dst, dst, 6);
1551    }
1552}
1553
1554static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1555{
1556    /* X != 0 implies X + -1 generates a carry.  Extra addition
1557       trickery means: R = ~(X-1) + X + C = -X + X + C = C.  */
1558    if (dst != src) {
1559        tcg_out32(s, ADDIC | TAI(dst, src, -1));
1560        tcg_out32(s, SUBFE | TAB(dst, dst, src));
1561    } else {
1562        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1563        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1564    }
1565}
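
/*
 * For illustration: with X = 5, addic produces 4 with CA = 1, and subfe
 * computes ~4 + 5 + 1 = 1; with X = 0, addic produces -1 with CA = 0, and
 * subfe computes ~(-1) + 0 + 0 = 0.
 */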
1566
1567static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1568                                  bool const_arg2)
1569{
1570    if (const_arg2) {
1571        if ((uint32_t)arg2 == arg2) {
1572            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1573        } else {
1574            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1575            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1576        }
1577    } else {
1578        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1579    }
1580    return TCG_REG_R0;
1581}
1582
1583static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1584                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
1585                            int const_arg2)
1586{
1587    int crop, sh;
1588
1589    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1590
1591    /* Ignore high bits of a potential constant arg2.  */
1592    if (type == TCG_TYPE_I32) {
1593        arg2 = (uint32_t)arg2;
1594    }
1595
1596    /* Handle common and trivial cases before handling anything else.  */
1597    if (arg2 == 0) {
1598        switch (cond) {
1599        case TCG_COND_EQ:
1600            tcg_out_setcond_eq0(s, type, arg0, arg1);
1601            return;
1602        case TCG_COND_NE:
1603            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1604                tcg_out_ext32u(s, TCG_REG_R0, arg1);
1605                arg1 = TCG_REG_R0;
1606            }
1607            tcg_out_setcond_ne0(s, arg0, arg1);
1608            return;
1609        case TCG_COND_GE:
1610            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1611            arg1 = arg0;
1612            /* FALLTHRU */
1613        case TCG_COND_LT:
1614            /* Extract the sign bit.  */
1615            if (type == TCG_TYPE_I32) {
1616                tcg_out_shri32(s, arg0, arg1, 31);
1617            } else {
1618                tcg_out_shri64(s, arg0, arg1, 63);
1619            }
1620            return;
1621        default:
1622            break;
1623        }
1624    }
1625
1626    /* If we have ISEL, we can implement everything with 3 or 4 insns.
1627       All other cases below are also at least 3 insns, so speed up the
1628       code generator by not considering them and always using ISEL.  */
1629    if (have_isel) {
1630        int isel, tab;
1631
1632        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1633
1634        isel = tcg_to_isel[cond];
1635
1636        tcg_out_movi(s, type, arg0, 1);
1637        if (isel & 1) {
1638            /* arg0 = (bc ? 0 : 1) */
1639            tab = TAB(arg0, 0, arg0);
1640            isel &= ~1;
1641        } else {
1642            /* arg0 = (bc ? 1 : 0) */
1643            tcg_out_movi(s, type, TCG_REG_R0, 0);
1644            tab = TAB(arg0, arg0, TCG_REG_R0);
1645        }
1646        tcg_out32(s, isel | tab);
1647        return;
1648    }
1649
1650    switch (cond) {
1651    case TCG_COND_EQ:
1652        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1653        tcg_out_setcond_eq0(s, type, arg0, arg1);
1654        return;
1655
1656    case TCG_COND_NE:
1657        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1658        /* Discard the high bits only once, rather than both inputs.  */
1659        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1660            tcg_out_ext32u(s, TCG_REG_R0, arg1);
1661            arg1 = TCG_REG_R0;
1662        }
1663        tcg_out_setcond_ne0(s, arg0, arg1);
1664        return;
1665
1666    case TCG_COND_GT:
1667    case TCG_COND_GTU:
1668        sh = 30;
1669        crop = 0;
1670        goto crtest;
1671
1672    case TCG_COND_LT:
1673    case TCG_COND_LTU:
1674        sh = 29;
1675        crop = 0;
1676        goto crtest;
1677
1678    case TCG_COND_GE:
1679    case TCG_COND_GEU:
1680        sh = 31;
1681        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1682        goto crtest;
1683
1684    case TCG_COND_LE:
1685    case TCG_COND_LEU:
1686        sh = 31;
1687        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1688    crtest:
1689        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1690        if (crop) {
1691            tcg_out32(s, crop);
1692        }
1693        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1694        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1695        break;
1696
1697    default:
1698        g_assert_not_reached();
1699    }
1700}
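
/*
 * Note (illustrative): in the crtest path, MFOCRF with FXM(7) copies cr
 * field 7 into the low four bits of R0 (LT, GT, EQ, SO from bit 3 down to
 * bit 0), and the final rlwinm rotates the desired bit into bit 0 -- hence
 * sh = 29, 30 or 31 for LT, GT or EQ respectively.
 */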
1701
1702static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1703{
1704    if (l->has_value) {
1705        bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1706    } else {
1707        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1708    }
1709    tcg_out32(s, bc);
1710}
1711
1712static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1713                           TCGArg arg1, TCGArg arg2, int const_arg2,
1714                           TCGLabel *l, TCGType type)
1715{
1716    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1717    tcg_out_bc(s, tcg_to_bc[cond], l);
1718}
1719
1720static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1721                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1722                            TCGArg v2, bool const_c2)
1723{
1724    /* If for some reason both inputs are zero, don't produce bad code.  */
1725    if (v1 == 0 && v2 == 0) {
1726        tcg_out_movi(s, type, dest, 0);
1727        return;
1728    }
1729
1730    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1731
1732    if (have_isel) {
1733        int isel = tcg_to_isel[cond];
1734
1735        /* Swap the V operands if the operation indicates inversion.  */
1736        if (isel & 1) {
1737            int t = v1;
1738            v1 = v2;
1739            v2 = t;
1740            isel &= ~1;
1741        }
1742        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
1743        if (v2 == 0) {
1744            tcg_out_movi(s, type, TCG_REG_R0, 0);
1745        }
1746        tcg_out32(s, isel | TAB(dest, v1, v2));
1747    } else {
1748        if (dest == v2) {
1749            cond = tcg_invert_cond(cond);
1750            v2 = v1;
1751        } else if (dest != v1) {
1752            if (v1 == 0) {
1753                tcg_out_movi(s, type, dest, 0);
1754            } else {
1755                tcg_out_mov(s, type, dest, v1);
1756            }
1757        }
1758        /* Branch forward over one insn */
1759        tcg_out32(s, tcg_to_bc[cond] | 8);
1760        if (v2 == 0) {
1761            tcg_out_movi(s, type, dest, 0);
1762        } else {
1763            tcg_out_mov(s, type, dest, v2);
1764        }
1765    }
1766}
1767
1768static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1769                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1770{
1771    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1772        tcg_out32(s, opc | RA(a0) | RS(a1));
1773    } else {
1774        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1775        /* Note that the only other valid constant for a2 is 0.  */
1776        if (have_isel) {
1777            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1778            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1779        } else if (!const_a2 && a0 == a2) {
1780            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1781            tcg_out32(s, opc | RA(a0) | RS(a1));
1782        } else {
1783            tcg_out32(s, opc | RA(a0) | RS(a1));
1784            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1785            if (const_a2) {
1786                tcg_out_movi(s, type, a0, 0);
1787            } else {
1788                tcg_out_mov(s, type, a0, a2);
1789            }
1790        }
1791    }
1792}
1793
1794static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1795                         const int *const_args)
1796{
1797    static const struct { uint8_t bit1, bit2; } bits[] = {
1798        [TCG_COND_LT ] = { CR_LT, CR_LT },
1799        [TCG_COND_LE ] = { CR_LT, CR_GT },
1800        [TCG_COND_GT ] = { CR_GT, CR_GT },
1801        [TCG_COND_GE ] = { CR_GT, CR_LT },
1802        [TCG_COND_LTU] = { CR_LT, CR_LT },
1803        [TCG_COND_LEU] = { CR_LT, CR_GT },
1804        [TCG_COND_GTU] = { CR_GT, CR_GT },
1805        [TCG_COND_GEU] = { CR_GT, CR_LT },
1806    };
1807
1808    TCGCond cond = args[4], cond2;
1809    TCGArg al, ah, bl, bh;
1810    int blconst, bhconst;
1811    int op, bit1, bit2;
1812
1813    al = args[0];
1814    ah = args[1];
1815    bl = args[2];
1816    bh = args[3];
1817    blconst = const_args[2];
1818    bhconst = const_args[3];
1819
1820    switch (cond) {
1821    case TCG_COND_EQ:
1822        op = CRAND;
1823        goto do_equality;
1824    case TCG_COND_NE:
1825        op = CRNAND;
1826    do_equality:
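            /* The pair is equal iff both halves are equal: combine the two
               half comparisons with crand (crnand for NE).  */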
1827        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1828        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1829        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1830        break;
1831
1832    case TCG_COND_LT:
1833    case TCG_COND_LE:
1834    case TCG_COND_GT:
1835    case TCG_COND_GE:
1836    case TCG_COND_LTU:
1837    case TCG_COND_LEU:
1838    case TCG_COND_GTU:
1839    case TCG_COND_GEU:
1840        bit1 = bits[cond].bit1;
1841        bit2 = bits[cond].bit2;
1842        op = (bit1 != bit2 ? CRANDC : CRAND);
1843        cond2 = tcg_unsigned_cond(cond);
1844
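            /* True if the high parts compare strictly (cr6), or the high
               parts are equal and the low parts compare unsigned (cr7).  */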
1845        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1846        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1847        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1848        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1849        break;
1850
1851    default:
1852        g_assert_not_reached();
1853    }
1854}
1855
1856static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1857                             const int *const_args)
1858{
1859    tcg_out_cmp2(s, args + 1, const_args + 1);
1860    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1861    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1862}
1863
1864static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1865                             const int *const_args)
1866{
1867    tcg_out_cmp2(s, args, const_args);
1868    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1869}
1870
1871static void tcg_out_mb(TCGContext *s, TCGArg a0)
1872{
1873    uint32_t insn;
1874
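        /* Only store-before-load ordering requires the full hwsync; every
           other combination is satisfied by the cheaper lwsync.  */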
1875    if (a0 & TCG_MO_ST_LD) {
1876        insn = HWSYNC;
1877    } else {
1878        insn = LWSYNC;
1879    }
1880
1881    tcg_out32(s, insn);
1882}
1883
1884static void tcg_out_call_int(TCGContext *s, int lk,
1885                             const tcg_insn_unit *target)
1886{
1887#ifdef _CALL_AIX
1888    /* Look through the function descriptor.  If the branch is in range and
1889       the TOC value fits in 32 bits, load it by immediate and branch directly.  */
1890    const void *tgt = ((const void * const *)target)[0];
1891    uintptr_t toc = ((const uintptr_t *)target)[1];
1892    intptr_t diff = tcg_pcrel_diff(s, tgt);
1893
1894    if (in_range_b(diff) && toc == (uint32_t)toc) {
1895        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1896        tcg_out_b(s, lk, tgt);
1897    } else {
1898        /* Fold the low bits of the constant into the addresses below.  */
1899        intptr_t arg = (intptr_t)target;
1900        int ofs = (int16_t)arg;
1901
1902        if (ofs + 8 < 0x8000) {
1903            arg -= ofs;
1904        } else {
1905            ofs = 0;
1906        }
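            /* Out-of-range call: load the entry address into CTR and the
               callee's TOC into R2 from the descriptor, then branch via CTR. */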
1907        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1908        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1909        tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
1910        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1911        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1912    }
1913#elif defined(_CALL_ELF) && _CALL_ELF == 2
1914    intptr_t diff;
1915
1916    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1917       address, which the callee uses to compute its TOC address.  */
1918    /* FIXME: when the branch is in range, we could avoid r12 load if we
1919       knew that the destination uses the same TOC, and what its local
1920       entry point offset is.  */
1921    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1922
1923    diff = tcg_pcrel_diff(s, target);
1924    if (in_range_b(diff)) {
1925        tcg_out_b(s, lk, target);
1926    } else {
1927        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1928        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1929    }
1930#else
1931    tcg_out_b(s, lk, target);
1932#endif
1933}
1934
1935static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
1936                         const TCGHelperInfo *info)
1937{
1938    tcg_out_call_int(s, LK, target);
1939}
1940
1941static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
1942    [MO_UB] = LBZX,
1943    [MO_UW] = LHZX,
1944    [MO_UL] = LWZX,
1945    [MO_UQ] = LDX,
1946    [MO_SW] = LHAX,
1947    [MO_SL] = LWAX,
1948    [MO_BSWAP | MO_UB] = LBZX,
1949    [MO_BSWAP | MO_UW] = LHBRX,
1950    [MO_BSWAP | MO_UL] = LWBRX,
1951    [MO_BSWAP | MO_UQ] = LDBRX,
1952};
1953
1954static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
1955    [MO_UB] = STBX,
1956    [MO_UW] = STHX,
1957    [MO_UL] = STWX,
1958    [MO_UQ] = STDX,
1959    [MO_BSWAP | MO_UB] = STBX,
1960    [MO_BSWAP | MO_UW] = STHBRX,
1961    [MO_BSWAP | MO_UL] = STWBRX,
1962    [MO_BSWAP | MO_UQ] = STDBRX,
1963};
1964
1965static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
1966{
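        /* The slow path is entered via a conditional branch-and-link, so LR
           already holds the fast-path address to pass to the helper.  */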
1967    if (arg < 0) {
1968        arg = TCG_REG_TMP1;
1969    }
1970    tcg_out32(s, MFSPR | RT(arg) | LR);
1971    return arg;
1972}
1973
1974/*
1975 * For the purposes of ppc32 sorting 4 input registers into 4 argument
1976 * registers, there is an outside chance we would require 3 temps.
1977 */
1978static const TCGLdstHelperParam ldst_helper_param = {
1979    .ra_gen = ldst_ra_gen,
1980    .ntmp = 3,
1981    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
1982};
1983
1984static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1985{
1986    MemOp opc = get_memop(lb->oi);
1987
1988    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1989        return false;
1990    }
1991
1992    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1993    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
1994    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1995
1996    tcg_out_b(s, 0, lb->raddr);
1997    return true;
1998}
1999
2000static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2001{
2002    MemOp opc = get_memop(lb->oi);
2003
2004    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2005        return false;
2006    }
2007
2008    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
2009    tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);
2010
2011    tcg_out_b(s, 0, lb->raddr);
2012    return true;
2013}
2014
2015typedef struct {
2016    TCGReg base;
2017    TCGReg index;
2018} HostAddress;
2019
2020bool tcg_target_has_memory_bswap(MemOp memop)
2021{
2022    return true;
2023}
2024
2025/*
2026 * For softmmu, perform the TLB load and compare.
2027 * For useronly, perform any required alignment tests.
2028 * In both cases, return a TCGLabelQemuLdst structure if the slow path
2029 * is required and fill in @h with the host address for the fast path.
2030 */
2031static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
2032                                           TCGReg addrlo, TCGReg addrhi,
2033                                           MemOpIdx oi, bool is_ld)
2034{
2035    TCGLabelQemuLdst *ldst = NULL;
2036    MemOp opc = get_memop(oi);
2037    unsigned a_bits = get_alignment_bits(opc);
2038
2039#ifdef CONFIG_SOFTMMU
2040    int mem_index = get_mmuidx(oi);
2041    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
2042                        : offsetof(CPUTLBEntry, addr_write);
2043    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
2044    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2045    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2046    unsigned s_bits = opc & MO_SIZE;
2047
2048    ldst = new_ldst_label(s);
2049    ldst->is_ld = is_ld;
2050    ldst->oi = oi;
2051    ldst->addrlo_reg = addrlo;
2052    ldst->addrhi_reg = addrhi;
2053
2054    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
2055    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
2056    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
2057    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
2058    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
2059
2060    /* Extract the page index, shifted into place for tlb index.  */
2061    if (TCG_TARGET_REG_BITS == 32) {
2062        tcg_out_shri32(s, TCG_REG_R0, addrlo,
2063                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2064    } else {
2065        tcg_out_shri64(s, TCG_REG_R0, addrlo,
2066                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2067    }
2068    tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
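        /* TMP1 now holds the byte offset of the TLB entry within the table. */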
2069
2070    /* Load the (low part) TLB comparator into TMP2.  */
2071    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
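            /* The update-form load also advances TMP1 to the TLB entry
               address, which the addend load below relies on.  */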
2072        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
2073                        ? LWZUX : LDUX);
2074        tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
2075    } else {
2076        tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2077        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2078            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
2079                       TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
2080        } else {
2081            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
2082        }
2083    }
2084
2085    /*
2086     * Load the TLB addend for use on the fast path.
2087     * Do this asap to minimize any load use delay.
2088     */
2089    if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2090        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2091                   offsetof(CPUTLBEntry, addend));
2092    }
2093
2094    /* Clear the non-page, non-alignment bits from the address in R0. */
2095    if (TCG_TARGET_REG_BITS == 32) {
2096        /*
2097         * We don't support unaligned accesses on 32-bits.
2098         * Preserve the bottom bits and thus trigger a comparison
2099         * failure on unaligned accesses.
2100         */
2101        if (a_bits < s_bits) {
2102            a_bits = s_bits;
2103        }
2104        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2105                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2106    } else {
2107        TCGReg t = addrlo;
2108
2109        /*
2110         * If the access is unaligned, we need to make sure we fail if we
2111         * cross a page boundary.  The trick is to add the access size-1
2112         * to the address before masking the low bits.  That will make the
2113         * address overflow to the next page if we cross a page boundary,
2114         * which will then force a mismatch of the TLB compare.
2115         */
2116        if (a_bits < s_bits) {
2117            unsigned a_mask = (1 << a_bits) - 1;
2118            unsigned s_mask = (1 << s_bits) - 1;
2119            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2120            t = TCG_REG_R0;
2121        }
2122
2123        /* Mask the address for the requested alignment.  */
2124        if (TARGET_LONG_BITS == 32) {
2125            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2126                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2127        } else if (a_bits == 0) {
2128            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
2129        } else {
2130            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2131                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
2132            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
2133        }
2134    }
2135
2136    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2137        /* Low part comparison into cr7. */
2138        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2139                    0, 7, TCG_TYPE_I32);
2140
2141        /* Load the high part TLB comparator into TMP2.  */
2142        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2143                   cmp_off + 4 * !HOST_BIG_ENDIAN);
2144
2145        /* Load addend, deferred for this case. */
2146        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2147                   offsetof(CPUTLBEntry, addend));
2148
2149        /* High part comparison into cr6. */
2150        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
2151
2152        /* Combine comparisons into cr7. */
2153        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2154    } else {
2155        /* Full comparison into cr7. */
2156        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2157                    0, 7, TCG_TYPE_TL);
2158    }
2159
2160    /* Record this insn for later relocation and branch-and-link to the slow path. */
2161    ldst->label_ptr[0] = s->code_ptr;
2162    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2163
2164    h->base = TCG_REG_TMP1;
2165#else
2166    if (a_bits) {
2167        ldst = new_ldst_label(s);
2168        ldst->is_ld = is_ld;
2169        ldst->oi = oi;
2170        ldst->addrlo_reg = addrlo;
2171        ldst->addrhi_reg = addrhi;
2172
2173        /* We expect a_bits to max out at 7, well within ANDI's 16-bit immediate. */
2174        tcg_debug_assert(a_bits < 16);
2175        tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
2176
2177        ldst->label_ptr[0] = s->code_ptr;
2178        tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
2179    }
2180
2181    h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2182#endif
2183
2184    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2185        /* Zero-extend the guest address for use in the host address. */
2186        tcg_out_ext32u(s, TCG_REG_R0, addrlo);
2187        h->index = TCG_REG_R0;
2188    } else {
2189        h->index = addrlo;
2190    }
2191
2192    return ldst;
2193}
2194
2195static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
2196                            TCGReg addrlo, TCGReg addrhi,
2197                            MemOpIdx oi, TCGType data_type)
2198{
2199    MemOp opc = get_memop(oi);
2200    TCGLabelQemuLdst *ldst;
2201    HostAddress h;
2202
2203    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
2204
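        /* 64-bit load on a 32-bit host: two 32-bit loads, ordered so that the
           address register is not clobbered before the second load.  */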
2205    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2206        if (opc & MO_BSWAP) {
2207            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2208            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2209            tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
2210        } else if (h.base != 0) {
2211            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2212            tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
2213            tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
2214        } else if (h.index == datahi) {
2215            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2216            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2217        } else {
2218            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2219            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2220        }
2221    } else {
2222        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
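            /* No ldbrx before ISA 2.06: byte-reverse each half with lwbrx and
               merge the high half into place with rldimi.  */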
2223        if (!have_isa_2_06 && insn == LDBRX) {
2224            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2225            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2226            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
2227            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2228        } else if (insn) {
2229            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2230        } else {
2231            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2232            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2233            tcg_out_movext(s, TCG_TYPE_REG, datalo,
2234                           TCG_TYPE_REG, opc & MO_SSIZE, datalo);
2235        }
2236    }
2237
2238    if (ldst) {
2239        ldst->type = data_type;
2240        ldst->datalo_reg = datalo;
2241        ldst->datahi_reg = datahi;
2242        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2243    }
2244}
2245
2246static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
2247                            TCGReg addrlo, TCGReg addrhi,
2248                            MemOpIdx oi, TCGType data_type)
2249{
2250    MemOp opc = get_memop(oi);
2251    TCGLabelQemuLdst *ldst;
2252    HostAddress h;
2253
2254    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
2255
2256    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2257        if (opc & MO_BSWAP) {
2258            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2259            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2260            tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
2261        } else if (h.base != 0) {
2262            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2263            tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
2264            tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
2265        } else {
2266            tcg_out32(s, STW | TAI(datahi, h.index, 0));
2267            tcg_out32(s, STW | TAI(datalo, h.index, 4));
2268        }
2269    } else {
2270        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
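            /* No stdbrx before ISA 2.06: store the byte-reversed low word,
               then shift the high word down and store it byte-reversed at +4. */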
2271        if (!have_isa_2_06 && insn == STDBRX) {
2272            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2273            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
2274            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2275            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
2276        } else {
2277            tcg_out32(s, insn | SAB(datalo, h.base, h.index));
2278        }
2279    }
2280
2281    if (ldst) {
2282        ldst->type = data_type;
2283        ldst->datalo_reg = datalo;
2284        ldst->datahi_reg = datahi;
2285        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2286    }
2287}
2288
2289static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2290{
2291    int i;
2292    for (i = 0; i < count; ++i) {
2293        p[i] = NOP;
2294    }
2295}
2296
2297/* Parameters for function call generation, used in tcg.c.  */
2298#define TCG_TARGET_STACK_ALIGN       16
2299
2300#ifdef _CALL_AIX
2301# define LINK_AREA_SIZE                (6 * SZR)
2302# define LR_OFFSET                     (1 * SZR)
2303# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
2304#elif defined(_CALL_DARWIN)
2305# define LINK_AREA_SIZE                (6 * SZR)
2306# define LR_OFFSET                     (2 * SZR)
2307#elif TCG_TARGET_REG_BITS == 64
2308# if defined(_CALL_ELF) && _CALL_ELF == 2
2309#  define LINK_AREA_SIZE               (4 * SZR)
2310#  define LR_OFFSET                    (1 * SZR)
2311# endif
2312#else /* TCG_TARGET_REG_BITS == 32 */
2313# if defined(_CALL_SYSV)
2314#  define LINK_AREA_SIZE               (2 * SZR)
2315#  define LR_OFFSET                    (1 * SZR)
2316# endif
2317#endif
2318#ifndef LR_OFFSET
2319# error "Unhandled ABI"
2320#endif
2321#ifndef TCG_TARGET_CALL_STACK_OFFSET
2322# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
2323#endif
2324
2325#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2326#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2327
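    /* From the stack pointer up, the frame holds the ABI link/parameter area,
       the outgoing call-argument area, the TCG temporary buffer and, at the
       top, the callee-saved register save area, rounded up to the alignment. */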
2328#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
2329                     + TCG_STATIC_CALL_ARGS_SIZE    \
2330                     + CPU_TEMP_BUF_SIZE            \
2331                     + REG_SAVE_SIZE                \
2332                     + TCG_TARGET_STACK_ALIGN - 1)  \
2333                    & -TCG_TARGET_STACK_ALIGN)
2334
2335#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
2336
2337static void tcg_target_qemu_prologue(TCGContext *s)
2338{
2339    int i;
2340
2341#ifdef _CALL_AIX
2342    const void **desc = (const void **)s->code_ptr;
2343    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
2344    desc[1] = 0;                            /* environment pointer */
2345    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
2346#endif
2347
2348    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2349                  CPU_TEMP_BUF_SIZE);
2350
2351    /* Prologue */
2352    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2353    tcg_out32(s, (SZR == 8 ? STDU : STWU)
2354              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2355
2356    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2357        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2358                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2359    }
2360    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2361
2362#ifndef CONFIG_SOFTMMU
2363    if (guest_base) {
2364        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2365        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2366    }
2367#endif
2368
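        /* The first C argument is the CPUArchState pointer, the second the
           address of translated code to execute: enter it via CTR, keeping a
           copy in TCG_REG_TB when that register is in use.  */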
2369    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2370    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2371    if (USE_REG_TB) {
2372        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2373    }
2374    tcg_out32(s, BCCTR | BO_ALWAYS);
2375
2376    /* Epilogue */
2377    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2378
2379    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2380    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2381        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2382                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2383    }
2384    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2385    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2386    tcg_out32(s, BCLR | BO_ALWAYS);
2387}
2388
2389static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2390{
2391    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2392    tcg_out_b(s, 0, tcg_code_gen_epilogue);
2393}
2394
2395static void tcg_out_goto_tb(TCGContext *s, int which)
2396{
2397    uintptr_t ptr = get_jmp_target_addr(s, which);
2398
2399    if (USE_REG_TB) {
2400        ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
2401        tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
2402
2403        /* Direct branch will be patched by tb_target_set_jmp_target. */
2404        set_jmp_insn_offset(s, which);
2405        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2406
2407        /* When the branch is out of range, fall through to the indirect path. */
2408        tcg_out32(s, BCCTR | BO_ALWAYS);
2409
2410        /* For the unlinked case, need to reset TCG_REG_TB.  */
2411        set_jmp_reset_offset(s, which);
2412        tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
2413                         -tcg_current_code_size(s));
2414    } else {
2415        /* Direct branch will be patched by tb_target_set_jmp_target. */
2416        set_jmp_insn_offset(s, which);
2417        tcg_out32(s, NOP);
2418
2419        /* When the branch is out of range, fall through to the indirect path. */
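            /* Split the slot's absolute address: the high part is built by
               movi, the sign-extended low 16 bits fold into the load offset. */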
2420        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
2421        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
2422        tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2423        tcg_out32(s, BCCTR | BO_ALWAYS);
2424        set_jmp_reset_offset(s, which);
2425    }
2426}
2427
2428void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2429                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2430{
2431    uintptr_t addr = tb->jmp_target_addr[n];
2432    intptr_t diff = addr - jmp_rx;
2433    tcg_insn_unit insn;
2434
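        /* Patch the reserved slot: a direct branch when the target is in
           range; otherwise an mtctr from the preloaded TCG_REG_TB, or a plain
           nop so execution falls through to the indirect sequence.  */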
2435    if (in_range_b(diff)) {
2436        insn = B | (diff & 0x3fffffc);
2437    } else if (USE_REG_TB) {
2438        insn = MTSPR | RS(TCG_REG_TB) | CTR;
2439    } else {
2440        insn = NOP;
2441    }
2442
2443    qatomic_set((uint32_t *)jmp_rw, insn);
2444    flush_idcache_range(jmp_rx, jmp_rw, 4);
2445}
2446
2447static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2448                       const TCGArg args[TCG_MAX_OP_ARGS],
2449                       const int const_args[TCG_MAX_OP_ARGS])
2450{
2451    TCGArg a0, a1, a2;
2452
2453    switch (opc) {
2454    case INDEX_op_goto_ptr:
2455        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2456        if (USE_REG_TB) {
2457            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2458        }
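            /* Set the return value to 0 in case the target is the epilogue. */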
2459        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2460        tcg_out32(s, BCCTR | BO_ALWAYS);
2461        break;
2462    case INDEX_op_br:
2463        {
2464            TCGLabel *l = arg_label(args[0]);
2465            uint32_t insn = B;
2466
2467            if (l->has_value) {
2468                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2469                                       l->u.value_ptr);
2470            } else {
2471                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2472            }
2473            tcg_out32(s, insn);
2474        }
2475        break;
2476    case INDEX_op_ld8u_i32:
2477    case INDEX_op_ld8u_i64:
2478        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2479        break;
2480    case INDEX_op_ld8s_i32:
2481    case INDEX_op_ld8s_i64:
2482        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2483        tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2484        break;
2485    case INDEX_op_ld16u_i32:
2486    case INDEX_op_ld16u_i64:
2487        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2488        break;
2489    case INDEX_op_ld16s_i32:
2490    case INDEX_op_ld16s_i64:
2491        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2492        break;
2493    case INDEX_op_ld_i32:
2494    case INDEX_op_ld32u_i64:
2495        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2496        break;
2497    case INDEX_op_ld32s_i64:
2498        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2499        break;
2500    case INDEX_op_ld_i64:
2501        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2502        break;
2503    case INDEX_op_st8_i32:
2504    case INDEX_op_st8_i64:
2505        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2506        break;
2507    case INDEX_op_st16_i32:
2508    case INDEX_op_st16_i64:
2509        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2510        break;
2511    case INDEX_op_st_i32:
2512    case INDEX_op_st32_i64:
2513        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2514        break;
2515    case INDEX_op_st_i64:
2516        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2517        break;
2518
2519    case INDEX_op_add_i32:
2520        a0 = args[0], a1 = args[1], a2 = args[2];
2521        if (const_args[2]) {
2522        do_addi_32:
2523            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2524        } else {
2525            tcg_out32(s, ADD | TAB(a0, a1, a2));
2526        }
2527        break;
2528    case INDEX_op_sub_i32:
2529        a0 = args[0], a1 = args[1], a2 = args[2];
2530        if (const_args[1]) {
2531            if (const_args[2]) {
2532                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2533            } else {
2534                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2535            }
2536        } else if (const_args[2]) {
2537            a2 = -a2;
2538            goto do_addi_32;
2539        } else {
2540            tcg_out32(s, SUBF | TAB(a0, a2, a1));
2541        }
2542        break;
2543
2544    case INDEX_op_and_i32:
2545        a0 = args[0], a1 = args[1], a2 = args[2];
2546        if (const_args[2]) {
2547            tcg_out_andi32(s, a0, a1, a2);
2548        } else {
2549            tcg_out32(s, AND | SAB(a1, a0, a2));
2550        }
2551        break;
2552    case INDEX_op_and_i64:
2553        a0 = args[0], a1 = args[1], a2 = args[2];
2554        if (const_args[2]) {
2555            tcg_out_andi64(s, a0, a1, a2);
2556        } else {
2557            tcg_out32(s, AND | SAB(a1, a0, a2));
2558        }
2559        break;
2560    case INDEX_op_or_i64:
2561    case INDEX_op_or_i32:
2562        a0 = args[0], a1 = args[1], a2 = args[2];
2563        if (const_args[2]) {
2564            tcg_out_ori32(s, a0, a1, a2);
2565        } else {
2566            tcg_out32(s, OR | SAB(a1, a0, a2));
2567        }
2568        break;
2569    case INDEX_op_xor_i64:
2570    case INDEX_op_xor_i32:
2571        a0 = args[0], a1 = args[1], a2 = args[2];
2572        if (const_args[2]) {
2573            tcg_out_xori32(s, a0, a1, a2);
2574        } else {
2575            tcg_out32(s, XOR | SAB(a1, a0, a2));
2576        }
2577        break;
2578    case INDEX_op_andc_i32:
2579        a0 = args[0], a1 = args[1], a2 = args[2];
2580        if (const_args[2]) {
2581            tcg_out_andi32(s, a0, a1, ~a2);
2582        } else {
2583            tcg_out32(s, ANDC | SAB(a1, a0, a2));
2584        }
2585        break;
2586    case INDEX_op_andc_i64:
2587        a0 = args[0], a1 = args[1], a2 = args[2];
2588        if (const_args[2]) {
2589            tcg_out_andi64(s, a0, a1, ~a2);
2590        } else {
2591            tcg_out32(s, ANDC | SAB(a1, a0, a2));
2592        }
2593        break;
2594    case INDEX_op_orc_i32:
2595        if (const_args[2]) {
2596            tcg_out_ori32(s, args[0], args[1], ~args[2]);
2597            break;
2598        }
2599        /* FALLTHRU */
2600    case INDEX_op_orc_i64:
2601        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2602        break;
2603    case INDEX_op_eqv_i32:
2604        if (const_args[2]) {
2605            tcg_out_xori32(s, args[0], args[1], ~args[2]);
2606            break;
2607        }
2608        /* FALLTHRU */
2609    case INDEX_op_eqv_i64:
2610        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2611        break;
2612    case INDEX_op_nand_i32:
2613    case INDEX_op_nand_i64:
2614        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2615        break;
2616    case INDEX_op_nor_i32:
2617    case INDEX_op_nor_i64:
2618        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2619        break;
2620
2621    case INDEX_op_clz_i32:
2622        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2623                      args[2], const_args[2]);
2624        break;
2625    case INDEX_op_ctz_i32:
2626        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2627                      args[2], const_args[2]);
2628        break;
2629    case INDEX_op_ctpop_i32:
2630        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2631        break;
2632
2633    case INDEX_op_clz_i64:
2634        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2635                      args[2], const_args[2]);
2636        break;
2637    case INDEX_op_ctz_i64:
2638        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2639                      args[2], const_args[2]);
2640        break;
2641    case INDEX_op_ctpop_i64:
2642        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2643        break;
2644
2645    case INDEX_op_mul_i32:
2646        a0 = args[0], a1 = args[1], a2 = args[2];
2647        if (const_args[2]) {
2648            tcg_out32(s, MULLI | TAI(a0, a1, a2));
2649        } else {
2650            tcg_out32(s, MULLW | TAB(a0, a1, a2));
2651        }
2652        break;
2653
2654    case INDEX_op_div_i32:
2655        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2656        break;
2657
2658    case INDEX_op_divu_i32:
2659        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2660        break;
2661
2662    case INDEX_op_rem_i32:
2663        tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
2664        break;
2665
2666    case INDEX_op_remu_i32:
2667        tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
2668        break;
2669
2670    case INDEX_op_shl_i32:
2671        if (const_args[2]) {
2672            /* Limit immediate shift count lest we create an illegal insn.  */
2673            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
2674        } else {
2675            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2676        }
2677        break;
2678    case INDEX_op_shr_i32:
2679        if (const_args[2]) {
2680            /* Limit immediate shift count lest we create an illegal insn.  */
2681            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
2682        } else {
2683            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2684        }
2685        break;
2686    case INDEX_op_sar_i32:
2687        if (const_args[2]) {
2688            tcg_out_sari32(s, args[0], args[1], args[2]);
2689        } else {
2690            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2691        }
2692        break;
2693    case INDEX_op_rotl_i32:
2694        if (const_args[2]) {
2695            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2696        } else {
2697            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2698                         | MB(0) | ME(31));
2699        }
2700        break;
2701    case INDEX_op_rotr_i32:
2702        if (const_args[2]) {
2703            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2704        } else {
2705            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2706            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2707                         | MB(0) | ME(31));
2708        }
2709        break;
2710
2711    case INDEX_op_brcond_i32:
2712        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2713                       arg_label(args[3]), TCG_TYPE_I32);
2714        break;
2715    case INDEX_op_brcond_i64:
2716        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2717                       arg_label(args[3]), TCG_TYPE_I64);
2718        break;
2719    case INDEX_op_brcond2_i32:
2720        tcg_out_brcond2(s, args, const_args);
2721        break;
2722
2723    case INDEX_op_neg_i32:
2724    case INDEX_op_neg_i64:
2725        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2726        break;
2727
2728    case INDEX_op_not_i32:
2729    case INDEX_op_not_i64:
2730        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2731        break;
2732
2733    case INDEX_op_add_i64:
2734        a0 = args[0], a1 = args[1], a2 = args[2];
2735        if (const_args[2]) {
2736        do_addi_64:
2737            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2738        } else {
2739            tcg_out32(s, ADD | TAB(a0, a1, a2));
2740        }
2741        break;
2742    case INDEX_op_sub_i64:
2743        a0 = args[0], a1 = args[1], a2 = args[2];
2744        if (const_args[1]) {
2745            if (const_args[2]) {
2746                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2747            } else {
2748                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2749            }
2750        } else if (const_args[2]) {
2751            a2 = -a2;
2752            goto do_addi_64;
2753        } else {
2754            tcg_out32(s, SUBF | TAB(a0, a2, a1));
2755        }
2756        break;
2757
2758    case INDEX_op_shl_i64:
2759        if (const_args[2]) {
2760            /* Limit immediate shift count lest we create an illegal insn.  */
2761            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
2762        } else {
2763            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2764        }
2765        break;
2766    case INDEX_op_shr_i64:
2767        if (const_args[2]) {
2768            /* Limit immediate shift count lest we create an illegal insn.  */
2769            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
2770        } else {
2771            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2772        }
2773        break;
2774    case INDEX_op_sar_i64:
2775        if (const_args[2]) {
2776            tcg_out_sari64(s, args[0], args[1], args[2]);
2777        } else {
2778            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2779        }
2780        break;
2781    case INDEX_op_rotl_i64:
2782        if (const_args[2]) {
2783            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2784        } else {
2785            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2786        }
2787        break;
2788    case INDEX_op_rotr_i64:
2789        if (const_args[2]) {
2790            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2791        } else {
2792            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2793            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2794        }
2795        break;
2796
2797    case INDEX_op_mul_i64:
2798        a0 = args[0], a1 = args[1], a2 = args[2];
2799        if (const_args[2]) {
2800            tcg_out32(s, MULLI | TAI(a0, a1, a2));
2801        } else {
2802            tcg_out32(s, MULLD | TAB(a0, a1, a2));
2803        }
2804        break;
2805    case INDEX_op_div_i64:
2806        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2807        break;
2808    case INDEX_op_divu_i64:
2809        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2810        break;
2811    case INDEX_op_rem_i64:
2812        tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
2813        break;
2814    case INDEX_op_remu_i64:
2815        tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
2816        break;
2817
2818    case INDEX_op_qemu_ld_i32:
2819        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2820            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2821                            args[2], TCG_TYPE_I32);
2822        } else {
2823            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
2824                            args[3], TCG_TYPE_I32);
2825        }
2826        break;
2827    case INDEX_op_qemu_ld_i64:
2828        if (TCG_TARGET_REG_BITS == 64) {
2829            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2830                            args[2], TCG_TYPE_I64);
2831        } else if (TARGET_LONG_BITS == 32) {
2832            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
2833                            args[3], TCG_TYPE_I64);
2834        } else {
2835            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2836                            args[4], TCG_TYPE_I64);
2837        }
2838        break;
2839    case INDEX_op_qemu_st_i32:
2840        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2841            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2842                            args[2], TCG_TYPE_I32);
2843        } else {
2844            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2845                            args[3], TCG_TYPE_I32);
2846        }
2847        break;
2848    case INDEX_op_qemu_st_i64:
2849        if (TCG_TARGET_REG_BITS == 64) {
2850            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2851                            args[2], TCG_TYPE_I64);
2852        } else if (TARGET_LONG_BITS == 32) {
2853            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2854                            args[3], TCG_TYPE_I64);
2855        } else {
2856            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2857                            args[4], TCG_TYPE_I64);
2858        }
2859        break;
2860
2861    case INDEX_op_setcond_i32:
2862        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2863                        const_args[2]);
2864        break;
2865    case INDEX_op_setcond_i64:
2866        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2867                        const_args[2]);
2868        break;
2869    case INDEX_op_setcond2_i32:
2870        tcg_out_setcond2(s, args, const_args);
2871        break;
2872
2873    case INDEX_op_bswap16_i32:
2874    case INDEX_op_bswap16_i64:
2875        tcg_out_bswap16(s, args[0], args[1], args[2]);
2876        break;
2877    case INDEX_op_bswap32_i32:
2878        tcg_out_bswap32(s, args[0], args[1], 0);
2879        break;
2880    case INDEX_op_bswap32_i64:
2881        tcg_out_bswap32(s, args[0], args[1], args[2]);
2882        break;
2883    case INDEX_op_bswap64_i64:
2884        tcg_out_bswap64(s, args[0], args[1]);
2885        break;
2886
2887    case INDEX_op_deposit_i32:
2888        if (const_args[2]) {
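                /* Only a constant zero reaches here, so the deposit reduces to
                   clearing the field with the inverted mask.  */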
2889            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
2890            tcg_out_andi32(s, args[0], args[0], ~mask);
2891        } else {
2892            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
2893                        32 - args[3] - args[4], 31 - args[3]);
2894        }
2895        break;
2896    case INDEX_op_deposit_i64:
2897        if (const_args[2]) {
2898            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
2899            tcg_out_andi64(s, args[0], args[0], ~mask);
2900        } else {
2901            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
2902                        64 - args[3] - args[4]);
2903        }
2904        break;
2905
2906    case INDEX_op_extract_i32:
2907        tcg_out_rlw(s, RLWINM, args[0], args[1],
2908                    32 - args[2], 32 - args[3], 31);
2909        break;
2910    case INDEX_op_extract_i64:
2911        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
2912        break;
2913
2914    case INDEX_op_movcond_i32:
2915        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
2916                        args[3], args[4], const_args[2]);
2917        break;
2918    case INDEX_op_movcond_i64:
2919        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
2920                        args[3], args[4], const_args[2]);
2921        break;
2922
2923#if TCG_TARGET_REG_BITS == 64
2924    case INDEX_op_add2_i64:
2925#else
2926    case INDEX_op_add2_i32:
2927#endif
2928        /* Note that the CA bit is defined based on the word size of the
2929           environment.  So in 64-bit mode it's always carry-out of bit 63.
2930           The fallback code using deposit works just as well for 32-bit.  */
2931        a0 = args[0], a1 = args[1];
2932        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
2933            a0 = TCG_REG_R0;
2934        }
2935        if (const_args[4]) {
2936            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
2937        } else {
2938            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
2939        }
2940        if (const_args[5]) {
2941            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
2942        } else {
2943            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
2944        }
2945        if (a0 != args[0]) {
2946            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2947        }
2948        break;
2949
2950#if TCG_TARGET_REG_BITS == 64
2951    case INDEX_op_sub2_i64:
2952#else
2953    case INDEX_op_sub2_i32:
2954#endif
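            /* The low-part subtract (subfic/subfc) records the borrow state in
               CA; the high part consumes it with subfe, or subfze/subfme when
               the high minuend is constant 0/-1.  */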
2955        a0 = args[0], a1 = args[1];
2956        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
2957            a0 = TCG_REG_R0;
2958        }
2959        if (const_args[2]) {
2960            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
2961        } else {
2962            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
2963        }
2964        if (const_args[3]) {
2965            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
2966        } else {
2967            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
2968        }
2969        if (a0 != args[0]) {
2970            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2971        }
2972        break;
2973
2974    case INDEX_op_muluh_i32:
2975        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
2976        break;
2977    case INDEX_op_mulsh_i32:
2978        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
2979        break;
2980    case INDEX_op_muluh_i64:
2981        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
2982        break;
2983    case INDEX_op_mulsh_i64:
2984        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
2985        break;
2986
2987    case INDEX_op_mb:
2988        tcg_out_mb(s, args[0]);
2989        break;
2990
2991    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
2992    case INDEX_op_mov_i64:
2993    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
2994    case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb.  */
2995    case INDEX_op_goto_tb:   /* Always emitted via tcg_out_goto_tb.  */
2996    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2997    case INDEX_op_ext8s_i64:
2998    case INDEX_op_ext8u_i32:
2999    case INDEX_op_ext8u_i64:
3000    case INDEX_op_ext16s_i32:
3001    case INDEX_op_ext16s_i64:
3002    case INDEX_op_ext16u_i32:
3003    case INDEX_op_ext16u_i64:
3004    case INDEX_op_ext32s_i64:
3005    case INDEX_op_ext32u_i64:
3006    case INDEX_op_ext_i32_i64:
3007    case INDEX_op_extu_i32_i64:
3008    case INDEX_op_extrl_i64_i32:
3009    default:
3010        g_assert_not_reached();
3011    }
3012}
3013
3014int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3015{
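        /* Return 1 if the op is supported directly, -1 if it can be emitted
           with expansion via tcg_expand_vec_op, and 0 if it is unsupported. */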
3016    switch (opc) {
3017    case INDEX_op_and_vec:
3018    case INDEX_op_or_vec:
3019    case INDEX_op_xor_vec:
3020    case INDEX_op_andc_vec:
3021    case INDEX_op_not_vec:
3022    case INDEX_op_nor_vec:
3023    case INDEX_op_eqv_vec:
3024    case INDEX_op_nand_vec:
3025        return 1;
3026    case INDEX_op_orc_vec:
3027        return have_isa_2_07;
3028    case INDEX_op_add_vec:
3029    case INDEX_op_sub_vec:
3030    case INDEX_op_smax_vec:
3031    case INDEX_op_smin_vec:
3032    case INDEX_op_umax_vec:
3033    case INDEX_op_umin_vec:
3034    case INDEX_op_shlv_vec:
3035    case INDEX_op_shrv_vec:
3036    case INDEX_op_sarv_vec:
3037    case INDEX_op_rotlv_vec:
3038        return vece <= MO_32 || have_isa_2_07;
3039    case INDEX_op_ssadd_vec:
3040    case INDEX_op_sssub_vec:
3041    case INDEX_op_usadd_vec:
3042    case INDEX_op_ussub_vec:
3043        return vece <= MO_32;
3044    case INDEX_op_cmp_vec:
3045    case INDEX_op_shli_vec:
3046    case INDEX_op_shri_vec:
3047    case INDEX_op_sari_vec:
3048    case INDEX_op_rotli_vec:
3049        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3050    case INDEX_op_neg_vec:
3051        return vece >= MO_32 && have_isa_3_00;
3052    case INDEX_op_mul_vec:
3053        switch (vece) {
3054        case MO_8:
3055        case MO_16:
3056            return -1;
3057        case MO_32:
3058            return have_isa_2_07 ? 1 : -1;
3059        case MO_64:
3060            return have_isa_3_10;
3061        }
3062        return 0;
3063    case INDEX_op_bitsel_vec:
3064        return have_vsx;
3065    case INDEX_op_rotrv_vec:
3066        return -1;
3067    default:
3068        return 0;
3069    }
3070}
3071
3072static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3073                            TCGReg dst, TCGReg src)
3074{
3075    tcg_debug_assert(dst >= TCG_REG_V0);
3076
3077    /* Splat from integer reg allowed via constraints for v3.00.  */
3078    if (src < TCG_REG_V0) {
3079        tcg_debug_assert(have_isa_3_00);
3080        switch (vece) {
3081        case MO_64:
3082            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3083            return true;
3084        case MO_32:
3085            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3086            return true;
3087        default:
3088            /* Fail, so that we fall back on either dupm or mov+dup.  */
3089            return false;
3090        }
3091    }
3092
3093    /*
3094     * Recall we use (or emulate) VSX integer loads, so the integer is
3095     * right justified within the left (zero-index) double-word.
3096     */
3097    switch (vece) {
3098    case MO_8:
3099        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3100        break;
3101    case MO_16:
3102        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3103        break;
3104    case MO_32:
3105        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3106        break;
3107    case MO_64:
3108        if (have_vsx) {
3109            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3110            break;
3111        }
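            /* Without VSX, splat the doubleword with two vsldoi: swap the
               halves into TMP1, then combine so both halves hold dword 0.  */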
3112        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3113        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3114        break;
3115    default:
3116        g_assert_not_reached();
3117    }
3118    return true;
3119}
3120
3121static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3122                             TCGReg out, TCGReg base, intptr_t offset)
3123{
3124    int elt;
3125
3126    tcg_debug_assert(out >= TCG_REG_V0);
3127    switch (vece) {
3128    case MO_8:
3129        if (have_isa_3_00) {
3130            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3131        } else {
3132            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3133        }
3134        elt = extract32(offset, 0, 4);
3135#if !HOST_BIG_ENDIAN
3136        elt ^= 15;
3137#endif
3138        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3139        break;
3140    case MO_16:
3141        tcg_debug_assert((offset & 1) == 0);
3142        if (have_isa_3_00) {
3143            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3144        } else {
3145            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3146        }
3147        elt = extract32(offset, 1, 3);
3148#if !HOST_BIG_ENDIAN
3149        elt ^= 7;
3150#endif
3151        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3152        break;
3153    case MO_32:
3154        if (have_isa_3_00) {
3155            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3156            break;
3157        }
3158        tcg_debug_assert((offset & 3) == 0);
3159        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3160        elt = extract32(offset, 2, 2);
3161#if !HOST_BIG_ENDIAN
3162        elt ^= 3;
3163#endif
3164        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3165        break;
3166    case MO_64:
3167        if (have_vsx) {
3168            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3169            break;
3170        }
3171        tcg_debug_assert((offset & 7) == 0);
3172        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3173        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3174        elt = extract32(offset, 3, 1);
3175#if !HOST_BIG_ENDIAN
3176        elt = !elt;
3177#endif
3178        if (elt) {
3179            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3180        } else {
3181            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3182        }
3183        break;
3184    default:
3185        g_assert_not_reached();
3186    }
3187    return true;
3188}
3189
3190static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3191                           unsigned vecl, unsigned vece,
3192                           const TCGArg args[TCG_MAX_OP_ARGS],
3193                           const int const_args[TCG_MAX_OP_ARGS])
3194{
3195    static const uint32_t
3196        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3197        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3198        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3199        neg_op[4] = { 0, 0, VNEGW, VNEGD },
3200        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3201        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3202        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3203        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3204        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3205        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3206        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3207        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3208        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3209        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3210        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3211        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3212        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3213        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3214        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3215        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3216        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3217        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3218        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3219        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3220        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3221
3222    TCGType type = vecl + TCG_TYPE_V64;
3223    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3224    uint32_t insn;
3225
3226    switch (opc) {
3227    case INDEX_op_ld_vec:
3228        tcg_out_ld(s, type, a0, a1, a2);
3229        return;
3230    case INDEX_op_st_vec:
3231        tcg_out_st(s, type, a0, a1, a2);
3232        return;
3233    case INDEX_op_dupm_vec:
3234        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3235        return;
3236
3237    case INDEX_op_add_vec:
3238        insn = add_op[vece];
3239        break;
3240    case INDEX_op_sub_vec:
3241        insn = sub_op[vece];
3242        break;
3243    case INDEX_op_neg_vec:
3244        insn = neg_op[vece];
3245        a2 = a1;
3246        a1 = 0;
3247        break;
3248    case INDEX_op_mul_vec:
3249        insn = mul_op[vece];
3250        break;
3251    case INDEX_op_ssadd_vec:
3252        insn = ssadd_op[vece];
3253        break;
3254    case INDEX_op_sssub_vec:
3255        insn = sssub_op[vece];
3256        break;
3257    case INDEX_op_usadd_vec:
3258        insn = usadd_op[vece];
3259        break;
3260    case INDEX_op_ussub_vec:
3261        insn = ussub_op[vece];
3262        break;
3263    case INDEX_op_smin_vec:
3264        insn = smin_op[vece];
3265        break;
3266    case INDEX_op_umin_vec:
3267        insn = umin_op[vece];
3268        break;
3269    case INDEX_op_smax_vec:
3270        insn = smax_op[vece];
3271        break;
3272    case INDEX_op_umax_vec:
3273        insn = umax_op[vece];
3274        break;
3275    case INDEX_op_shlv_vec:
3276        insn = shlv_op[vece];
3277        break;
3278    case INDEX_op_shrv_vec:
3279        insn = shrv_op[vece];
3280        break;
3281    case INDEX_op_sarv_vec:
3282        insn = sarv_op[vece];
3283        break;
3284    case INDEX_op_and_vec:
3285        insn = VAND;
3286        break;
3287    case INDEX_op_or_vec:
3288        insn = VOR;
3289        break;
3290    case INDEX_op_xor_vec:
3291        insn = VXOR;
3292        break;
3293    case INDEX_op_andc_vec:
3294        insn = VANDC;
3295        break;
3296    case INDEX_op_not_vec:
3297        insn = VNOR;
3298        a2 = a1;
3299        break;
3300    case INDEX_op_orc_vec:
3301        insn = VORC;
3302        break;
3303    case INDEX_op_nand_vec:
3304        insn = VNAND;
3305        break;
3306    case INDEX_op_nor_vec:
3307        insn = VNOR;
3308        break;
3309    case INDEX_op_eqv_vec:
3310        insn = VEQV;
3311        break;
3312
3313    case INDEX_op_cmp_vec:
3314        switch (args[3]) {
3315        case TCG_COND_EQ:
3316            insn = eq_op[vece];
3317            break;
3318        case TCG_COND_NE:
3319            insn = ne_op[vece];
3320            break;
3321        case TCG_COND_GT:
3322            insn = gts_op[vece];
3323            break;
3324        case TCG_COND_GTU:
3325            insn = gtu_op[vece];
3326            break;
3327        default:
3328            g_assert_not_reached();
3329        }
3330        break;
3331
3332    case INDEX_op_bitsel_vec:
3333        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3334        return;
3335
3336    case INDEX_op_dup2_vec:
3337        assert(TCG_TARGET_REG_BITS == 32);
3338        /* With inputs a1 = xLxx, a2 = xHxx  */
3339        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
3340        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
3341        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
3342        return;
3343
3344    case INDEX_op_ppc_mrgh_vec:
3345        insn = mrgh_op[vece];
3346        break;
3347    case INDEX_op_ppc_mrgl_vec:
3348        insn = mrgl_op[vece];
3349        break;
3350    case INDEX_op_ppc_muleu_vec:
3351        insn = muleu_op[vece];
3352        break;
3353    case INDEX_op_ppc_mulou_vec:
3354        insn = mulou_op[vece];
3355        break;
3356    case INDEX_op_ppc_pkum_vec:
3357        insn = pkum_op[vece];
3358        break;
3359    case INDEX_op_rotlv_vec:
3360        insn = rotl_op[vece];
3361        break;
3362    case INDEX_op_ppc_msum_vec:
3363        tcg_debug_assert(vece == MO_16);
3364        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3365        return;
3366
3367    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
3368    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
3369    default:
3370        g_assert_not_reached();
3371    }
3372
3373    tcg_debug_assert(insn != 0);
3374    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3375}
3376
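/*
 * Expand a shift/rotate by immediate: mask or sign-extract the count,
 * splat it into a vector constant, and emit the corresponding
 * shift/rotate-by-vector opcode, which uses only the low-order bits of
 * each count element.
 */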
3377static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3378                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3379{
3380    TCGv_vec t1;
3381
3382    if (vece == MO_32) {
3383        /*
3384         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3385         * So using negative numbers gets us the 4th bit easily.
3386         */
3387        imm = sextract32(imm, 0, 5);
3388    } else {
3389        imm &= (8 << vece) - 1;
3390    }
3391
3392    /* Splat with bytes for xxspltib when ISA 2.07 allows MO_64. */
3393    t1 = tcg_constant_vec(type, MO_8, imm);
3394    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3395              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3396}
3397
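/*
 * Expand a vector comparison.  Only EQ, GT and GTU are provided
 * directly (plus NE for MO_8..MO_32 on ISA 3.00); the remaining
 * conditions are formed by swapping the operands and/or inverting
 * the result of the emitted compare.
 */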
3398static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3399                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3400{
3401    bool need_swap = false, need_inv = false;
3402
3403    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3404
3405    switch (cond) {
3406    case TCG_COND_EQ:
3407    case TCG_COND_GT:
3408    case TCG_COND_GTU:
3409        break;
3410    case TCG_COND_NE:
3411        if (have_isa_3_00 && vece <= MO_32) {
3412            break;
3413        }
3414        /* fall through */
3415    case TCG_COND_LE:
3416    case TCG_COND_LEU:
3417        need_inv = true;
3418        break;
3419    case TCG_COND_LT:
3420    case TCG_COND_LTU:
3421        need_swap = true;
3422        break;
3423    case TCG_COND_GE:
3424    case TCG_COND_GEU:
3425        need_swap = need_inv = true;
3426        break;
3427    default:
3428        g_assert_not_reached();
3429    }
3430
3431    if (need_inv) {
3432        cond = tcg_invert_cond(cond);
3433    }
3434    if (need_swap) {
3435        TCGv_vec t1;
3436        t1 = v1, v1 = v2, v2 = t1;
3437        cond = tcg_swap_cond(cond);
3438    }
3439
3440    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3441              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3442
3443    if (need_inv) {
3444        tcg_gen_not_vec(vece, v0, v0);
3445    }
3446}
3447
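/*
 * Expand a vector multiply that has no single instruction.
 * MO_8/MO_16: multiply the even and odd element pairs into
 * double-width products, interleave them with merge-high/merge-low,
 * then pack the low halves back to the original element size.
 * MO_32 (pre-2.07 only): form each product from 16-bit pieces as
 * lo*lo + ((hi*lo + lo*hi) << 16), using vmsumuhm on a rotated copy
 * of the multiplier for the cross terms.
 */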
3448static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3449                           TCGv_vec v1, TCGv_vec v2)
3450{
3451    TCGv_vec t1 = tcg_temp_new_vec(type);
3452    TCGv_vec t2 = tcg_temp_new_vec(type);
3453    TCGv_vec c0, c16;
3454
3455    switch (vece) {
3456    case MO_8:
3457    case MO_16:
3458        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3459                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3460        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3461                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3462        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3463                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3464        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3465                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3466        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3467                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
3468        break;
3469
3470    case MO_32:
3471        tcg_debug_assert(!have_isa_2_07);
3472        /*
3473         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3474         * So using -16 is a quick way to represent 16.
3475         */
3476        c16 = tcg_constant_vec(type, MO_8, -16);
3477        c0 = tcg_constant_vec(type, MO_8, 0);
3478
3479        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
3480                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
3481        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
3482                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3483        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
3484                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
3485        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
3486                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
3487        tcg_gen_add_vec(MO_32, v0, t1, t2);
3488        break;
3489
3490    default:
3491        g_assert_not_reached();
3492    }
3493    tcg_temp_free_vec(t1);
3494    tcg_temp_free_vec(t2);
3495}
3496
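/*
 * Expand the vector opcodes that are advertised but not emitted
 * directly: shifts and rotates by immediate (via a splatted count),
 * comparisons, multiplies, and rotate-right by vector (as rotate-left
 * by the negated count).
 */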
3497void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3498                       TCGArg a0, ...)
3499{
3500    va_list va;
3501    TCGv_vec v0, v1, v2, t0;
3502    TCGArg a2;
3503
3504    va_start(va, a0);
3505    v0 = temp_tcgv_vec(arg_temp(a0));
3506    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3507    a2 = va_arg(va, TCGArg);
3508
3509    switch (opc) {
3510    case INDEX_op_shli_vec:
3511        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
3512        break;
3513    case INDEX_op_shri_vec:
3514        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
3515        break;
3516    case INDEX_op_sari_vec:
3517        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
3518        break;
3519    case INDEX_op_rotli_vec:
3520        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
3521        break;
3522    case INDEX_op_cmp_vec:
3523        v2 = temp_tcgv_vec(arg_temp(a2));
3524        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3525        break;
3526    case INDEX_op_mul_vec:
3527        v2 = temp_tcgv_vec(arg_temp(a2));
3528        expand_vec_mul(type, vece, v0, v1, v2);
3529        break;
3530    case INDEX_op_rotrv_vec:
3531        v2 = temp_tcgv_vec(arg_temp(a2));
3532        t0 = tcg_temp_new_vec(type);
3533        tcg_gen_neg_vec(vece, t0, v2);
3534        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3535        tcg_temp_free_vec(t0);
3536        break;
3537    default:
3538        g_assert_not_reached();
3539    }
3540    va_end(va);
3541}
3542
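/*
 * Map each supported TCG opcode to its operand constraint set:
 * general registers ("r"), vector registers ("v"), and the immediate
 * alternatives accepted by the matching Power instructions.
 */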
3543static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3544{
3545    switch (op) {
3546    case INDEX_op_goto_ptr:
3547        return C_O0_I1(r);
3548
3549    case INDEX_op_ld8u_i32:
3550    case INDEX_op_ld8s_i32:
3551    case INDEX_op_ld16u_i32:
3552    case INDEX_op_ld16s_i32:
3553    case INDEX_op_ld_i32:
3554    case INDEX_op_ctpop_i32:
3555    case INDEX_op_neg_i32:
3556    case INDEX_op_not_i32:
3557    case INDEX_op_ext8s_i32:
3558    case INDEX_op_ext16s_i32:
3559    case INDEX_op_bswap16_i32:
3560    case INDEX_op_bswap32_i32:
3561    case INDEX_op_extract_i32:
3562    case INDEX_op_ld8u_i64:
3563    case INDEX_op_ld8s_i64:
3564    case INDEX_op_ld16u_i64:
3565    case INDEX_op_ld16s_i64:
3566    case INDEX_op_ld32u_i64:
3567    case INDEX_op_ld32s_i64:
3568    case INDEX_op_ld_i64:
3569    case INDEX_op_ctpop_i64:
3570    case INDEX_op_neg_i64:
3571    case INDEX_op_not_i64:
3572    case INDEX_op_ext8s_i64:
3573    case INDEX_op_ext16s_i64:
3574    case INDEX_op_ext32s_i64:
3575    case INDEX_op_ext_i32_i64:
3576    case INDEX_op_extu_i32_i64:
3577    case INDEX_op_bswap16_i64:
3578    case INDEX_op_bswap32_i64:
3579    case INDEX_op_bswap64_i64:
3580    case INDEX_op_extract_i64:
3581        return C_O1_I1(r, r);
3582
3583    case INDEX_op_st8_i32:
3584    case INDEX_op_st16_i32:
3585    case INDEX_op_st_i32:
3586    case INDEX_op_st8_i64:
3587    case INDEX_op_st16_i64:
3588    case INDEX_op_st32_i64:
3589    case INDEX_op_st_i64:
3590        return C_O0_I2(r, r);
3591
3592    case INDEX_op_add_i32:
3593    case INDEX_op_and_i32:
3594    case INDEX_op_or_i32:
3595    case INDEX_op_xor_i32:
3596    case INDEX_op_andc_i32:
3597    case INDEX_op_orc_i32:
3598    case INDEX_op_eqv_i32:
3599    case INDEX_op_shl_i32:
3600    case INDEX_op_shr_i32:
3601    case INDEX_op_sar_i32:
3602    case INDEX_op_rotl_i32:
3603    case INDEX_op_rotr_i32:
3604    case INDEX_op_setcond_i32:
3605    case INDEX_op_and_i64:
3606    case INDEX_op_andc_i64:
3607    case INDEX_op_shl_i64:
3608    case INDEX_op_shr_i64:
3609    case INDEX_op_sar_i64:
3610    case INDEX_op_rotl_i64:
3611    case INDEX_op_rotr_i64:
3612    case INDEX_op_setcond_i64:
3613        return C_O1_I2(r, r, ri);
3614
3615    case INDEX_op_mul_i32:
3616    case INDEX_op_mul_i64:
3617        return C_O1_I2(r, r, rI);
3618
3619    case INDEX_op_div_i32:
3620    case INDEX_op_divu_i32:
3621    case INDEX_op_rem_i32:
3622    case INDEX_op_remu_i32:
3623    case INDEX_op_nand_i32:
3624    case INDEX_op_nor_i32:
3625    case INDEX_op_muluh_i32:
3626    case INDEX_op_mulsh_i32:
3627    case INDEX_op_orc_i64:
3628    case INDEX_op_eqv_i64:
3629    case INDEX_op_nand_i64:
3630    case INDEX_op_nor_i64:
3631    case INDEX_op_div_i64:
3632    case INDEX_op_divu_i64:
3633    case INDEX_op_rem_i64:
3634    case INDEX_op_remu_i64:
3635    case INDEX_op_mulsh_i64:
3636    case INDEX_op_muluh_i64:
3637        return C_O1_I2(r, r, r);
3638
3639    case INDEX_op_sub_i32:
3640        return C_O1_I2(r, rI, ri);
3641    case INDEX_op_add_i64:
3642        return C_O1_I2(r, r, rT);
3643    case INDEX_op_or_i64:
3644    case INDEX_op_xor_i64:
3645        return C_O1_I2(r, r, rU);
3646    case INDEX_op_sub_i64:
3647        return C_O1_I2(r, rI, rT);
3648    case INDEX_op_clz_i32:
3649    case INDEX_op_ctz_i32:
3650    case INDEX_op_clz_i64:
3651    case INDEX_op_ctz_i64:
3652        return C_O1_I2(r, r, rZW);
3653
3654    case INDEX_op_brcond_i32:
3655    case INDEX_op_brcond_i64:
3656        return C_O0_I2(r, ri);
3657
3658    case INDEX_op_movcond_i32:
3659    case INDEX_op_movcond_i64:
3660        return C_O1_I4(r, r, ri, rZ, rZ);
3661    case INDEX_op_deposit_i32:
3662    case INDEX_op_deposit_i64:
3663        return C_O1_I2(r, 0, rZ);
3664    case INDEX_op_brcond2_i32:
3665        return C_O0_I4(r, r, ri, ri);
3666    case INDEX_op_setcond2_i32:
3667        return C_O1_I4(r, r, r, ri, ri);
3668    case INDEX_op_add2_i64:
3669    case INDEX_op_add2_i32:
3670        return C_O2_I4(r, r, r, r, rI, rZM);
3671    case INDEX_op_sub2_i64:
3672    case INDEX_op_sub2_i32:
3673        return C_O2_I4(r, r, rI, rZM, r, r);
3674
3675    case INDEX_op_qemu_ld_i32:
3676        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3677                ? C_O1_I1(r, r)
3678                : C_O1_I2(r, r, r));
3679
3680    case INDEX_op_qemu_st_i32:
3681        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3682                ? C_O0_I2(r, r)
3683                : C_O0_I3(r, r, r));
3684
3685    case INDEX_op_qemu_ld_i64:
3686        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
3687                : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, r)
3688                : C_O2_I2(r, r, r, r));
3689
3690    case INDEX_op_qemu_st_i64:
3691        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
3692                : TARGET_LONG_BITS == 32 ? C_O0_I3(r, r, r)
3693                : C_O0_I4(r, r, r, r));
3694
3695    case INDEX_op_add_vec:
3696    case INDEX_op_sub_vec:
3697    case INDEX_op_mul_vec:
3698    case INDEX_op_and_vec:
3699    case INDEX_op_or_vec:
3700    case INDEX_op_xor_vec:
3701    case INDEX_op_andc_vec:
3702    case INDEX_op_orc_vec:
3703    case INDEX_op_nor_vec:
3704    case INDEX_op_eqv_vec:
3705    case INDEX_op_nand_vec:
3706    case INDEX_op_cmp_vec:
3707    case INDEX_op_ssadd_vec:
3708    case INDEX_op_sssub_vec:
3709    case INDEX_op_usadd_vec:
3710    case INDEX_op_ussub_vec:
3711    case INDEX_op_smax_vec:
3712    case INDEX_op_smin_vec:
3713    case INDEX_op_umax_vec:
3714    case INDEX_op_umin_vec:
3715    case INDEX_op_shlv_vec:
3716    case INDEX_op_shrv_vec:
3717    case INDEX_op_sarv_vec:
3718    case INDEX_op_rotlv_vec:
3719    case INDEX_op_rotrv_vec:
3720    case INDEX_op_ppc_mrgh_vec:
3721    case INDEX_op_ppc_mrgl_vec:
3722    case INDEX_op_ppc_muleu_vec:
3723    case INDEX_op_ppc_mulou_vec:
3724    case INDEX_op_ppc_pkum_vec:
3725    case INDEX_op_dup2_vec:
3726        return C_O1_I2(v, v, v);
3727
3728    case INDEX_op_not_vec:
3729    case INDEX_op_neg_vec:
3730        return C_O1_I1(v, v);
3731
3732    case INDEX_op_dup_vec:
3733        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
3734
3735    case INDEX_op_ld_vec:
3736    case INDEX_op_dupm_vec:
3737        return C_O1_I1(v, r);
3738
3739    case INDEX_op_st_vec:
3740        return C_O0_I2(v, r);
3741
3742    case INDEX_op_bitsel_vec:
3743    case INDEX_op_ppc_msum_vec:
3744        return C_O1_I3(v, v, v, v);
3745
3746    default:
3747        g_assert_not_reached();
3748    }
3749}
3750
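/*
 * Probe the host via AT_HWCAP/AT_HWCAP2 to select the highest ISA
 * level present, note Altivec/VSX and isel availability, and set up
 * the available, call-clobbered and reserved register sets.
 */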
3751static void tcg_target_init(TCGContext *s)
3752{
3753    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3754    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3755
3756    have_isa = tcg_isa_base;
3757    if (hwcap & PPC_FEATURE_ARCH_2_06) {
3758        have_isa = tcg_isa_2_06;
3759    }
3760#ifdef PPC_FEATURE2_ARCH_2_07
3761    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
3762        have_isa = tcg_isa_2_07;
3763    }
3764#endif
3765#ifdef PPC_FEATURE2_ARCH_3_00
3766    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
3767        have_isa = tcg_isa_3_00;
3768    }
3769#endif
3770#ifdef PPC_FEATURE2_ARCH_3_10
3771    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
3772        have_isa = tcg_isa_3_10;
3773    }
3774#endif
3775
3776#ifdef PPC_FEATURE2_HAS_ISEL
3777    /* Prefer explicit instruction from the kernel. */
3778    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
3779#else
3780    /* Fall back to knowing Power7 (2.06) has ISEL. */
3781    have_isel = have_isa_2_06;
3782#endif
3783
3784    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
3785        have_altivec = true;
3786        /* We only care about the portion of VSX that overlaps Altivec. */
3787        if (hwcap & PPC_FEATURE_HAS_VSX) {
3788            have_vsx = true;
3789        }
3790    }
3791
3792    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3793    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3794    if (have_altivec) {
3795        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3796        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3797    }
3798
3799    tcg_target_call_clobber_regs = 0;
3800    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3801    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3802    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3803    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3804    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3805    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3806    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
3807    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3808    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3809    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3810    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3811    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
3812
3813    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3814    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3815    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3816    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3817    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3818    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3819    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3820    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3821    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
3822    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
3823    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
3824    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
3825    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
3826    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
3827    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
3828    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
3829    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3830    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3831    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3832    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3833
3834    s->reserved_regs = 0;
3835    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
3836    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
3837#if defined(_CALL_SYSV)
3838    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
3839#endif
3840#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
3841    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
3842#endif
3843    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
3844    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
3845    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
3846    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
3847    if (USE_REG_TB) {
3848        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);  /* tb->tc_ptr */
3849    }
3850}
3851
3852#ifdef __ELF__
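/*
 * Minimal DWARF unwind info (CIE + FDE) describing the frame built by
 * the TCG prologue: CFA at r1 + FRAME_SIZE, return address in the link
 * register (column 65), with the callee-saved GPR save slots filled in
 * at runtime by tcg_register_jit().
 */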
3853typedef struct {
3854    DebugFrameCIE cie;
3855    DebugFrameFDEHeader fde;
3856    uint8_t fde_def_cfa[4];
3857    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
3858} DebugFrame;
3859
3860/* We're expecting a 2 byte uleb128 encoded value.  */
3861QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3862
3863#if TCG_TARGET_REG_BITS == 64
3864# define ELF_HOST_MACHINE EM_PPC64
3865#else
3866# define ELF_HOST_MACHINE EM_PPC
3867#endif
3868
3869static DebugFrame debug_frame = {
3870    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3871    .cie.id = -1,
3872    .cie.version = 1,
3873    .cie.code_align = 1,
3874    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
3875    .cie.return_column = 65,
3876
3877    /* Total FDE size does not include the "len" member.  */
3878    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
3879
3880    .fde_def_cfa = {
3881        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
3882        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3883        (FRAME_SIZE >> 7)
3884    },
3885    .fde_reg_ofs = {
3886        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
3887        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
3888    }
3889};
3890
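/*
 * Record the frame offset of each callee-saved register in the FDE,
 * then pass the completed unwind info to the common registration code.
 */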
3891void tcg_register_jit(const void *buf, size_t buf_size)
3892{
3893    uint8_t *p = &debug_frame.fde_reg_ofs[3];
3894    int i;
3895
3896    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
3897        p[0] = 0x80 + tcg_target_callee_save_regs[i];
3898        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
3899    }
3900
3901    debug_frame.fde.func_start = (uintptr_t)buf;
3902    debug_frame.fde.func_len = buf_size;
3903
3904    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3905}
3906#endif /* __ELF__ */
3914