xref: /openbmc/qemu/tcg/ppc/tcg-target.c.inc (revision 0cadc1eda1a3120c37c713ab6d6b7a02da0d2e6f)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25#include "elf.h"
26#include "../tcg-pool.c.inc"
27#include "../tcg-ldst.c.inc"
28
29/*
30 * Standardize on the _CALL_FOO symbols used by GCC:
31 * Apple XCode does not define _CALL_DARWIN.
32 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
33 */
34#if !defined(_CALL_SYSV) && \
35    !defined(_CALL_DARWIN) && \
36    !defined(_CALL_AIX) && \
37    !defined(_CALL_ELF)
38# if defined(__APPLE__)
39#  define _CALL_DARWIN
40# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
41#  define _CALL_SYSV
42# else
43#  error "Unknown ABI"
44# endif
45#endif
46
47#if TCG_TARGET_REG_BITS == 64
48# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
49# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_NORMAL
50#else
51# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
52# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_BY_REF
53#endif
54#ifdef _CALL_SYSV
55# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
56# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_BY_REF
57#else
58# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
59# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_NORMAL
60#endif
61
62/* For some memory operations, we need a scratch that isn't R0.  For the AIX
63   calling convention, we can re-use the TOC register since we'll be reloading
64   it at every call.  Otherwise R12 will do nicely as neither a call-saved
65   register nor a parameter register.  */
66#ifdef _CALL_AIX
67# define TCG_REG_TMP1   TCG_REG_R2
68#else
69# define TCG_REG_TMP1   TCG_REG_R12
70#endif
71#define TCG_REG_TMP2    TCG_REG_R11
72
73#define TCG_VEC_TMP1    TCG_REG_V0
74#define TCG_VEC_TMP2    TCG_REG_V1
75
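/* TCG_REG_TB holds the start address of the current TranslationBlock, so
   that 64-bit hosts can address the constant pool and nearby code with
   short TB-relative displacements; see USE_REG_TB below.  */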
76#define TCG_REG_TB     TCG_REG_R31
77#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64)
78
79/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
80#define SZP  ((int)sizeof(void *))
81
82/* Shorthand for size of a register.  */
83#define SZR  (TCG_TARGET_REG_BITS / 8)
84
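/* Constant operand constraints, tested in tcg_target_const_match().  */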
85#define TCG_CT_CONST_S16  0x100
86#define TCG_CT_CONST_S32  0x400
87#define TCG_CT_CONST_U32  0x800
88#define TCG_CT_CONST_ZERO 0x1000
89#define TCG_CT_CONST_MONE 0x2000
90#define TCG_CT_CONST_WSZ  0x4000
91
92#define ALL_GENERAL_REGS  0xffffffffu
93#define ALL_VECTOR_REGS   0xffffffff00000000ull
94
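/* Host ISA feature flags, detected at startup by tcg_target_init().  */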
95TCGPowerISA have_isa;
96static bool have_isel;
97bool have_altivec;
98bool have_vsx;
99
100#ifndef CONFIG_SOFTMMU
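/* For user-only builds, guest_base is kept live in r30; it is loaded by the
   prologue when non-zero and the register is then reserved.  */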
101#define TCG_GUEST_BASE_REG 30
102#endif
103
104#ifdef CONFIG_DEBUG_TCG
105static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
106    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
107    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
108    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
109    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
110    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
111    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
112    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
113    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
114};
115#endif
116
117static const int tcg_target_reg_alloc_order[] = {
118    TCG_REG_R14,  /* call saved registers */
119    TCG_REG_R15,
120    TCG_REG_R16,
121    TCG_REG_R17,
122    TCG_REG_R18,
123    TCG_REG_R19,
124    TCG_REG_R20,
125    TCG_REG_R21,
126    TCG_REG_R22,
127    TCG_REG_R23,
128    TCG_REG_R24,
129    TCG_REG_R25,
130    TCG_REG_R26,
131    TCG_REG_R27,
132    TCG_REG_R28,
133    TCG_REG_R29,
134    TCG_REG_R30,
135    TCG_REG_R31,
136    TCG_REG_R12,  /* call clobbered, non-arguments */
137    TCG_REG_R11,
138    TCG_REG_R2,
139    TCG_REG_R13,
140    TCG_REG_R10,  /* call clobbered, arguments */
141    TCG_REG_R9,
142    TCG_REG_R8,
143    TCG_REG_R7,
144    TCG_REG_R6,
145    TCG_REG_R5,
146    TCG_REG_R4,
147    TCG_REG_R3,
148
149    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
150    TCG_REG_V2,   /* call clobbered, vectors */
151    TCG_REG_V3,
152    TCG_REG_V4,
153    TCG_REG_V5,
154    TCG_REG_V6,
155    TCG_REG_V7,
156    TCG_REG_V8,
157    TCG_REG_V9,
158    TCG_REG_V10,
159    TCG_REG_V11,
160    TCG_REG_V12,
161    TCG_REG_V13,
162    TCG_REG_V14,
163    TCG_REG_V15,
164    TCG_REG_V16,
165    TCG_REG_V17,
166    TCG_REG_V18,
167    TCG_REG_V19,
168};
169
170static const int tcg_target_call_iarg_regs[] = {
171    TCG_REG_R3,
172    TCG_REG_R4,
173    TCG_REG_R5,
174    TCG_REG_R6,
175    TCG_REG_R7,
176    TCG_REG_R8,
177    TCG_REG_R9,
178    TCG_REG_R10
179};
180
181static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
182{
183    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
184    tcg_debug_assert(slot >= 0 && slot <= 1);
185    return TCG_REG_R3 + slot;
186}
187
188static const int tcg_target_callee_save_regs[] = {
189#ifdef _CALL_DARWIN
190    TCG_REG_R11,
191#endif
192    TCG_REG_R14,
193    TCG_REG_R15,
194    TCG_REG_R16,
195    TCG_REG_R17,
196    TCG_REG_R18,
197    TCG_REG_R19,
198    TCG_REG_R20,
199    TCG_REG_R21,
200    TCG_REG_R22,
201    TCG_REG_R23,
202    TCG_REG_R24,
203    TCG_REG_R25,
204    TCG_REG_R26,
205    TCG_REG_R27, /* currently used for the global env */
206    TCG_REG_R28,
207    TCG_REG_R29,
208    TCG_REG_R30,
209    TCG_REG_R31
210};
211
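/* The I-form branch holds a signed 26-bit byte displacement (low two bits
   zero), i.e. it can reach targets within +/-32 MiB of the insn.  */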
212static inline bool in_range_b(tcg_target_long target)
213{
214    return target == sextract64(target, 0, 26);
215}
216
217static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
218                               const tcg_insn_unit *target)
219{
220    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
221    tcg_debug_assert(in_range_b(disp));
222    return disp & 0x3fffffc;
223}
224
225static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
226{
227    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
228    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
229
230    if (in_range_b(disp)) {
231        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
232        return true;
233    }
234    return false;
235}
236
237static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
238                               const tcg_insn_unit *target)
239{
240    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
241    tcg_debug_assert(disp == (int16_t) disp);
242    return disp & 0xfffc;
243}
244
245static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
246{
247    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
248    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
249
250    if (disp == (int16_t) disp) {
251        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
252        return true;
253    }
254    return false;
255}
256
257/* test if a constant matches the constraint */
258static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
259{
260    if (ct & TCG_CT_CONST) {
261        return 1;
262    }
263
264    /* The only 32-bit constraint we use aside from
265       TCG_CT_CONST is TCG_CT_CONST_S16.  */
266    if (type == TCG_TYPE_I32) {
267        val = (int32_t)val;
268    }
269
270    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
271        return 1;
272    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
273        return 1;
274    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
275        return 1;
276    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
277        return 1;
278    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
279        return 1;
280    } else if ((ct & TCG_CT_CONST_WSZ)
281               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
282        return 1;
283    }
284    return 0;
285}
286
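/* Instruction builders.  The primary opcode occupies the top 6 bits of the
   insn; the XOnn, MDnn and VX4 helpers fill in the extended-opcode field of
   the corresponding instruction form, leaving the register fields and the
   Rc/LK bits to be ORed in later.  */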
287#define OPCD(opc) ((opc)<<26)
288#define XO19(opc) (OPCD(19)|((opc)<<1))
289#define MD30(opc) (OPCD(30)|((opc)<<2))
290#define MDS30(opc) (OPCD(30)|((opc)<<1))
291#define XO31(opc) (OPCD(31)|((opc)<<1))
292#define XO58(opc) (OPCD(58)|(opc))
293#define XO62(opc) (OPCD(62)|(opc))
294#define VX4(opc)  (OPCD(4)|(opc))
295
296#define B      OPCD( 18)
297#define BC     OPCD( 16)
298#define LBZ    OPCD( 34)
299#define LHZ    OPCD( 40)
300#define LHA    OPCD( 42)
301#define LWZ    OPCD( 32)
302#define LWZUX  XO31( 55)
303#define STB    OPCD( 38)
304#define STH    OPCD( 44)
305#define STW    OPCD( 36)
306
307#define STD    XO62(  0)
308#define STDU   XO62(  1)
309#define STDX   XO31(149)
310
311#define LD     XO58(  0)
312#define LDX    XO31( 21)
313#define LDU    XO58(  1)
314#define LDUX   XO31( 53)
315#define LWA    XO58(  2)
316#define LWAX   XO31(341)
317
318#define ADDIC  OPCD( 12)
319#define ADDI   OPCD( 14)
320#define ADDIS  OPCD( 15)
321#define ORI    OPCD( 24)
322#define ORIS   OPCD( 25)
323#define XORI   OPCD( 26)
324#define XORIS  OPCD( 27)
325#define ANDI   OPCD( 28)
326#define ANDIS  OPCD( 29)
327#define MULLI  OPCD(  7)
328#define CMPLI  OPCD( 10)
329#define CMPI   OPCD( 11)
330#define SUBFIC OPCD( 8)
331
332#define LWZU   OPCD( 33)
333#define STWU   OPCD( 37)
334
335#define RLWIMI OPCD( 20)
336#define RLWINM OPCD( 21)
337#define RLWNM  OPCD( 23)
338
339#define RLDICL MD30(  0)
340#define RLDICR MD30(  1)
341#define RLDIMI MD30(  3)
342#define RLDCL  MDS30( 8)
343
344#define BCLR   XO19( 16)
345#define BCCTR  XO19(528)
346#define CRAND  XO19(257)
347#define CRANDC XO19(129)
348#define CRNAND XO19(225)
349#define CROR   XO19(449)
350#define CRNOR  XO19( 33)
351
352#define EXTSB  XO31(954)
353#define EXTSH  XO31(922)
354#define EXTSW  XO31(986)
355#define ADD    XO31(266)
356#define ADDE   XO31(138)
357#define ADDME  XO31(234)
358#define ADDZE  XO31(202)
359#define ADDC   XO31( 10)
360#define AND    XO31( 28)
361#define SUBF   XO31( 40)
362#define SUBFC  XO31(  8)
363#define SUBFE  XO31(136)
364#define SUBFME XO31(232)
365#define SUBFZE XO31(200)
366#define OR     XO31(444)
367#define XOR    XO31(316)
368#define MULLW  XO31(235)
369#define MULHW  XO31( 75)
370#define MULHWU XO31( 11)
371#define DIVW   XO31(491)
372#define DIVWU  XO31(459)
373#define MODSW  XO31(779)
374#define MODUW  XO31(267)
375#define CMP    XO31(  0)
376#define CMPL   XO31( 32)
377#define LHBRX  XO31(790)
378#define LWBRX  XO31(534)
379#define LDBRX  XO31(532)
380#define STHBRX XO31(918)
381#define STWBRX XO31(662)
382#define STDBRX XO31(660)
383#define MFSPR  XO31(339)
384#define MTSPR  XO31(467)
385#define SRAWI  XO31(824)
386#define NEG    XO31(104)
387#define MFCR   XO31( 19)
388#define MFOCRF (MFCR | (1u << 20))
389#define NOR    XO31(124)
390#define CNTLZW XO31( 26)
391#define CNTLZD XO31( 58)
392#define CNTTZW XO31(538)
393#define CNTTZD XO31(570)
394#define CNTPOPW XO31(378)
395#define CNTPOPD XO31(506)
396#define ANDC   XO31( 60)
397#define ORC    XO31(412)
398#define EQV    XO31(284)
399#define NAND   XO31(476)
400#define ISEL   XO31( 15)
401
402#define MULLD  XO31(233)
403#define MULHD  XO31( 73)
404#define MULHDU XO31(  9)
405#define DIVD   XO31(489)
406#define DIVDU  XO31(457)
407#define MODSD  XO31(777)
408#define MODUD  XO31(265)
409
410#define LBZX   XO31( 87)
411#define LHZX   XO31(279)
412#define LHAX   XO31(343)
413#define LWZX   XO31( 23)
414#define STBX   XO31(215)
415#define STHX   XO31(407)
416#define STWX   XO31(151)
417
418#define EIEIO  XO31(854)
419#define HWSYNC XO31(598)
420#define LWSYNC (HWSYNC | (1u << 21))
421
422#define SPR(a, b) ((((a)<<5)|(b))<<11)
423#define LR     SPR(8, 0)
424#define CTR    SPR(9, 0)
425
426#define SLW    XO31( 24)
427#define SRW    XO31(536)
428#define SRAW   XO31(792)
429
430#define SLD    XO31( 27)
431#define SRD    XO31(539)
432#define SRAD   XO31(794)
433#define SRADI  XO31(413<<1)
434
435#define BRH    XO31(219)
436#define BRW    XO31(155)
437#define BRD    XO31(187)
438
439#define TW     XO31( 4)
440#define TRAP   (TW | TO(31))
441
442#define NOP    ORI  /* ori 0,0,0 */
443
444#define LVX        XO31(103)
445#define LVEBX      XO31(7)
446#define LVEHX      XO31(39)
447#define LVEWX      XO31(71)
448#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
449#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
450#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
451#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
452#define LXSD       (OPCD(57) | 2)   /* v3.00 */
453#define LXVWSX     (XO31(364) | 1)  /* v3.00, force tx=1 */
454
455#define STVX       XO31(231)
456#define STVEWX     XO31(199)
457#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
458#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
459#define STXV       (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
460#define STXSD      (OPCD(61) | 2)   /* v3.00 */
461
462#define VADDSBS    VX4(768)
463#define VADDUBS    VX4(512)
464#define VADDUBM    VX4(0)
465#define VADDSHS    VX4(832)
466#define VADDUHS    VX4(576)
467#define VADDUHM    VX4(64)
468#define VADDSWS    VX4(896)
469#define VADDUWS    VX4(640)
470#define VADDUWM    VX4(128)
471#define VADDUDM    VX4(192)       /* v2.07 */
472
473#define VSUBSBS    VX4(1792)
474#define VSUBUBS    VX4(1536)
475#define VSUBUBM    VX4(1024)
476#define VSUBSHS    VX4(1856)
477#define VSUBUHS    VX4(1600)
478#define VSUBUHM    VX4(1088)
479#define VSUBSWS    VX4(1920)
480#define VSUBUWS    VX4(1664)
481#define VSUBUWM    VX4(1152)
482#define VSUBUDM    VX4(1216)      /* v2.07 */
483
484#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
485#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */
486
487#define VMAXSB     VX4(258)
488#define VMAXSH     VX4(322)
489#define VMAXSW     VX4(386)
490#define VMAXSD     VX4(450)       /* v2.07 */
491#define VMAXUB     VX4(2)
492#define VMAXUH     VX4(66)
493#define VMAXUW     VX4(130)
494#define VMAXUD     VX4(194)       /* v2.07 */
495#define VMINSB     VX4(770)
496#define VMINSH     VX4(834)
497#define VMINSW     VX4(898)
498#define VMINSD     VX4(962)       /* v2.07 */
499#define VMINUB     VX4(514)
500#define VMINUH     VX4(578)
501#define VMINUW     VX4(642)
502#define VMINUD     VX4(706)       /* v2.07 */
503
504#define VCMPEQUB   VX4(6)
505#define VCMPEQUH   VX4(70)
506#define VCMPEQUW   VX4(134)
507#define VCMPEQUD   VX4(199)       /* v2.07 */
508#define VCMPGTSB   VX4(774)
509#define VCMPGTSH   VX4(838)
510#define VCMPGTSW   VX4(902)
511#define VCMPGTSD   VX4(967)       /* v2.07 */
512#define VCMPGTUB   VX4(518)
513#define VCMPGTUH   VX4(582)
514#define VCMPGTUW   VX4(646)
515#define VCMPGTUD   VX4(711)       /* v2.07 */
516#define VCMPNEB    VX4(7)         /* v3.00 */
517#define VCMPNEH    VX4(71)        /* v3.00 */
518#define VCMPNEW    VX4(135)       /* v3.00 */
519
520#define VSLB       VX4(260)
521#define VSLH       VX4(324)
522#define VSLW       VX4(388)
523#define VSLD       VX4(1476)      /* v2.07 */
524#define VSRB       VX4(516)
525#define VSRH       VX4(580)
526#define VSRW       VX4(644)
527#define VSRD       VX4(1732)      /* v2.07 */
528#define VSRAB      VX4(772)
529#define VSRAH      VX4(836)
530#define VSRAW      VX4(900)
531#define VSRAD      VX4(964)       /* v2.07 */
532#define VRLB       VX4(4)
533#define VRLH       VX4(68)
534#define VRLW       VX4(132)
535#define VRLD       VX4(196)       /* v2.07 */
536
537#define VMULEUB    VX4(520)
538#define VMULEUH    VX4(584)
539#define VMULEUW    VX4(648)       /* v2.07 */
540#define VMULOUB    VX4(8)
541#define VMULOUH    VX4(72)
542#define VMULOUW    VX4(136)       /* v2.07 */
543#define VMULUWM    VX4(137)       /* v2.07 */
544#define VMULLD     VX4(457)       /* v3.10 */
545#define VMSUMUHM   VX4(38)
546
547#define VMRGHB     VX4(12)
548#define VMRGHH     VX4(76)
549#define VMRGHW     VX4(140)
550#define VMRGLB     VX4(268)
551#define VMRGLH     VX4(332)
552#define VMRGLW     VX4(396)
553
554#define VPKUHUM    VX4(14)
555#define VPKUWUM    VX4(78)
556
557#define VAND       VX4(1028)
558#define VANDC      VX4(1092)
559#define VNOR       VX4(1284)
560#define VOR        VX4(1156)
561#define VXOR       VX4(1220)
562#define VEQV       VX4(1668)      /* v2.07 */
563#define VNAND      VX4(1412)      /* v2.07 */
564#define VORC       VX4(1348)      /* v2.07 */
565
566#define VSPLTB     VX4(524)
567#define VSPLTH     VX4(588)
568#define VSPLTW     VX4(652)
569#define VSPLTISB   VX4(780)
570#define VSPLTISH   VX4(844)
571#define VSPLTISW   VX4(908)
572
573#define VSLDOI     VX4(44)
574
575#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
576#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
577#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
578
579#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
580#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
581#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
582#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
583#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
584#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */
585
586#define RT(r) ((r)<<21)
587#define RS(r) ((r)<<21)
588#define RA(r) ((r)<<16)
589#define RB(r) ((r)<<11)
590#define TO(t) ((t)<<21)
591#define SH(s) ((s)<<11)
592#define MB(b) ((b)<<6)
593#define ME(e) ((e)<<1)
594#define BO(o) ((o)<<21)
595#define MB64(b) ((b)<<5)
596#define FXM(b) (1 << (19 - (b)))
597
598#define VRT(r)  (((r) & 31) << 21)
599#define VRA(r)  (((r) & 31) << 16)
600#define VRB(r)  (((r) & 31) << 11)
601#define VRC(r)  (((r) & 31) <<  6)
602
603#define LK    1
604
605#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
606#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
607#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
608#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
609
610#define BF(n)    ((n)<<23)
611#define BI(n, c) (((c)+((n)*4))<<16)
612#define BT(n, c) (((c)+((n)*4))<<21)
613#define BA(n, c) (((c)+((n)*4))<<16)
614#define BB(n, c) (((c)+((n)*4))<<11)
615#define BC_(n, c) (((c)+((n)*4))<<6)
616
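/* BO field values: 12 = branch if the CR bit is set, 4 = branch if it is
   clear, 20 = branch always.  */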
617#define BO_COND_TRUE  BO(12)
618#define BO_COND_FALSE BO( 4)
619#define BO_ALWAYS     BO(20)
620
621enum {
622    CR_LT,
623    CR_GT,
624    CR_EQ,
625    CR_SO
626};
627
628static const uint32_t tcg_to_bc[] = {
629    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
630    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
631    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
632    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
633    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
634    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
635    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
636    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
637    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
638    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
639};
640
641/* The low bit here is set if the RA and RB fields must be inverted.  */
642static const uint32_t tcg_to_isel[] = {
643    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
644    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
645    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
646    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
647    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
648    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
649    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
650    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
651    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
652    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
653};
654
655static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
656                        intptr_t value, intptr_t addend)
657{
658    const tcg_insn_unit *target;
659    int16_t lo;
660    int32_t hi;
661
662    value += addend;
663    target = (const tcg_insn_unit *)value;
664
665    switch (type) {
666    case R_PPC_REL14:
667        return reloc_pc14(code_ptr, target);
668    case R_PPC_REL24:
669        return reloc_pc24(code_ptr, target);
670    case R_PPC_ADDR16:
671        /*
672         * We are (slightly) abusing this relocation type.  In particular,
673         * assert that the low 2 bits are zero, and do not modify them.
674         * That way we can use this with LD et al that have opcode bits
675         * in the low 2 bits of the insn.
676         */
677        if ((value & 3) || value != (int16_t)value) {
678            return false;
679        }
680        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
681        break;
682    case R_PPC_ADDR32:
683        /*
684         * We are abusing this relocation type.  Again, this points to
685         * a pair of insns, lis + load.  This is an absolute address
686         * relocation for PPC32 so the lis cannot be removed.
687         */
688        lo = value;
689        hi = value - lo;
690        if (hi + lo != value) {
691            return false;
692        }
693        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
694        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
695        break;
696    default:
697        g_assert_not_reached();
698    }
699    return true;
700}
701
702static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
703                             TCGReg base, tcg_target_long offset);
704
705static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
706{
707    if (ret == arg) {
708        return true;
709    }
710    switch (type) {
711    case TCG_TYPE_I64:
712        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
713        /* fallthru */
714    case TCG_TYPE_I32:
715        if (ret < TCG_REG_V0) {
716            if (arg < TCG_REG_V0) {
717                tcg_out32(s, OR | SAB(arg, ret, arg));
718                break;
719            } else if (have_isa_2_07) {
720                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
721                          | VRT(arg) | RA(ret));
722                break;
723            } else {
724                /* Altivec does not support vector->integer moves.  */
725                return false;
726            }
727        } else if (arg < TCG_REG_V0) {
728            if (have_isa_2_07) {
729                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
730                          | VRT(ret) | RA(arg));
731                break;
732            } else {
733                /* Altivec does not support integer->vector moves.  */
734                return false;
735            }
736        }
737        /* fallthru */
738    case TCG_TYPE_V64:
739    case TCG_TYPE_V128:
740        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
741        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
742        break;
743    default:
744        g_assert_not_reached();
745    }
746    return true;
747}
748
749static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
750                               int sh, int mb)
751{
752    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
753    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
754    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
755    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
756}
757
758static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
759                               int sh, int mb, int me)
760{
761    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
762}
763
764static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
765{
766    tcg_out32(s, EXTSB | RA(dst) | RS(src));
767}
768
769static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
770{
771    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
772}
773
774static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
775{
776    tcg_out32(s, EXTSH | RA(dst) | RS(src));
777}
778
779static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
780{
781    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
782}
783
784static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
785{
786    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
787    tcg_out32(s, EXTSW | RA(dst) | RS(src));
788}
789
790static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
791{
792    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
793    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
794}
795
796static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
797{
798    tcg_out_ext32s(s, dst, src);
799}
800
801static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
802{
803    tcg_out_ext32u(s, dst, src);
804}
805
806static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
807{
808    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
809    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
810}
811
812static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
813{
814    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
815}
816
817static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
818{
819    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
820}
821
822static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
823{
824    /* Limit immediate shift count lest we create an illegal insn.  */
825    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
826}
827
828static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
829{
830    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
831}
832
833static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
834{
835    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
836}
837
838static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
839{
840    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
841}
842
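/* Byte-swap the low 16 bits of src into dst.  The flags describe the bits
   above the swapped halfword: TCG_BSWAP_IZ means the input is already
   zero-extended, TCG_BSWAP_OZ / TCG_BSWAP_OS request a zero- or
   sign-extended result.  */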
843static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
844{
845    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
846
847    if (have_isa_3_10) {
848        tcg_out32(s, BRH | RA(dst) | RS(src));
849        if (flags & TCG_BSWAP_OS) {
850            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
851        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
852            tcg_out_ext16u(s, dst, dst);
853        }
854        return;
855    }
856
857    /*
858     * In the following,
859     *   dep(a, b, m) -> (a & ~m) | (b & m)
860     *
861     * Begin with:                              src = xxxxabcd
862     */
863    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
864    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
865    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
866    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
867
868    if (flags & TCG_BSWAP_OS) {
869        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
870    } else {
871        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
872    }
873}
874
875static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
876{
877    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
878
879    if (have_isa_3_10) {
880        tcg_out32(s, BRW | RA(dst) | RS(src));
881        if (flags & TCG_BSWAP_OS) {
882            tcg_out_ext32s(s, dst, dst);
883        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
884            tcg_out_ext32u(s, dst, dst);
885        }
886        return;
887    }
888
889    /*
890     * Stolen from gcc's builtin_bswap32.
891     * In the following,
892     *   dep(a, b, m) -> (a & ~m) | (b & m)
893     *
894     * Begin with:                              src = xxxxabcd
895     */
896    /* tmp = rol32(src, 8) & 0xffffffff             = 0000bcda */
897    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
898    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)   = 0000dcda */
899    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
900    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)   = 0000dcba */
901    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
902
903    if (flags & TCG_BSWAP_OS) {
904        tcg_out_ext32s(s, dst, tmp);
905    } else {
906        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
907    }
908}
909
910static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
911{
912    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
913    TCGReg t1 = dst == src ? dst : TCG_REG_R0;
914
915    if (have_isa_3_10) {
916        tcg_out32(s, BRD | RA(dst) | RS(src));
917        return;
918    }
919
920    /*
921     * In the following,
922     *   dep(a, b, m) -> (a & ~m) | (b & m)
923     *
924     * Begin with:                              src = abcdefgh
925     */
926    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
927    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
928    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
929    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
930    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
931    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
932
933    /* t0 = rol64(t0, 32)                           = hgfe0000 */
934    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
935    /* t1 = rol64(src, 32)                          = efghabcd */
936    tcg_out_rld(s, RLDICL, t1, src, 32, 0);
937
938    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
939    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
940    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
941    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
942    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
943    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
944
945    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
946}
947
948/* Emit a move of arg into ret, if it can be done in one insn.  */
949static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
950{
951    if (arg == (int16_t)arg) {
952        tcg_out32(s, ADDI | TAI(ret, 0, arg));
953        return true;
954    }
955    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
956        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
957        return true;
958    }
959    return false;
960}
961
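/* Build an arbitrary constant in ret: try a single insn, a TB-relative
   ADDI, the two-insn 32-bit forms, shifted or masked 16-bit values, the
   constant pool, and finally a full 64-bit sequence.  */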
962static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
963                             tcg_target_long arg, bool in_prologue)
964{
965    intptr_t tb_diff;
966    tcg_target_long tmp;
967    int shift;
968
969    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
970
971    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
972        arg = (int32_t)arg;
973    }
974
975    /* Load 16-bit immediates with one insn.  */
976    if (tcg_out_movi_one(s, ret, arg)) {
977        return;
978    }
979
980    /* Load addresses within the TB with one insn.  */
981    tb_diff = tcg_tbrel_diff(s, (void *)arg);
982    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
983        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
984        return;
985    }
986
987    /* Load 32-bit immediates with two insns.  Note that we've already
988       eliminated bare ADDIS, so we know both insns are required.  */
989    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
990        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
991        tcg_out32(s, ORI | SAI(ret, ret, arg));
992        return;
993    }
994    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
995        tcg_out32(s, ADDI | TAI(ret, 0, arg));
996        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
997        return;
998    }
999
1000    /* Load masked 16-bit value.  */
1001    if (arg > 0 && (arg & 0x8000)) {
1002        tmp = arg | 0x7fff;
1003        if ((tmp & (tmp + 1)) == 0) {
1004            int mb = clz64(tmp + 1) + 1;
1005            tcg_out32(s, ADDI | TAI(ret, 0, arg));
1006            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
1007            return;
1008        }
1009    }
1010
1011    /* Load common masks with 2 insns.  */
1012    shift = ctz64(arg);
1013    tmp = arg >> shift;
1014    if (tmp == (int16_t)tmp) {
1015        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
1016        tcg_out_shli64(s, ret, ret, shift);
1017        return;
1018    }
1019    shift = clz64(arg);
1020    if (tcg_out_movi_one(s, ret, arg << shift)) {
1021        tcg_out_shri64(s, ret, ret, shift);
1022        return;
1023    }
1024
1025    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns.  */
1026    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
1027        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
1028        return;
1029    }
1030
1031    /* Use the constant pool, if possible.  */
1032    if (!in_prologue && USE_REG_TB) {
1033        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1034                       tcg_tbrel_diff(s, NULL));
1035        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
1036        return;
1037    }
1038
1039    tmp = arg >> 31 >> 1;
1040    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1041    if (tmp) {
1042        tcg_out_shli64(s, ret, ret, 32);
1043    }
1044    if (arg & 0xffff0000) {
1045        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1046    }
1047    if (arg & 0xffff) {
1048        tcg_out32(s, ORI | SAI(ret, ret, arg));
1049    }
1050}
1051
1052static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1053                             TCGReg ret, int64_t val)
1054{
1055    uint32_t load_insn;
1056    int rel, low;
1057    intptr_t add;
1058
1059    switch (vece) {
1060    case MO_8:
1061        low = (int8_t)val;
1062        if (low >= -16 && low < 16) {
1063            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1064            return;
1065        }
1066        if (have_isa_3_00) {
1067            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1068            return;
1069        }
1070        break;
1071
1072    case MO_16:
1073        low = (int16_t)val;
1074        if (low >= -16 && low < 16) {
1075            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1076            return;
1077        }
1078        break;
1079
1080    case MO_32:
1081        low = (int32_t)val;
1082        if (low >= -16 && low < 16) {
1083            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1084            return;
1085        }
1086        break;
1087    }
1088
1089    /*
1090     * Otherwise we must load the value from the constant pool.
1091     */
1092    if (USE_REG_TB) {
1093        rel = R_PPC_ADDR16;
1094        add = tcg_tbrel_diff(s, NULL);
1095    } else {
1096        rel = R_PPC_ADDR32;
1097        add = 0;
1098    }
1099
1100    if (have_vsx) {
1101        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1102        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1103        if (TCG_TARGET_REG_BITS == 64) {
1104            new_pool_label(s, val, rel, s->code_ptr, add);
1105        } else {
1106            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1107        }
1108    } else {
1109        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1110        if (TCG_TARGET_REG_BITS == 64) {
1111            new_pool_l2(s, rel, s->code_ptr, add, val, val);
1112        } else {
1113            new_pool_l4(s, rel, s->code_ptr, add,
1114                        val >> 32, val, val >> 32, val);
1115        }
1116    }
1117
1118    if (USE_REG_TB) {
1119        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1120        load_insn |= RA(TCG_REG_TB);
1121    } else {
1122        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1123        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1124    }
1125    tcg_out32(s, load_insn);
1126}
1127
1128static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1129                         tcg_target_long arg)
1130{
1131    switch (type) {
1132    case TCG_TYPE_I32:
1133    case TCG_TYPE_I64:
1134        tcg_debug_assert(ret < TCG_REG_V0);
1135        tcg_out_movi_int(s, type, ret, arg, false);
1136        break;
1137
1138    default:
1139        g_assert_not_reached();
1140    }
1141}
1142
1143static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1144{
1145    return false;
1146}
1147
1148static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1149                             tcg_target_long imm)
1150{
1151    /* This function is only used for passing structs by reference. */
1152    g_assert_not_reached();
1153}
1154
1155static bool mask_operand(uint32_t c, int *mb, int *me)
1156{
1157    uint32_t lsb, test;
1158
1159    /* Accept a bit pattern like:
1160           0....01....1
1161           1....10....0
1162           0..01..10..0
1163       Keep track of the transitions.  */
1164    if (c == 0 || c == -1) {
1165        return false;
1166    }
1167    test = c;
1168    lsb = test & -test;
1169    test += lsb;
1170    if (test & (test - 1)) {
1171        return false;
1172    }
1173
1174    *me = clz32(lsb);
1175    *mb = test ? clz32(test & -test) + 1 : 0;
1176    return true;
1177}
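/* mask_operand example: c = 0x00ffff00 yields mb = 8, me = 23, i.e. the
   rlwinm mask bits 8..23 counted from the most-significant bit.  */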
1178
1179static bool mask64_operand(uint64_t c, int *mb, int *me)
1180{
1181    uint64_t lsb;
1182
1183    if (c == 0) {
1184        return false;
1185    }
1186
1187    lsb = c & -c;
1188    /* Accept 1..10..0.  */
1189    if (c == -lsb) {
1190        *mb = 0;
1191        *me = clz64(lsb);
1192        return true;
1193    }
1194    /* Accept 0..01..1.  */
1195    if (lsb == 1 && (c & (c + 1)) == 0) {
1196        *mb = clz64(c + 1) + 1;
1197        *me = 63;
1198        return true;
1199    }
1200    return false;
1201}
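/* Unlike mask_operand above, only masks anchored at bit 0 or bit 63 are
   accepted here, because RLDICL/RLDICR each supply a single mask boundary.  */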
1202
1203static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1204{
1205    int mb, me;
1206
1207    if (mask_operand(c, &mb, &me)) {
1208        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1209    } else if ((c & 0xffff) == c) {
1210        tcg_out32(s, ANDI | SAI(src, dst, c));
1211        return;
1212    } else if ((c & 0xffff0000) == c) {
1213        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1214        return;
1215    } else {
1216        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1217        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1218    }
1219}
1220
1221static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1222{
1223    int mb, me;
1224
1225    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1226    if (mask64_operand(c, &mb, &me)) {
1227        if (mb == 0) {
1228            tcg_out_rld(s, RLDICR, dst, src, 0, me);
1229        } else {
1230            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1231        }
1232    } else if ((c & 0xffff) == c) {
1233        tcg_out32(s, ANDI | SAI(src, dst, c));
1234        return;
1235    } else if ((c & 0xffff0000) == c) {
1236        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1237        return;
1238    } else {
1239        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1240        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1241    }
1242}
1243
1244static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1245                           int op_lo, int op_hi)
1246{
1247    if (c >> 16) {
1248        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1249        src = dst;
1250    }
1251    if (c & 0xffff) {
1252        tcg_out32(s, op_lo | SAI(src, dst, c));
1253        src = dst;
1254    }
1255}
1256
1257static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1258{
1259    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1260}
1261
1262static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1263{
1264    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1265}
1266
1267static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1268{
1269    ptrdiff_t disp = tcg_pcrel_diff(s, target);
1270    if (in_range_b(disp)) {
1271        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1272    } else {
1273        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1274        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1275        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1276    }
1277}
1278
1279static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1280                             TCGReg base, tcg_target_long offset)
1281{
1282    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1283    bool is_int_store = false;
1284    TCGReg rs = TCG_REG_TMP1;
1285
1286    switch (opi) {
1287    case LD: case LWA:
1288        align = 3;
1289        /* FALLTHRU */
1290    default:
1291        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1292            rs = rt;
1293            break;
1294        }
1295        break;
1296    case LXSD:
1297    case STXSD:
1298        align = 3;
1299        break;
1300    case LXV:
1301    case STXV:
1302        align = 15;
1303        break;
1304    case STD:
1305        align = 3;
1306        /* FALLTHRU */
1307    case STB: case STH: case STW:
1308        is_int_store = true;
1309        break;
1310    }
1311
1312    /* For unaligned, or very large offsets, use the indexed form.  */
1313    if (offset & align || offset != (int32_t)offset || opi == 0) {
1314        if (rs == base) {
1315            rs = TCG_REG_R0;
1316        }
1317        tcg_debug_assert(!is_int_store || rs != rt);
1318        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1319        tcg_out32(s, opx | TAB(rt & 31, base, rs));
1320        return;
1321    }
1322
1323    l0 = (int16_t)offset;
1324    offset = (offset - l0) >> 16;
1325    l1 = (int16_t)offset;
1326
1327    if (l1 < 0 && orig >= 0) {
1328        extra = 0x4000;
1329        l1 = (int16_t)(offset - 0x4000);
1330    }
1331    if (l1) {
1332        tcg_out32(s, ADDIS | TAI(rs, base, l1));
1333        base = rs;
1334    }
1335    if (extra) {
1336        tcg_out32(s, ADDIS | TAI(rs, base, extra));
1337        base = rs;
1338    }
1339    if (opi != ADDI || base != rt || l0 != 0) {
1340        tcg_out32(s, opi | TAI(rt & 31, base, l0));
1341    }
1342}
1343
1344static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1345                           TCGReg va, TCGReg vb, int shb)
1346{
1347    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1348}
1349
1350static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1351                       TCGReg base, intptr_t offset)
1352{
1353    int shift;
1354
1355    switch (type) {
1356    case TCG_TYPE_I32:
1357        if (ret < TCG_REG_V0) {
1358            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1359            break;
1360        }
1361        if (have_isa_2_07 && have_vsx) {
1362            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1363            break;
1364        }
1365        tcg_debug_assert((offset & 3) == 0);
1366        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1367        shift = (offset - 4) & 0xc;
1368        if (shift) {
1369            tcg_out_vsldoi(s, ret, ret, ret, shift);
1370        }
1371        break;
1372    case TCG_TYPE_I64:
1373        if (ret < TCG_REG_V0) {
1374            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1375            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1376            break;
1377        }
1378        /* fallthru */
1379    case TCG_TYPE_V64:
1380        tcg_debug_assert(ret >= TCG_REG_V0);
1381        if (have_vsx) {
1382            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1383                             ret, base, offset);
1384            break;
1385        }
1386        tcg_debug_assert((offset & 7) == 0);
1387        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1388        if (offset & 8) {
1389            tcg_out_vsldoi(s, ret, ret, ret, 8);
1390        }
1391        break;
1392    case TCG_TYPE_V128:
1393        tcg_debug_assert(ret >= TCG_REG_V0);
1394        tcg_debug_assert((offset & 15) == 0);
1395        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1396                         LVX, ret, base, offset);
1397        break;
1398    default:
1399        g_assert_not_reached();
1400    }
1401}
1402
1403static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1404                              TCGReg base, intptr_t offset)
1405{
1406    int shift;
1407
1408    switch (type) {
1409    case TCG_TYPE_I32:
1410        if (arg < TCG_REG_V0) {
1411            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1412            break;
1413        }
1414        if (have_isa_2_07 && have_vsx) {
1415            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1416            break;
1417        }
1418        tcg_debug_assert((offset & 3) == 0);
1420        shift = (offset - 4) & 0xc;
1421        if (shift) {
1422            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1423            arg = TCG_VEC_TMP1;
1424        }
1425        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1426        break;
1427    case TCG_TYPE_I64:
1428        if (arg < TCG_REG_V0) {
1429            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1430            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1431            break;
1432        }
1433        /* fallthru */
1434    case TCG_TYPE_V64:
1435        tcg_debug_assert(arg >= TCG_REG_V0);
1436        if (have_vsx) {
1437            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1438                             STXSDX, arg, base, offset);
1439            break;
1440        }
1441        tcg_debug_assert((offset & 7) == 0);
1442        if (offset & 8) {
1443            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1444            arg = TCG_VEC_TMP1;
1445        }
1446        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1447        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1448        break;
1449    case TCG_TYPE_V128:
1450        tcg_debug_assert(arg >= TCG_REG_V0);
1451        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1452                         STVX, arg, base, offset);
1453        break;
1454    default:
1455        g_assert_not_reached();
1456    }
1457}
1458
1459static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1460                               TCGReg base, intptr_t ofs)
1461{
1462    return false;
1463}
1464
1465static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1466                        int const_arg2, int cr, TCGType type)
1467{
1468    int imm;
1469    uint32_t op;
1470
1471    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1472
1473    /* Simplify the comparisons below wrt CMPI.  */
1474    if (type == TCG_TYPE_I32) {
1475        arg2 = (int32_t)arg2;
1476    }
1477
1478    switch (cond) {
1479    case TCG_COND_EQ:
1480    case TCG_COND_NE:
1481        if (const_arg2) {
1482            if ((int16_t) arg2 == arg2) {
1483                op = CMPI;
1484                imm = 1;
1485                break;
1486            } else if ((uint16_t) arg2 == arg2) {
1487                op = CMPLI;
1488                imm = 1;
1489                break;
1490            }
1491        }
1492        op = CMPL;
1493        imm = 0;
1494        break;
1495
1496    case TCG_COND_LT:
1497    case TCG_COND_GE:
1498    case TCG_COND_LE:
1499    case TCG_COND_GT:
1500        if (const_arg2) {
1501            if ((int16_t) arg2 == arg2) {
1502                op = CMPI;
1503                imm = 1;
1504                break;
1505            }
1506        }
1507        op = CMP;
1508        imm = 0;
1509        break;
1510
1511    case TCG_COND_LTU:
1512    case TCG_COND_GEU:
1513    case TCG_COND_LEU:
1514    case TCG_COND_GTU:
1515        if (const_arg2) {
1516            if ((uint16_t) arg2 == arg2) {
1517                op = CMPLI;
1518                imm = 1;
1519                break;
1520            }
1521        }
1522        op = CMPL;
1523        imm = 0;
1524        break;
1525
1526    default:
1527        g_assert_not_reached();
1528    }
1529    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1530
1531    if (imm) {
1532        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1533    } else {
1534        if (const_arg2) {
1535            tcg_out_movi(s, type, TCG_REG_R0, arg2);
1536            arg2 = TCG_REG_R0;
1537        }
1538        tcg_out32(s, op | RA(arg1) | RB(arg2));
1539    }
1540}
1541
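/* Set dst to (src == 0).  cntlzw/cntlzd return the operand width only when
   the source is zero, so a logical shift right by log2(width) leaves 1 for
   zero and 0 otherwise.  */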
1542static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1543                                TCGReg dst, TCGReg src)
1544{
1545    if (type == TCG_TYPE_I32) {
1546        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1547        tcg_out_shri32(s, dst, dst, 5);
1548    } else {
1549        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1550        tcg_out_shri64(s, dst, dst, 6);
1551    }
1552}
1553
1554static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1555{
1556    /* X != 0 implies X + -1 generates a carry.  The subfe below then
1557       computes R = ~(X-1) + X + CA = -X + X + CA = CA, i.e. (X != 0).  */
1558    if (dst != src) {
1559        tcg_out32(s, ADDIC | TAI(dst, src, -1));
1560        tcg_out32(s, SUBFE | TAB(dst, dst, src));
1561    } else {
1562        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1563        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1564    }
1565}
1566
1567static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1568                                  bool const_arg2)
1569{
1570    if (const_arg2) {
1571        if ((uint32_t)arg2 == arg2) {
1572            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1573        } else {
1574            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1575            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1576        }
1577    } else {
1578        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1579    }
1580    return TCG_REG_R0;
1581}
1582
1583static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1584                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
1585                            int const_arg2)
1586{
1587    int crop, sh;
1588
1589    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1590
1591    /* Ignore high bits of a potential constant arg2.  */
1592    if (type == TCG_TYPE_I32) {
1593        arg2 = (uint32_t)arg2;
1594    }
1595
1596    /* Handle common and trivial cases before handling anything else.  */
1597    if (arg2 == 0) {
1598        switch (cond) {
1599        case TCG_COND_EQ:
1600            tcg_out_setcond_eq0(s, type, arg0, arg1);
1601            return;
1602        case TCG_COND_NE:
1603            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1604                tcg_out_ext32u(s, TCG_REG_R0, arg1);
1605                arg1 = TCG_REG_R0;
1606            }
1607            tcg_out_setcond_ne0(s, arg0, arg1);
1608            return;
1609        case TCG_COND_GE:
1610            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1611            arg1 = arg0;
1612            /* FALLTHRU */
1613        case TCG_COND_LT:
1614            /* Extract the sign bit.  */
1615            if (type == TCG_TYPE_I32) {
1616                tcg_out_shri32(s, arg0, arg1, 31);
1617            } else {
1618                tcg_out_shri64(s, arg0, arg1, 63);
1619            }
1620            return;
1621        default:
1622            break;
1623        }
1624    }
1625
1626    /* If we have ISEL, we can implement everything with 3 or 4 insns.
1627       All other cases below are also at least 3 insns, so speed up the
1628       code generator by not considering them and always using ISEL.  */
1629    if (have_isel) {
1630        int isel, tab;
1631
1632        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1633
1634        isel = tcg_to_isel[cond];
1635
1636        tcg_out_movi(s, type, arg0, 1);
1637        if (isel & 1) {
1638            /* arg0 = (bc ? 0 : 1) */
1639            tab = TAB(arg0, 0, arg0);
1640            isel &= ~1;
1641        } else {
1642            /* arg0 = (bc ? 1 : 0) */
1643            tcg_out_movi(s, type, TCG_REG_R0, 0);
1644            tab = TAB(arg0, arg0, TCG_REG_R0);
1645        }
1646        tcg_out32(s, isel | tab);
1647        return;
1648    }
1649
1650    switch (cond) {
1651    case TCG_COND_EQ:
1652        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1653        tcg_out_setcond_eq0(s, type, arg0, arg1);
1654        return;
1655
1656    case TCG_COND_NE:
1657        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1658        /* Discard the high bits only once, rather than both inputs.  */
1659        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1660            tcg_out_ext32u(s, TCG_REG_R0, arg1);
1661            arg1 = TCG_REG_R0;
1662        }
1663        tcg_out_setcond_ne0(s, arg0, arg1);
1664        return;
1665
1666    case TCG_COND_GT:
1667    case TCG_COND_GTU:
1668        sh = 30;
1669        crop = 0;
1670        goto crtest;
1671
1672    case TCG_COND_LT:
1673    case TCG_COND_LTU:
1674        sh = 29;
1675        crop = 0;
1676        goto crtest;
1677
1678    case TCG_COND_GE:
1679    case TCG_COND_GEU:
1680        sh = 31;
1681        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1682        goto crtest;
1683
1684    case TCG_COND_LE:
1685    case TCG_COND_LEU:
1686        sh = 31;
1687        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1688    crtest:
1689        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1690        if (crop) {
1691            tcg_out32(s, crop);
1692        }
1693        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1694        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1695        break;
1696
1697    default:
1698        g_assert_not_reached();
1699    }
1700}
1701
1702static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1703{
1704    if (l->has_value) {
1705        bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1706    } else {
1707        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1708    }
1709    tcg_out32(s, bc);
1710}
1711
1712static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1713                           TCGArg arg1, TCGArg arg2, int const_arg2,
1714                           TCGLabel *l, TCGType type)
1715{
1716    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1717    tcg_out_bc(s, tcg_to_bc[cond], l);
1718}
1719
1720static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1721                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1722                            TCGArg v2, bool const_c2)
1723{
1724    /* If for some reason both inputs are zero, don't produce bad code.  */
1725    if (v1 == 0 && v2 == 0) {
1726        tcg_out_movi(s, type, dest, 0);
1727        return;
1728    }
1729
1730    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1731
1732    if (have_isel) {
1733        int isel = tcg_to_isel[cond];
1734
1735        /* Swap the V operands if the operation indicates inversion.  */
1736        if (isel & 1) {
1737            int t = v1;
1738            v1 = v2;
1739            v2 = t;
1740            isel &= ~1;
1741        }
1742        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
1743        if (v2 == 0) {
1744            tcg_out_movi(s, type, TCG_REG_R0, 0);
1745        }
1746        tcg_out32(s, isel | TAB(dest, v1, v2));
1747    } else {
1748        if (dest == v2) {
1749            cond = tcg_invert_cond(cond);
1750            v2 = v1;
1751        } else if (dest != v1) {
1752            if (v1 == 0) {
1753                tcg_out_movi(s, type, dest, 0);
1754            } else {
1755                tcg_out_mov(s, type, dest, v1);
1756            }
1757        }
1758        /* Branch forward over one insn */
1759        tcg_out32(s, tcg_to_bc[cond] | 8);
1760        if (v2 == 0) {
1761            tcg_out_movi(s, type, dest, 0);
1762        } else {
1763            tcg_out_mov(s, type, dest, v2);
1764        }
1765    }
1766}
1767
1768static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1769                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1770{
1771    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1772        tcg_out32(s, opc | RA(a0) | RS(a1));
1773    } else {
1774        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1775        /* Note that the only other valid constant for a2 is 0.  */
1776        if (have_isel) {
1777            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1778            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1779        } else if (!const_a2 && a0 == a2) {
1780            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1781            tcg_out32(s, opc | RA(a0) | RS(a1));
1782        } else {
1783            tcg_out32(s, opc | RA(a0) | RS(a1));
1784            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1785            if (const_a2) {
1786                tcg_out_movi(s, type, a0, 0);
1787            } else {
1788                tcg_out_mov(s, type, a0, a2);
1789            }
1790        }
1791    }
1792}
1793
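/*
 * Double-word comparison for 32-bit hosts: compare the high and low halves
 * into separate CR fields (the low half always as unsigned for the ordered
 * conditions), then combine the fields so that cr7.eq holds the result.
 */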
1794static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1795                         const int *const_args)
1796{
1797    static const struct { uint8_t bit1, bit2; } bits[] = {
1798        [TCG_COND_LT ] = { CR_LT, CR_LT },
1799        [TCG_COND_LE ] = { CR_LT, CR_GT },
1800        [TCG_COND_GT ] = { CR_GT, CR_GT },
1801        [TCG_COND_GE ] = { CR_GT, CR_LT },
1802        [TCG_COND_LTU] = { CR_LT, CR_LT },
1803        [TCG_COND_LEU] = { CR_LT, CR_GT },
1804        [TCG_COND_GTU] = { CR_GT, CR_GT },
1805        [TCG_COND_GEU] = { CR_GT, CR_LT },
1806    };
1807
1808    TCGCond cond = args[4], cond2;
1809    TCGArg al, ah, bl, bh;
1810    int blconst, bhconst;
1811    int op, bit1, bit2;
1812
1813    al = args[0];
1814    ah = args[1];
1815    bl = args[2];
1816    bh = args[3];
1817    blconst = const_args[2];
1818    bhconst = const_args[3];
1819
1820    switch (cond) {
1821    case TCG_COND_EQ:
1822        op = CRAND;
1823        goto do_equality;
1824    case TCG_COND_NE:
1825        op = CRNAND;
1826    do_equality:
1827        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1828        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1829        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1830        break;
1831
1832    case TCG_COND_LT:
1833    case TCG_COND_LE:
1834    case TCG_COND_GT:
1835    case TCG_COND_GE:
1836    case TCG_COND_LTU:
1837    case TCG_COND_LEU:
1838    case TCG_COND_GTU:
1839    case TCG_COND_GEU:
1840        bit1 = bits[cond].bit1;
1841        bit2 = bits[cond].bit2;
1842        op = (bit1 != bit2 ? CRANDC : CRAND);
1843        cond2 = tcg_unsigned_cond(cond);
1844
1845        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1846        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1847        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1848        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1849        break;
1850
1851    default:
1852        g_assert_not_reached();
1853    }
1854}
1855
1856static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1857                             const int *const_args)
1858{
1859    tcg_out_cmp2(s, args + 1, const_args + 1);
1860    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1861    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1862}
1863
1864static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1865                             const int *const_args)
1866{
1867    tcg_out_cmp2(s, args, const_args);
1868    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1869}
1870
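/*
 * Memory barrier: only a store-before-load ordering requirement needs the
 * full hwsync; all other combinations are satisfied by the cheaper lwsync.
 */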
1871static void tcg_out_mb(TCGContext *s, TCGArg a0)
1872{
1873    uint32_t insn;
1874
1875    if (a0 & TCG_MO_ST_LD) {
1876        insn = HWSYNC;
1877    } else {
1878        insn = LWSYNC;
1879    }
1880
1881    tcg_out32(s, insn);
1882}
1883
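/*
 * Emit a call (lk = LK) or tail branch (lk = 0).  The ABIs differ: AIX
 * calls go through a function descriptor, ELFv2 wants r12 to hold the
 * entry address, and the remaining ABIs can branch straight to the target.
 */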
1884static void tcg_out_call_int(TCGContext *s, int lk,
1885                             const tcg_insn_unit *target)
1886{
1887#ifdef _CALL_AIX
1888    /* Look through the function descriptor.  If the branch is in range
1889       and the TOC fits in 32 bits, set up the TOC and branch directly.  */
1890    const void *tgt = ((const void * const *)target)[0];
1891    uintptr_t toc = ((const uintptr_t *)target)[1];
1892    intptr_t diff = tcg_pcrel_diff(s, tgt);
1893
1894    if (in_range_b(diff) && toc == (uint32_t)toc) {
1895        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1896        tcg_out_b(s, lk, tgt);
1897    } else {
1898        /* Fold the low bits of the constant into the addresses below.  */
1899        intptr_t arg = (intptr_t)target;
1900        int ofs = (int16_t)arg;
1901
1902        if (ofs + 8 < 0x8000) {
1903            arg -= ofs;
1904        } else {
1905            ofs = 0;
1906        }
1907        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1908        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1909        tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
1910        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1911        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1912    }
1913#elif defined(_CALL_ELF) && _CALL_ELF == 2
1914    intptr_t diff;
1915
1916    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1917       address, which the callee uses to compute its TOC address.  */
1918    /* FIXME: when the branch is in range, we could avoid r12 load if we
1919       knew that the destination uses the same TOC, and what its local
1920       entry point offset is.  */
1921    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1922
1923    diff = tcg_pcrel_diff(s, target);
1924    if (in_range_b(diff)) {
1925        tcg_out_b(s, lk, target);
1926    } else {
1927        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1928        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1929    }
1930#else
1931    tcg_out_b(s, lk, target);
1932#endif
1933}
1934
1935static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
1936                         const TCGHelperInfo *info)
1937{
1938    tcg_out_call_int(s, LK, target);
1939}
1940
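/*
 * Indexed load/store opcodes for the fast path, indexed by the MemOp
 * size/sign/byteswap bits.  A zero entry means no single insn exists for
 * that combination and a separate sign-extension is emitted afterwards.
 */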
1941static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
1942    [MO_UB] = LBZX,
1943    [MO_UW] = LHZX,
1944    [MO_UL] = LWZX,
1945    [MO_UQ] = LDX,
1946    [MO_SW] = LHAX,
1947    [MO_SL] = LWAX,
1948    [MO_BSWAP | MO_UB] = LBZX,
1949    [MO_BSWAP | MO_UW] = LHBRX,
1950    [MO_BSWAP | MO_UL] = LWBRX,
1951    [MO_BSWAP | MO_UQ] = LDBRX,
1952};
1953
1954static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
1955    [MO_UB] = STBX,
1956    [MO_UW] = STHX,
1957    [MO_UL] = STWX,
1958    [MO_UQ] = STDX,
1959    [MO_BSWAP | MO_UB] = STBX,
1960    [MO_BSWAP | MO_UW] = STHBRX,
1961    [MO_BSWAP | MO_UL] = STWBRX,
1962    [MO_BSWAP | MO_UQ] = STDBRX,
1963};
1964
1965#if defined (CONFIG_SOFTMMU)
1966static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
1967{
1968    if (arg < 0) {
1969        arg = TCG_REG_TMP1;
1970    }
1971    tcg_out32(s, MFSPR | RT(arg) | LR);
1972    return arg;
1973}
1974
1975/*
1976 * For ppc32, sorting 4 input registers into 4 argument registers can,
1977 * in the worst case, require 3 temporaries.
1978 */
1979static const TCGLdstHelperParam ldst_helper_param = {
1980    .ra_gen = ldst_ra_gen,
1981    .ntmp = 3,
1982    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
1983};
1984
1985static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1986{
1987    MemOp opc = get_memop(lb->oi);
1988
1989    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1990        return false;
1991    }
1992
1993    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1994    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
1995    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1996
1997    tcg_out_b(s, 0, lb->raddr);
1998    return true;
1999}
2000
2001static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2002{
2003    MemOp opc = get_memop(lb->oi);
2004
2005    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2006        return false;
2007    }
2008
2009    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
2010    tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);
2011
2012    tcg_out_b(s, 0, lb->raddr);
2013    return true;
2014}
2015#else
2016static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
2017{
2018    if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2019        return false;
2020    }
2021
2022    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2023        TCGReg arg = TCG_REG_R4;
2024
2025        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
2026        if (l->addrlo_reg != arg) {
2027            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
2028            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
2029        } else if (l->addrhi_reg != arg + 1) {
2030            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
2031            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
2032        } else {
2033            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
2034            tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
2035            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
2036        }
2037    } else {
2038        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
2039    }
2040    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
2041
2042    /* "Tail call" to the helper, with the return address back inline. */
2043    tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
2044                                          : helper_unaligned_st));
2045    return true;
2046}
2047
2048static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2049{
2050    return tcg_out_fail_alignment(s, l);
2051}
2052
2053static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2054{
2055    return tcg_out_fail_alignment(s, l);
2056}
2057#endif /* SOFTMMU */
2058
2059typedef struct {
2060    TCGReg base;
2061    TCGReg index;
2062} HostAddress;
2063
2064/*
2065 * For softmmu, perform the TLB load and compare.
2066 * For useronly, perform any required alignment tests.
2067 * In both cases, return a TCGLabelQemuLdst structure if the slow path
2068 * is required and fill in @h with the host address for the fast path.
2069 */
2070static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
2071                                           TCGReg addrlo, TCGReg addrhi,
2072                                           MemOpIdx oi, bool is_ld)
2073{
2074    TCGLabelQemuLdst *ldst = NULL;
2075    MemOp opc = get_memop(oi);
2076    unsigned a_bits = get_alignment_bits(opc);
2077
2078#ifdef CONFIG_SOFTMMU
2079    int mem_index = get_mmuidx(oi);
2080    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
2081                        : offsetof(CPUTLBEntry, addr_write);
2082    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
2083    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2084    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2085    unsigned s_bits = opc & MO_SIZE;
2086
2087    ldst = new_ldst_label(s);
2088    ldst->is_ld = is_ld;
2089    ldst->oi = oi;
2090    ldst->addrlo_reg = addrlo;
2091    ldst->addrhi_reg = addrhi;
2092
2093    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
2094    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
2095    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
2096    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
2097    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
2098
2099    /* Extract the page index, shifted into place for tlb index.  */
2100    if (TCG_TARGET_REG_BITS == 32) {
2101        tcg_out_shri32(s, TCG_REG_R0, addrlo,
2102                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2103    } else {
2104        tcg_out_shri64(s, TCG_REG_R0, addrlo,
2105                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2106    }
2107    tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
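    /*
     * TMP1 = mask & (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)),
     * i.e. the byte offset of the TLB entry within the fast table.
     */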
2108
2109    /* Load the (low part) TLB comparator into TMP2.  */
2110    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2111        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
2112                        ? LWZUX : LDUX);
2113        tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
2114    } else {
2115        tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2116        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2117            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
2118                       TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
2119        } else {
2120            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
2121        }
2122    }
2123
2124    /*
2125     * Load the TLB addend for use on the fast path.
2126     * Do this as early as possible to minimize the load-use delay.
2127     */
2128    if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2129        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2130                   offsetof(CPUTLBEntry, addend));
2131    }
2132
2133    /* Clear the non-page, non-alignment bits from the address, into R0. */
2134    if (TCG_TARGET_REG_BITS == 32) {
2135        /*
2136         * We don't support unaligned accesses in 32-bit mode.
2137         * Preserve the bottom bits and thus trigger a comparison
2138         * failure on unaligned accesses.
2139         */
2140        if (a_bits < s_bits) {
2141            a_bits = s_bits;
2142        }
2143        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2144                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2145    } else {
2146        TCGReg t = addrlo;
2147
2148        /*
2149         * If the access is unaligned, we need to make sure we fail if we
2150         * cross a page boundary.  The trick is to add the access size-1
2151         * to the address before masking the low bits.  That will make the
2152         * address overflow to the next page if we cross a page boundary,
2153         * which will then force a mismatch of the TLB compare.
2154         */
2155        if (a_bits < s_bits) {
2156            unsigned a_mask = (1 << a_bits) - 1;
2157            unsigned s_mask = (1 << s_bits) - 1;
2158            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2159            t = TCG_REG_R0;
2160        }
2161
2162        /* Mask the address for the requested alignment.  */
2163        if (TARGET_LONG_BITS == 32) {
2164            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2165                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2166        } else if (a_bits == 0) {
2167            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
2168        } else {
2169            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2170                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
2171            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
2172        }
2173    }
2174
2175    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2176        /* Low part comparison into cr7. */
2177        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2178                    0, 7, TCG_TYPE_I32);
2179
2180        /* Load the high part TLB comparator into TMP2.  */
2181        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2182                   cmp_off + 4 * !HOST_BIG_ENDIAN);
2183
2184        /* Load addend, deferred for this case. */
2185        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2186                   offsetof(CPUTLBEntry, addend));
2187
2188        /* High part comparison into cr6. */
2189        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
2190
2191        /* Combine comparisons into cr7. */
2192        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2193    } else {
2194        /* Full comparison into cr7. */
2195        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2196                    0, 7, TCG_TYPE_TL);
2197    }
2198
2199    /* Conditional branch-link to the slow path, recorded for patching. */
2200    ldst->label_ptr[0] = s->code_ptr;
2201    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2202
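    /* TMP1 holds the TLB addend; use it as the base of the host address. */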
2203    h->base = TCG_REG_TMP1;
2204#else
2205    if (a_bits) {
2206        ldst = new_ldst_label(s);
2207        ldst->is_ld = is_ld;
2208        ldst->oi = oi;
2209        ldst->addrlo_reg = addrlo;
2210        ldst->addrhi_reg = addrhi;
2211
2212        /* a_bits is at most 7, well below ANDI's 16-bit immediate field. */
2213        tcg_debug_assert(a_bits < 16);
2214        tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
2215
2216        ldst->label_ptr[0] = s->code_ptr;
2217        tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
2218    }
2219
2220    h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2221#endif
2222
2223    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2224        /* Zero-extend the guest address for use in the host address. */
2225        tcg_out_ext32u(s, TCG_REG_R0, addrlo);
2226        h->index = TCG_REG_R0;
2227    } else {
2228        h->index = addrlo;
2229    }
2230
2231    return ldst;
2232}
2233
2234static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
2235                            TCGReg addrlo, TCGReg addrhi,
2236                            MemOpIdx oi, TCGType data_type)
2237{
2238    MemOp opc = get_memop(oi);
2239    TCGLabelQemuLdst *ldst;
2240    HostAddress h;
2241
2242    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
2243
2244    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2245        if (opc & MO_BSWAP) {
2246            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2247            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2248            tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
2249        } else if (h.base != 0) {
2250            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2251            tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
2252            tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
2253        } else if (h.index == datahi) {
2254            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2255            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2256        } else {
2257            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2258            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2259        }
2260    } else {
2261        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2262        if (!have_isa_2_06 && insn == LDBRX) {
2263            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2264            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2265            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
2266            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2267        } else if (insn) {
2268            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2269        } else {
2270            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2271            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2272            tcg_out_movext(s, TCG_TYPE_REG, datalo,
2273                           TCG_TYPE_REG, opc & MO_SSIZE, datalo);
2274        }
2275    }
2276
2277    if (ldst) {
2278        ldst->type = data_type;
2279        ldst->datalo_reg = datalo;
2280        ldst->datahi_reg = datahi;
2281        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2282    }
2283}
2284
2285static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
2286                            TCGReg addrlo, TCGReg addrhi,
2287                            MemOpIdx oi, TCGType data_type)
2288{
2289    MemOp opc = get_memop(oi);
2290    TCGLabelQemuLdst *ldst;
2291    HostAddress h;
2292
2293    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
2294
2295    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2296        if (opc & MO_BSWAP) {
2297            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2298            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2299            tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
2300        } else if (h.base != 0) {
2301            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2302            tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
2303            tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
2304        } else {
2305            tcg_out32(s, STW | TAI(datahi, h.index, 0));
2306            tcg_out32(s, STW | TAI(datalo, h.index, 4));
2307        }
2308    } else {
2309        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2310        if (!have_isa_2_06 && insn == STDBRX) {
2311            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2312            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
2313            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2314            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
2315        } else {
2316            tcg_out32(s, insn | SAB(datalo, h.base, h.index));
2317        }
2318    }
2319
2320    if (ldst) {
2321        ldst->type = data_type;
2322        ldst->datalo_reg = datalo;
2323        ldst->datahi_reg = datahi;
2324        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2325    }
2326}
2327
2328static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2329{
2330    int i;
2331    for (i = 0; i < count; ++i) {
2332        p[i] = NOP;
2333    }
2334}
2335
2336/* Parameters for function call generation, used in tcg.c.  */
2337#define TCG_TARGET_STACK_ALIGN       16
2338
2339#ifdef _CALL_AIX
2340# define LINK_AREA_SIZE                (6 * SZR)
2341# define LR_OFFSET                     (1 * SZR)
2342# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
2343#elif defined(_CALL_DARWIN)
2344# define LINK_AREA_SIZE                (6 * SZR)
2345# define LR_OFFSET                     (2 * SZR)
2346#elif TCG_TARGET_REG_BITS == 64
2347# if defined(_CALL_ELF) && _CALL_ELF == 2
2348#  define LINK_AREA_SIZE               (4 * SZR)
2349#  define LR_OFFSET                    (1 * SZR)
2350# endif
2351#else /* TCG_TARGET_REG_BITS == 32 */
2352# if defined(_CALL_SYSV)
2353#  define LINK_AREA_SIZE               (2 * SZR)
2354#  define LR_OFFSET                    (1 * SZR)
2355# endif
2356#endif
2357#ifndef LR_OFFSET
2358# error "Unhandled abi"
2359#endif
2360#ifndef TCG_TARGET_CALL_STACK_OFFSET
2361# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
2362#endif
2363
2364#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2365#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2366
2367#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
2368                     + TCG_STATIC_CALL_ARGS_SIZE    \
2369                     + CPU_TEMP_BUF_SIZE            \
2370                     + REG_SAVE_SIZE                \
2371                     + TCG_TARGET_STACK_ALIGN - 1)  \
2372                    & -TCG_TARGET_STACK_ALIGN)
2373
2374#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
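/*
 * Resulting frame layout, from the stack pointer upward: the ABI link area
 * (plus, for AIX, the parameter save area), TCG_STATIC_CALL_ARGS_SIZE of
 * outgoing argument space, the CPU_TEMP_BUF scratch area, and finally the
 * callee-saved registers starting at REG_SAVE_BOT.
 */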
2375
2376static void tcg_target_qemu_prologue(TCGContext *s)
2377{
2378    int i;
2379
2380#ifdef _CALL_AIX
2381    const void **desc = (const void **)s->code_ptr;
2382    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
2383    desc[1] = 0;                            /* environment pointer */
2384    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
2385#endif
2386
2387    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2388                  CPU_TEMP_BUF_SIZE);
2389
2390    /* Prologue */
2391    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2392    tcg_out32(s, (SZR == 8 ? STDU : STWU)
2393              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2394
2395    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2396        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2397                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2398    }
2399    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2400
2401#ifndef CONFIG_SOFTMMU
2402    if (guest_base) {
2403        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2404        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2405    }
2406#endif
2407
2408    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2409    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2410    if (USE_REG_TB) {
2411        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2412    }
2413    tcg_out32(s, BCCTR | BO_ALWAYS);
2414
2415    /* Epilogue */
2416    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2417
2418    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2419    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2420        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2421                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2422    }
2423    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2424    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2425    tcg_out32(s, BCLR | BO_ALWAYS);
2426}
2427
2428static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2429{
2430    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2431    tcg_out_b(s, 0, tcg_code_gen_epilogue);
2432}
2433
2434static void tcg_out_goto_tb(TCGContext *s, int which)
2435{
2436    uintptr_t ptr = get_jmp_target_addr(s, which);
2437
2438    if (USE_REG_TB) {
2439        ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
2440        tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
2441
2442        /* Direct branch will be patched by tb_target_set_jmp_target. */
2443        set_jmp_insn_offset(s, which);
2444        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2445
2446        /* When branch is out of range, fall through to indirect. */
2447        tcg_out32(s, BCCTR | BO_ALWAYS);
2448
2449        /* For the unlinked case, need to reset TCG_REG_TB.  */
2450        set_jmp_reset_offset(s, which);
2451        tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
2452                         -tcg_current_code_size(s));
2453    } else {
2454        /* Direct branch will be patched by tb_target_set_jmp_target. */
2455        set_jmp_insn_offset(s, which);
2456        tcg_out32(s, NOP);
2457
2458        /* When branch is out of range, fall through to indirect. */
2459        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
2460        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
2461        tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2462        tcg_out32(s, BCCTR | BO_ALWAYS);
2463        set_jmp_reset_offset(s, which);
2464    }
2465}
2466
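/*
 * Patch the insn emitted at set_jmp_insn_offset: a direct branch when the
 * target is in range, otherwise mtctr (REG_TB case) or a plain nop so that
 * execution falls through to the out-of-line indirect sequence.
 */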
2467void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2468                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2469{
2470    uintptr_t addr = tb->jmp_target_addr[n];
2471    intptr_t diff = addr - jmp_rx;
2472    tcg_insn_unit insn;
2473
2474    if (in_range_b(diff)) {
2475        insn = B | (diff & 0x3fffffc);
2476    } else if (USE_REG_TB) {
2477        insn = MTSPR | RS(TCG_REG_TB) | CTR;
2478    } else {
2479        insn = NOP;
2480    }
2481
2482    qatomic_set((uint32_t *)jmp_rw, insn);
2483    flush_idcache_range(jmp_rx, jmp_rw, 4);
2484}
2485
2486static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2487                       const TCGArg args[TCG_MAX_OP_ARGS],
2488                       const int const_args[TCG_MAX_OP_ARGS])
2489{
2490    TCGArg a0, a1, a2;
2491
2492    switch (opc) {
2493    case INDEX_op_goto_ptr:
2494        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2495        if (USE_REG_TB) {
2496            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2497        }
2498        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2499        tcg_out32(s, BCCTR | BO_ALWAYS);
2500        break;
2501    case INDEX_op_br:
2502        {
2503            TCGLabel *l = arg_label(args[0]);
2504            uint32_t insn = B;
2505
2506            if (l->has_value) {
2507                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2508                                       l->u.value_ptr);
2509            } else {
2510                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2511            }
2512            tcg_out32(s, insn);
2513        }
2514        break;
2515    case INDEX_op_ld8u_i32:
2516    case INDEX_op_ld8u_i64:
2517        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2518        break;
2519    case INDEX_op_ld8s_i32:
2520    case INDEX_op_ld8s_i64:
2521        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2522        tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2523        break;
2524    case INDEX_op_ld16u_i32:
2525    case INDEX_op_ld16u_i64:
2526        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2527        break;
2528    case INDEX_op_ld16s_i32:
2529    case INDEX_op_ld16s_i64:
2530        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2531        break;
2532    case INDEX_op_ld_i32:
2533    case INDEX_op_ld32u_i64:
2534        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2535        break;
2536    case INDEX_op_ld32s_i64:
2537        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2538        break;
2539    case INDEX_op_ld_i64:
2540        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2541        break;
2542    case INDEX_op_st8_i32:
2543    case INDEX_op_st8_i64:
2544        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2545        break;
2546    case INDEX_op_st16_i32:
2547    case INDEX_op_st16_i64:
2548        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2549        break;
2550    case INDEX_op_st_i32:
2551    case INDEX_op_st32_i64:
2552        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2553        break;
2554    case INDEX_op_st_i64:
2555        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2556        break;
2557
2558    case INDEX_op_add_i32:
2559        a0 = args[0], a1 = args[1], a2 = args[2];
2560        if (const_args[2]) {
2561        do_addi_32:
2562            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2563        } else {
2564            tcg_out32(s, ADD | TAB(a0, a1, a2));
2565        }
2566        break;
2567    case INDEX_op_sub_i32:
2568        a0 = args[0], a1 = args[1], a2 = args[2];
2569        if (const_args[1]) {
2570            if (const_args[2]) {
2571                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2572            } else {
2573                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2574            }
2575        } else if (const_args[2]) {
2576            a2 = -a2;
2577            goto do_addi_32;
2578        } else {
2579            tcg_out32(s, SUBF | TAB(a0, a2, a1));
2580        }
2581        break;
2582
2583    case INDEX_op_and_i32:
2584        a0 = args[0], a1 = args[1], a2 = args[2];
2585        if (const_args[2]) {
2586            tcg_out_andi32(s, a0, a1, a2);
2587        } else {
2588            tcg_out32(s, AND | SAB(a1, a0, a2));
2589        }
2590        break;
2591    case INDEX_op_and_i64:
2592        a0 = args[0], a1 = args[1], a2 = args[2];
2593        if (const_args[2]) {
2594            tcg_out_andi64(s, a0, a1, a2);
2595        } else {
2596            tcg_out32(s, AND | SAB(a1, a0, a2));
2597        }
2598        break;
2599    case INDEX_op_or_i64:
2600    case INDEX_op_or_i32:
2601        a0 = args[0], a1 = args[1], a2 = args[2];
2602        if (const_args[2]) {
2603            tcg_out_ori32(s, a0, a1, a2);
2604        } else {
2605            tcg_out32(s, OR | SAB(a1, a0, a2));
2606        }
2607        break;
2608    case INDEX_op_xor_i64:
2609    case INDEX_op_xor_i32:
2610        a0 = args[0], a1 = args[1], a2 = args[2];
2611        if (const_args[2]) {
2612            tcg_out_xori32(s, a0, a1, a2);
2613        } else {
2614            tcg_out32(s, XOR | SAB(a1, a0, a2));
2615        }
2616        break;
2617    case INDEX_op_andc_i32:
2618        a0 = args[0], a1 = args[1], a2 = args[2];
2619        if (const_args[2]) {
2620            tcg_out_andi32(s, a0, a1, ~a2);
2621        } else {
2622            tcg_out32(s, ANDC | SAB(a1, a0, a2));
2623        }
2624        break;
2625    case INDEX_op_andc_i64:
2626        a0 = args[0], a1 = args[1], a2 = args[2];
2627        if (const_args[2]) {
2628            tcg_out_andi64(s, a0, a1, ~a2);
2629        } else {
2630            tcg_out32(s, ANDC | SAB(a1, a0, a2));
2631        }
2632        break;
2633    case INDEX_op_orc_i32:
2634        if (const_args[2]) {
2635            tcg_out_ori32(s, args[0], args[1], ~args[2]);
2636            break;
2637        }
2638        /* FALLTHRU */
2639    case INDEX_op_orc_i64:
2640        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2641        break;
2642    case INDEX_op_eqv_i32:
2643        if (const_args[2]) {
2644            tcg_out_xori32(s, args[0], args[1], ~args[2]);
2645            break;
2646        }
2647        /* FALLTHRU */
2648    case INDEX_op_eqv_i64:
2649        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2650        break;
2651    case INDEX_op_nand_i32:
2652    case INDEX_op_nand_i64:
2653        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2654        break;
2655    case INDEX_op_nor_i32:
2656    case INDEX_op_nor_i64:
2657        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2658        break;
2659
2660    case INDEX_op_clz_i32:
2661        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2662                      args[2], const_args[2]);
2663        break;
2664    case INDEX_op_ctz_i32:
2665        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2666                      args[2], const_args[2]);
2667        break;
2668    case INDEX_op_ctpop_i32:
2669        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2670        break;
2671
2672    case INDEX_op_clz_i64:
2673        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2674                      args[2], const_args[2]);
2675        break;
2676    case INDEX_op_ctz_i64:
2677        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2678                      args[2], const_args[2]);
2679        break;
2680    case INDEX_op_ctpop_i64:
2681        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2682        break;
2683
2684    case INDEX_op_mul_i32:
2685        a0 = args[0], a1 = args[1], a2 = args[2];
2686        if (const_args[2]) {
2687            tcg_out32(s, MULLI | TAI(a0, a1, a2));
2688        } else {
2689            tcg_out32(s, MULLW | TAB(a0, a1, a2));
2690        }
2691        break;
2692
2693    case INDEX_op_div_i32:
2694        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2695        break;
2696
2697    case INDEX_op_divu_i32:
2698        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2699        break;
2700
2701    case INDEX_op_rem_i32:
2702        tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
2703        break;
2704
2705    case INDEX_op_remu_i32:
2706        tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
2707        break;
2708
2709    case INDEX_op_shl_i32:
2710        if (const_args[2]) {
2711            /* Limit immediate shift count lest we create an illegal insn.  */
2712            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
2713        } else {
2714            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2715        }
2716        break;
2717    case INDEX_op_shr_i32:
2718        if (const_args[2]) {
2719            /* Limit immediate shift count lest we create an illegal insn.  */
2720            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
2721        } else {
2722            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2723        }
2724        break;
2725    case INDEX_op_sar_i32:
2726        if (const_args[2]) {
2727            tcg_out_sari32(s, args[0], args[1], args[2]);
2728        } else {
2729            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2730        }
2731        break;
2732    case INDEX_op_rotl_i32:
2733        if (const_args[2]) {
2734            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2735        } else {
2736            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2737                         | MB(0) | ME(31));
2738        }
2739        break;
2740    case INDEX_op_rotr_i32:
2741        if (const_args[2]) {
2742            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2743        } else {
2744            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2745            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2746                         | MB(0) | ME(31));
2747        }
2748        break;
2749
2750    case INDEX_op_brcond_i32:
2751        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2752                       arg_label(args[3]), TCG_TYPE_I32);
2753        break;
2754    case INDEX_op_brcond_i64:
2755        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2756                       arg_label(args[3]), TCG_TYPE_I64);
2757        break;
2758    case INDEX_op_brcond2_i32:
2759        tcg_out_brcond2(s, args, const_args);
2760        break;
2761
2762    case INDEX_op_neg_i32:
2763    case INDEX_op_neg_i64:
2764        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2765        break;
2766
2767    case INDEX_op_not_i32:
2768    case INDEX_op_not_i64:
2769        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2770        break;
2771
2772    case INDEX_op_add_i64:
2773        a0 = args[0], a1 = args[1], a2 = args[2];
2774        if (const_args[2]) {
2775        do_addi_64:
2776            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2777        } else {
2778            tcg_out32(s, ADD | TAB(a0, a1, a2));
2779        }
2780        break;
2781    case INDEX_op_sub_i64:
2782        a0 = args[0], a1 = args[1], a2 = args[2];
2783        if (const_args[1]) {
2784            if (const_args[2]) {
2785                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2786            } else {
2787                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2788            }
2789        } else if (const_args[2]) {
2790            a2 = -a2;
2791            goto do_addi_64;
2792        } else {
2793            tcg_out32(s, SUBF | TAB(a0, a2, a1));
2794        }
2795        break;
2796
2797    case INDEX_op_shl_i64:
2798        if (const_args[2]) {
2799            /* Limit immediate shift count lest we create an illegal insn.  */
2800            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
2801        } else {
2802            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2803        }
2804        break;
2805    case INDEX_op_shr_i64:
2806        if (const_args[2]) {
2807            /* Limit immediate shift count lest we create an illegal insn.  */
2808            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
2809        } else {
2810            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2811        }
2812        break;
2813    case INDEX_op_sar_i64:
2814        if (const_args[2]) {
2815            tcg_out_sari64(s, args[0], args[1], args[2]);
2816        } else {
2817            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2818        }
2819        break;
2820    case INDEX_op_rotl_i64:
2821        if (const_args[2]) {
2822            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2823        } else {
2824            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2825        }
2826        break;
2827    case INDEX_op_rotr_i64:
2828        if (const_args[2]) {
2829            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2830        } else {
2831            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2832            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2833        }
2834        break;
2835
2836    case INDEX_op_mul_i64:
2837        a0 = args[0], a1 = args[1], a2 = args[2];
2838        if (const_args[2]) {
2839            tcg_out32(s, MULLI | TAI(a0, a1, a2));
2840        } else {
2841            tcg_out32(s, MULLD | TAB(a0, a1, a2));
2842        }
2843        break;
2844    case INDEX_op_div_i64:
2845        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2846        break;
2847    case INDEX_op_divu_i64:
2848        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2849        break;
2850    case INDEX_op_rem_i64:
2851        tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
2852        break;
2853    case INDEX_op_remu_i64:
2854        tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
2855        break;
2856
2857    case INDEX_op_qemu_ld_i32:
2858        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2859            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2860                            args[2], TCG_TYPE_I32);
2861        } else {
2862            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
2863                            args[3], TCG_TYPE_I32);
2864        }
2865        break;
2866    case INDEX_op_qemu_ld_i64:
2867        if (TCG_TARGET_REG_BITS == 64) {
2868            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2869                            args[2], TCG_TYPE_I64);
2870        } else if (TARGET_LONG_BITS == 32) {
2871            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
2872                            args[3], TCG_TYPE_I64);
2873        } else {
2874            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2875                            args[4], TCG_TYPE_I64);
2876        }
2877        break;
2878    case INDEX_op_qemu_st_i32:
2879        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2880            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2881                            args[2], TCG_TYPE_I32);
2882        } else {
2883            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2884                            args[3], TCG_TYPE_I32);
2885        }
2886        break;
2887    case INDEX_op_qemu_st_i64:
2888        if (TCG_TARGET_REG_BITS == 64) {
2889            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2890                            args[2], TCG_TYPE_I64);
2891        } else if (TARGET_LONG_BITS == 32) {
2892            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2893                            args[3], TCG_TYPE_I64);
2894        } else {
2895            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2896                            args[4], TCG_TYPE_I64);
2897        }
2898        break;
2899
2900    case INDEX_op_setcond_i32:
2901        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2902                        const_args[2]);
2903        break;
2904    case INDEX_op_setcond_i64:
2905        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2906                        const_args[2]);
2907        break;
2908    case INDEX_op_setcond2_i32:
2909        tcg_out_setcond2(s, args, const_args);
2910        break;
2911
2912    case INDEX_op_bswap16_i32:
2913    case INDEX_op_bswap16_i64:
2914        tcg_out_bswap16(s, args[0], args[1], args[2]);
2915        break;
2916    case INDEX_op_bswap32_i32:
2917        tcg_out_bswap32(s, args[0], args[1], 0);
2918        break;
2919    case INDEX_op_bswap32_i64:
2920        tcg_out_bswap32(s, args[0], args[1], args[2]);
2921        break;
2922    case INDEX_op_bswap64_i64:
2923        tcg_out_bswap64(s, args[0], args[1]);
2924        break;
2925
2926    case INDEX_op_deposit_i32:
2927        if (const_args[2]) {
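            /* Only a constant zero value is expected here (the deposit
               constraint accepts 0), so clearing the field suffices.  */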
2928            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
2929            tcg_out_andi32(s, args[0], args[0], ~mask);
2930        } else {
2931            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
2932                        32 - args[3] - args[4], 31 - args[3]);
2933        }
2934        break;
2935    case INDEX_op_deposit_i64:
2936        if (const_args[2]) {
2937            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
2938            tcg_out_andi64(s, args[0], args[0], ~mask);
2939        } else {
2940            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
2941                        64 - args[3] - args[4]);
2942        }
2943        break;
2944
2945    case INDEX_op_extract_i32:
2946        tcg_out_rlw(s, RLWINM, args[0], args[1],
2947                    32 - args[2], 32 - args[3], 31);
2948        break;
2949    case INDEX_op_extract_i64:
2950        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
2951        break;
2952
2953    case INDEX_op_movcond_i32:
2954        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
2955                        args[3], args[4], const_args[2]);
2956        break;
2957    case INDEX_op_movcond_i64:
2958        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
2959                        args[3], args[4], const_args[2]);
2960        break;
2961
2962#if TCG_TARGET_REG_BITS == 64
2963    case INDEX_op_add2_i64:
2964#else
2965    case INDEX_op_add2_i32:
2966#endif
2967        /* Note that the CA bit is defined based on the word size of the
2968           environment.  So in 64-bit mode it's always carry-out of bit 63.
2969           The fallback code using deposit works just as well for 32-bit.  */
2970        a0 = args[0], a1 = args[1];
2971        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
2972            a0 = TCG_REG_R0;
2973        }
2974        if (const_args[4]) {
2975            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
2976        } else {
2977            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
2978        }
2979        if (const_args[5]) {
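            /* A constant high operand is limited to 0 or -1 (the MONE
               constraint): addze adds just CA, addme adds CA plus -1.  */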
2980            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
2981        } else {
2982            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
2983        }
2984        if (a0 != args[0]) {
2985            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
2986        }
2987        break;
2988
2989#if TCG_TARGET_REG_BITS == 64
2990    case INDEX_op_sub2_i64:
2991#else
2992    case INDEX_op_sub2_i32:
2993#endif
2994        a0 = args[0], a1 = args[1];
2995        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
2996            a0 = TCG_REG_R0;
2997        }
2998        if (const_args[2]) {
2999            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
3000        } else {
3001            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
3002        }
3003        if (const_args[3]) {
3004            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
3005        } else {
3006            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
3007        }
3008        if (a0 != args[0]) {
3009            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3010        }
3011        break;
3012
3013    case INDEX_op_muluh_i32:
3014        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
3015        break;
3016    case INDEX_op_mulsh_i32:
3017        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
3018        break;
3019    case INDEX_op_muluh_i64:
3020        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
3021        break;
3022    case INDEX_op_mulsh_i64:
3023        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
3024        break;
3025
3026    case INDEX_op_mb:
3027        tcg_out_mb(s, args[0]);
3028        break;
3029
3030    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
3031    case INDEX_op_mov_i64:
3032    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
3033    case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb.  */
3034    case INDEX_op_goto_tb:   /* Always emitted via tcg_out_goto_tb.  */
3035    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
3036    case INDEX_op_ext8s_i64:
3037    case INDEX_op_ext8u_i32:
3038    case INDEX_op_ext8u_i64:
3039    case INDEX_op_ext16s_i32:
3040    case INDEX_op_ext16s_i64:
3041    case INDEX_op_ext16u_i32:
3042    case INDEX_op_ext16u_i64:
3043    case INDEX_op_ext32s_i64:
3044    case INDEX_op_ext32u_i64:
3045    case INDEX_op_ext_i32_i64:
3046    case INDEX_op_extu_i32_i64:
3047    case INDEX_op_extrl_i64_i32:
3048    default:
3049        g_assert_not_reached();
3050    }
3051}
3052
3053int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3054{
3055    switch (opc) {
3056    case INDEX_op_and_vec:
3057    case INDEX_op_or_vec:
3058    case INDEX_op_xor_vec:
3059    case INDEX_op_andc_vec:
3060    case INDEX_op_not_vec:
3061    case INDEX_op_nor_vec:
3062    case INDEX_op_eqv_vec:
3063    case INDEX_op_nand_vec:
3064        return 1;
3065    case INDEX_op_orc_vec:
3066        return have_isa_2_07;
3067    case INDEX_op_add_vec:
3068    case INDEX_op_sub_vec:
3069    case INDEX_op_smax_vec:
3070    case INDEX_op_smin_vec:
3071    case INDEX_op_umax_vec:
3072    case INDEX_op_umin_vec:
3073    case INDEX_op_shlv_vec:
3074    case INDEX_op_shrv_vec:
3075    case INDEX_op_sarv_vec:
3076    case INDEX_op_rotlv_vec:
3077        return vece <= MO_32 || have_isa_2_07;
3078    case INDEX_op_ssadd_vec:
3079    case INDEX_op_sssub_vec:
3080    case INDEX_op_usadd_vec:
3081    case INDEX_op_ussub_vec:
3082        return vece <= MO_32;
3083    case INDEX_op_cmp_vec:
3084    case INDEX_op_shli_vec:
3085    case INDEX_op_shri_vec:
3086    case INDEX_op_sari_vec:
3087    case INDEX_op_rotli_vec:
3088        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3089    case INDEX_op_neg_vec:
3090        return vece >= MO_32 && have_isa_3_00;
3091    case INDEX_op_mul_vec:
3092        switch (vece) {
3093        case MO_8:
3094        case MO_16:
3095            return -1;
3096        case MO_32:
3097            return have_isa_2_07 ? 1 : -1;
3098        case MO_64:
3099            return have_isa_3_10;
3100        }
3101        return 0;
3102    case INDEX_op_bitsel_vec:
3103        return have_vsx;
3104    case INDEX_op_rotrv_vec:
3105        return -1;
3106    default:
3107        return 0;
3108    }
3109}
3110
3111static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3112                            TCGReg dst, TCGReg src)
3113{
3114    tcg_debug_assert(dst >= TCG_REG_V0);
3115
3116    /* Splat from integer reg allowed via constraints for v3.00.  */
3117    if (src < TCG_REG_V0) {
3118        tcg_debug_assert(have_isa_3_00);
3119        switch (vece) {
3120        case MO_64:
3121            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3122            return true;
3123        case MO_32:
3124            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3125            return true;
3126        default:
3127            /* Fail, so that we fall back on either dupm or mov+dup.  */
3128            return false;
3129        }
3130    }
3131
3132    /*
3133     * Recall we use (or emulate) VSX integer loads, so the integer is
3134     * right justified within the left (zero-index) double-word.
3135     */
3136    switch (vece) {
3137    case MO_8:
3138        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3139        break;
3140    case MO_16:
3141        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3142        break;
3143    case MO_32:
3144        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3145        break;
3146    case MO_64:
3147        if (have_vsx) {
3148            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3149            break;
3150        }
3151        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3152        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3153        break;
3154    default:
3155        g_assert_not_reached();
3156    }
3157    return true;
3158}
3159
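/*
 * Duplicate a vector element from memory: either use a load-and-splat insn
 * (ISA 3.00 / VSX) or load the element and splat it, adjusting the element
 * index for host endianness.
 */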
3160static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3161                             TCGReg out, TCGReg base, intptr_t offset)
3162{
3163    int elt;
3164
3165    tcg_debug_assert(out >= TCG_REG_V0);
3166    switch (vece) {
3167    case MO_8:
3168        if (have_isa_3_00) {
3169            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3170        } else {
3171            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3172        }
3173        elt = extract32(offset, 0, 4);
3174#if !HOST_BIG_ENDIAN
3175        elt ^= 15;
3176#endif
3177        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3178        break;
3179    case MO_16:
3180        tcg_debug_assert((offset & 1) == 0);
3181        if (have_isa_3_00) {
3182            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3183        } else {
3184            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3185        }
3186        elt = extract32(offset, 1, 3);
3187#if !HOST_BIG_ENDIAN
3188        elt ^= 7;
3189#endif
3190        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3191        break;
3192    case MO_32:
3193        if (have_isa_3_00) {
3194            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3195            break;
3196        }
3197        tcg_debug_assert((offset & 3) == 0);
3198        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3199        elt = extract32(offset, 2, 2);
3200#if !HOST_BIG_ENDIAN
3201        elt ^= 3;
3202#endif
3203        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3204        break;
3205    case MO_64:
3206        if (have_vsx) {
3207            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3208            break;
3209        }
3210        tcg_debug_assert((offset & 7) == 0);
3211        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3212        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3213        elt = extract32(offset, 3, 1);
3214#if !HOST_BIG_ENDIAN
3215        elt = !elt;
3216#endif
3217        if (elt) {
3218            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3219        } else {
3220            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3221        }
3222        break;
3223    default:
3224        g_assert_not_reached();
3225    }
3226    return true;
3227}
3228
3229static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3230                           unsigned vecl, unsigned vece,
3231                           const TCGArg args[TCG_MAX_OP_ARGS],
3232                           const int const_args[TCG_MAX_OP_ARGS])
3233{
3234    static const uint32_t
3235        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3236        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3237        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3238        neg_op[4] = { 0, 0, VNEGW, VNEGD },
3239        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3240        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3241        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3242        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3243        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3244        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3245        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3246        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3247        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3248        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3249        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3250        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3251        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3252        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3253        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3254        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3255        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3256        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3257        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3258        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3259        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3260
3261    TCGType type = vecl + TCG_TYPE_V64;
3262    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3263    uint32_t insn;
3264
3265    switch (opc) {
3266    case INDEX_op_ld_vec:
3267        tcg_out_ld(s, type, a0, a1, a2);
3268        return;
3269    case INDEX_op_st_vec:
3270        tcg_out_st(s, type, a0, a1, a2);
3271        return;
3272    case INDEX_op_dupm_vec:
3273        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3274        return;
3275
3276    case INDEX_op_add_vec:
3277        insn = add_op[vece];
3278        break;
3279    case INDEX_op_sub_vec:
3280        insn = sub_op[vece];
3281        break;
3282    case INDEX_op_neg_vec:
3283        insn = neg_op[vece];
3284        a2 = a1;
3285        a1 = 0;
3286        break;
3287    case INDEX_op_mul_vec:
3288        insn = mul_op[vece];
3289        break;
3290    case INDEX_op_ssadd_vec:
3291        insn = ssadd_op[vece];
3292        break;
3293    case INDEX_op_sssub_vec:
3294        insn = sssub_op[vece];
3295        break;
3296    case INDEX_op_usadd_vec:
3297        insn = usadd_op[vece];
3298        break;
3299    case INDEX_op_ussub_vec:
3300        insn = ussub_op[vece];
3301        break;
3302    case INDEX_op_smin_vec:
3303        insn = smin_op[vece];
3304        break;
3305    case INDEX_op_umin_vec:
3306        insn = umin_op[vece];
3307        break;
3308    case INDEX_op_smax_vec:
3309        insn = smax_op[vece];
3310        break;
3311    case INDEX_op_umax_vec:
3312        insn = umax_op[vece];
3313        break;
3314    case INDEX_op_shlv_vec:
3315        insn = shlv_op[vece];
3316        break;
3317    case INDEX_op_shrv_vec:
3318        insn = shrv_op[vece];
3319        break;
3320    case INDEX_op_sarv_vec:
3321        insn = sarv_op[vece];
3322        break;
3323    case INDEX_op_and_vec:
3324        insn = VAND;
3325        break;
3326    case INDEX_op_or_vec:
3327        insn = VOR;
3328        break;
3329    case INDEX_op_xor_vec:
3330        insn = VXOR;
3331        break;
3332    case INDEX_op_andc_vec:
3333        insn = VANDC;
3334        break;
3335    case INDEX_op_not_vec:
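        /* a NOR a == ~a, so feed the single input to both operand fields. */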
3336        insn = VNOR;
3337        a2 = a1;
3338        break;
3339    case INDEX_op_orc_vec:
3340        insn = VORC;
3341        break;
3342    case INDEX_op_nand_vec:
3343        insn = VNAND;
3344        break;
3345    case INDEX_op_nor_vec:
3346        insn = VNOR;
3347        break;
3348    case INDEX_op_eqv_vec:
3349        insn = VEQV;
3350        break;
3351
3352    case INDEX_op_cmp_vec:
3353        switch (args[3]) {
3354        case TCG_COND_EQ:
3355            insn = eq_op[vece];
3356            break;
3357        case TCG_COND_NE:
3358            insn = ne_op[vece];
3359            break;
3360        case TCG_COND_GT:
3361            insn = gts_op[vece];
3362            break;
3363        case TCG_COND_GTU:
3364            insn = gtu_op[vece];
3365            break;
3366        default:
3367            g_assert_not_reached();
3368        }
3369        break;
3370
3371    case INDEX_op_bitsel_vec:
3372        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3373        return;
3374
3375    case INDEX_op_dup2_vec:
3376        assert(TCG_TARGET_REG_BITS == 32);
3377        /* With inputs a1 = xLxx, a2 = xHxx  */
3378        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
3379        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
3380        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
3381        return;
3382
3383    case INDEX_op_ppc_mrgh_vec:
3384        insn = mrgh_op[vece];
3385        break;
3386    case INDEX_op_ppc_mrgl_vec:
3387        insn = mrgl_op[vece];
3388        break;
3389    case INDEX_op_ppc_muleu_vec:
3390        insn = muleu_op[vece];
3391        break;
3392    case INDEX_op_ppc_mulou_vec:
3393        insn = mulou_op[vece];
3394        break;
3395    case INDEX_op_ppc_pkum_vec:
3396        insn = pkum_op[vece];
3397        break;
3398    case INDEX_op_rotlv_vec:
3399        insn = rotl_op[vece];
3400        break;
3401    case INDEX_op_ppc_msum_vec:
3402        tcg_debug_assert(vece == MO_16);
3403        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3404        return;
3405
3406    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
3407    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
3408    default:
3409        g_assert_not_reached();
3410    }
3411
3412    tcg_debug_assert(insn != 0);
3413    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3414}
3415
3416static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3417                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3418{
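    /*
     * Expand a shift/rotate by immediate into the shift-by-vector form,
     * splatting the count into a vector constant.
     */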
3419    TCGv_vec t1;
3420
3421    if (vece == MO_32) {
3422        /*
3423         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3424         * So using negative numbers gets us the 4th bit easily.
3425         */
3426        imm = sextract32(imm, 0, 5);
3427    } else {
3428        imm &= (8 << vece) - 1;
3429    }
3430
3431    /* Splat the count with byte elements, for xxspltib when 2.07 allows MO_64. */
3432    t1 = tcg_constant_vec(type, MO_8, imm);
3433    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3434              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3435}
3436
3437static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3438                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3439{
3440    bool need_swap = false, need_inv = false;
3441
3442    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3443
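    /*
     * Only EQ, GT and GTU (plus NE on ISA 3.0) are directly supported;
     * synthesize the remaining conditions by swapping the operands
     * and/or inverting the result.
     */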
3444    switch (cond) {
3445    case TCG_COND_EQ:
3446    case TCG_COND_GT:
3447    case TCG_COND_GTU:
3448        break;
3449    case TCG_COND_NE:
3450        if (have_isa_3_00 && vece <= MO_32) {
3451            break;
3452        }
3453        /* fall through */
3454    case TCG_COND_LE:
3455    case TCG_COND_LEU:
3456        need_inv = true;
3457        break;
3458    case TCG_COND_LT:
3459    case TCG_COND_LTU:
3460        need_swap = true;
3461        break;
3462    case TCG_COND_GE:
3463    case TCG_COND_GEU:
3464        need_swap = need_inv = true;
3465        break;
3466    default:
3467        g_assert_not_reached();
3468    }
3469
3470    if (need_inv) {
3471        cond = tcg_invert_cond(cond);
3472    }
3473    if (need_swap) {
3474        TCGv_vec t1;
3475        t1 = v1, v1 = v2, v2 = t1;
3476        cond = tcg_swap_cond(cond);
3477    }
3478
3479    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3480              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3481
3482    if (need_inv) {
3483        tcg_gen_not_vec(vece, v0, v0);
3484    }
3485}
3486
3487static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3488                           TCGv_vec v1, TCGv_vec v2)
3489{
3490    TCGv_vec t1 = tcg_temp_new_vec(type);
3491    TCGv_vec t2 = tcg_temp_new_vec(type);
3492    TCGv_vec c0, c16;
3493
3494    switch (vece) {
3495    case MO_8:
3496    case MO_16:
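        /*
         * Form double-width products of the even and odd elements,
         * re-interleave them with merge-high/merge-low at the wider
         * element size, then pack the low halves to obtain the modular
         * products at the original element size.
         */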
3497        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3498                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3499        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3500                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3501        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3502                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3503        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3504                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3505        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3506                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
3507        break;
3508
3509    case MO_32:
3510        tcg_debug_assert(!have_isa_2_07);
3511        /*
3512         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3513         * So using -16 is a quick way to represent 16.
3514         */
3515        c16 = tcg_constant_vec(type, MO_8, -16);
3516        c0 = tcg_constant_vec(type, MO_8, 0);
3517
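        /*
         * lo32(v1 * v2) = lo16*lo16 + ((hi16*lo16 + lo16*hi16) << 16):
         * vmulouh forms the first term, and vmsumuhm (with a zero
         * accumulator) forms the cross terms against the half-swapped v2.
         */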
3518        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
3519                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
3520        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
3521                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3522        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
3523                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
3524        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
3525                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
3526        tcg_gen_add_vec(MO_32, v0, t1, t2);
3527        break;
3528
3529    default:
3530        g_assert_not_reached();
3531    }
3532    tcg_temp_free_vec(t1);
3533    tcg_temp_free_vec(t2);
3534}
3535
3536void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3537                       TCGArg a0, ...)
3538{
3539    va_list va;
3540    TCGv_vec v0, v1, v2, t0;
3541    TCGArg a2;
3542
3543    va_start(va, a0);
3544    v0 = temp_tcgv_vec(arg_temp(a0));
3545    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3546    a2 = va_arg(va, TCGArg);
3547
3548    switch (opc) {
3549    case INDEX_op_shli_vec:
3550        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
3551        break;
3552    case INDEX_op_shri_vec:
3553        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
3554        break;
3555    case INDEX_op_sari_vec:
3556        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
3557        break;
3558    case INDEX_op_rotli_vec:
3559        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
3560        break;
3561    case INDEX_op_cmp_vec:
3562        v2 = temp_tcgv_vec(arg_temp(a2));
3563        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3564        break;
3565    case INDEX_op_mul_vec:
3566        v2 = temp_tcgv_vec(arg_temp(a2));
3567        expand_vec_mul(type, vece, v0, v1, v2);
3568        break;
3569    case INDEX_op_rotrv_vec:
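        /* Rotate right by v2 is rotate left by -v2, modulo the element size. */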
3570        v2 = temp_tcgv_vec(arg_temp(a2));
3571        t0 = tcg_temp_new_vec(type);
3572        tcg_gen_neg_vec(vece, t0, v2);
3573        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3574        tcg_temp_free_vec(t0);
3575        break;
3576    default:
3577        g_assert_not_reached();
3578    }
3579    va_end(va);
3580}
3581
3582static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3583{
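    /* Return the operand constraint set for each supported opcode. */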
3584    switch (op) {
3585    case INDEX_op_goto_ptr:
3586        return C_O0_I1(r);
3587
3588    case INDEX_op_ld8u_i32:
3589    case INDEX_op_ld8s_i32:
3590    case INDEX_op_ld16u_i32:
3591    case INDEX_op_ld16s_i32:
3592    case INDEX_op_ld_i32:
3593    case INDEX_op_ctpop_i32:
3594    case INDEX_op_neg_i32:
3595    case INDEX_op_not_i32:
3596    case INDEX_op_ext8s_i32:
3597    case INDEX_op_ext16s_i32:
3598    case INDEX_op_bswap16_i32:
3599    case INDEX_op_bswap32_i32:
3600    case INDEX_op_extract_i32:
3601    case INDEX_op_ld8u_i64:
3602    case INDEX_op_ld8s_i64:
3603    case INDEX_op_ld16u_i64:
3604    case INDEX_op_ld16s_i64:
3605    case INDEX_op_ld32u_i64:
3606    case INDEX_op_ld32s_i64:
3607    case INDEX_op_ld_i64:
3608    case INDEX_op_ctpop_i64:
3609    case INDEX_op_neg_i64:
3610    case INDEX_op_not_i64:
3611    case INDEX_op_ext8s_i64:
3612    case INDEX_op_ext16s_i64:
3613    case INDEX_op_ext32s_i64:
3614    case INDEX_op_ext_i32_i64:
3615    case INDEX_op_extu_i32_i64:
3616    case INDEX_op_bswap16_i64:
3617    case INDEX_op_bswap32_i64:
3618    case INDEX_op_bswap64_i64:
3619    case INDEX_op_extract_i64:
3620        return C_O1_I1(r, r);
3621
3622    case INDEX_op_st8_i32:
3623    case INDEX_op_st16_i32:
3624    case INDEX_op_st_i32:
3625    case INDEX_op_st8_i64:
3626    case INDEX_op_st16_i64:
3627    case INDEX_op_st32_i64:
3628    case INDEX_op_st_i64:
3629        return C_O0_I2(r, r);
3630
3631    case INDEX_op_add_i32:
3632    case INDEX_op_and_i32:
3633    case INDEX_op_or_i32:
3634    case INDEX_op_xor_i32:
3635    case INDEX_op_andc_i32:
3636    case INDEX_op_orc_i32:
3637    case INDEX_op_eqv_i32:
3638    case INDEX_op_shl_i32:
3639    case INDEX_op_shr_i32:
3640    case INDEX_op_sar_i32:
3641    case INDEX_op_rotl_i32:
3642    case INDEX_op_rotr_i32:
3643    case INDEX_op_setcond_i32:
3644    case INDEX_op_and_i64:
3645    case INDEX_op_andc_i64:
3646    case INDEX_op_shl_i64:
3647    case INDEX_op_shr_i64:
3648    case INDEX_op_sar_i64:
3649    case INDEX_op_rotl_i64:
3650    case INDEX_op_rotr_i64:
3651    case INDEX_op_setcond_i64:
3652        return C_O1_I2(r, r, ri);
3653
3654    case INDEX_op_mul_i32:
3655    case INDEX_op_mul_i64:
3656        return C_O1_I2(r, r, rI);
3657
3658    case INDEX_op_div_i32:
3659    case INDEX_op_divu_i32:
3660    case INDEX_op_rem_i32:
3661    case INDEX_op_remu_i32:
3662    case INDEX_op_nand_i32:
3663    case INDEX_op_nor_i32:
3664    case INDEX_op_muluh_i32:
3665    case INDEX_op_mulsh_i32:
3666    case INDEX_op_orc_i64:
3667    case INDEX_op_eqv_i64:
3668    case INDEX_op_nand_i64:
3669    case INDEX_op_nor_i64:
3670    case INDEX_op_div_i64:
3671    case INDEX_op_divu_i64:
3672    case INDEX_op_rem_i64:
3673    case INDEX_op_remu_i64:
3674    case INDEX_op_mulsh_i64:
3675    case INDEX_op_muluh_i64:
3676        return C_O1_I2(r, r, r);
3677
3678    case INDEX_op_sub_i32:
3679        return C_O1_I2(r, rI, ri);
3680    case INDEX_op_add_i64:
3681        return C_O1_I2(r, r, rT);
3682    case INDEX_op_or_i64:
3683    case INDEX_op_xor_i64:
3684        return C_O1_I2(r, r, rU);
3685    case INDEX_op_sub_i64:
3686        return C_O1_I2(r, rI, rT);
3687    case INDEX_op_clz_i32:
3688    case INDEX_op_ctz_i32:
3689    case INDEX_op_clz_i64:
3690    case INDEX_op_ctz_i64:
3691        return C_O1_I2(r, r, rZW);
3692
3693    case INDEX_op_brcond_i32:
3694    case INDEX_op_brcond_i64:
3695        return C_O0_I2(r, ri);
3696
3697    case INDEX_op_movcond_i32:
3698    case INDEX_op_movcond_i64:
3699        return C_O1_I4(r, r, ri, rZ, rZ);
3700    case INDEX_op_deposit_i32:
3701    case INDEX_op_deposit_i64:
3702        return C_O1_I2(r, 0, rZ);
3703    case INDEX_op_brcond2_i32:
3704        return C_O0_I4(r, r, ri, ri);
3705    case INDEX_op_setcond2_i32:
3706        return C_O1_I4(r, r, r, ri, ri);
3707    case INDEX_op_add2_i64:
3708    case INDEX_op_add2_i32:
3709        return C_O2_I4(r, r, r, r, rI, rZM);
3710    case INDEX_op_sub2_i64:
3711    case INDEX_op_sub2_i32:
3712        return C_O2_I4(r, r, rI, rZM, r, r);
3713
3714    case INDEX_op_qemu_ld_i32:
3715        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3716                ? C_O1_I1(r, r)
3717                : C_O1_I2(r, r, r));
3718
3719    case INDEX_op_qemu_st_i32:
3720        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3721                ? C_O0_I2(r, r)
3722                : C_O0_I3(r, r, r));
3723
3724    case INDEX_op_qemu_ld_i64:
3725        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
3726                : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, r)
3727                : C_O2_I2(r, r, r, r));
3728
3729    case INDEX_op_qemu_st_i64:
3730        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
3731                : TARGET_LONG_BITS == 32 ? C_O0_I3(r, r, r)
3732                : C_O0_I4(r, r, r, r));
3733
3734    case INDEX_op_add_vec:
3735    case INDEX_op_sub_vec:
3736    case INDEX_op_mul_vec:
3737    case INDEX_op_and_vec:
3738    case INDEX_op_or_vec:
3739    case INDEX_op_xor_vec:
3740    case INDEX_op_andc_vec:
3741    case INDEX_op_orc_vec:
3742    case INDEX_op_nor_vec:
3743    case INDEX_op_eqv_vec:
3744    case INDEX_op_nand_vec:
3745    case INDEX_op_cmp_vec:
3746    case INDEX_op_ssadd_vec:
3747    case INDEX_op_sssub_vec:
3748    case INDEX_op_usadd_vec:
3749    case INDEX_op_ussub_vec:
3750    case INDEX_op_smax_vec:
3751    case INDEX_op_smin_vec:
3752    case INDEX_op_umax_vec:
3753    case INDEX_op_umin_vec:
3754    case INDEX_op_shlv_vec:
3755    case INDEX_op_shrv_vec:
3756    case INDEX_op_sarv_vec:
3757    case INDEX_op_rotlv_vec:
3758    case INDEX_op_rotrv_vec:
3759    case INDEX_op_ppc_mrgh_vec:
3760    case INDEX_op_ppc_mrgl_vec:
3761    case INDEX_op_ppc_muleu_vec:
3762    case INDEX_op_ppc_mulou_vec:
3763    case INDEX_op_ppc_pkum_vec:
3764    case INDEX_op_dup2_vec:
3765        return C_O1_I2(v, v, v);
3766
3767    case INDEX_op_not_vec:
3768    case INDEX_op_neg_vec:
3769        return C_O1_I1(v, v);
3770
3771    case INDEX_op_dup_vec:
3772        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
3773
3774    case INDEX_op_ld_vec:
3775    case INDEX_op_dupm_vec:
3776        return C_O1_I1(v, r);
3777
3778    case INDEX_op_st_vec:
3779        return C_O0_I2(v, r);
3780
3781    case INDEX_op_bitsel_vec:
3782    case INDEX_op_ppc_msum_vec:
3783        return C_O1_I3(v, v, v, v);
3784
3785    default:
3786        g_assert_not_reached();
3787    }
3788}
3789
3790static void tcg_target_init(TCGContext *s)
3791{
3792    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3793    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3794
3795    have_isa = tcg_isa_base;
3796    if (hwcap & PPC_FEATURE_ARCH_2_06) {
3797        have_isa = tcg_isa_2_06;
3798    }
3799#ifdef PPC_FEATURE2_ARCH_2_07
3800    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
3801        have_isa = tcg_isa_2_07;
3802    }
3803#endif
3804#ifdef PPC_FEATURE2_ARCH_3_00
3805    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
3806        have_isa = tcg_isa_3_00;
3807    }
3808#endif
3809#ifdef PPC_FEATURE2_ARCH_3_10
3810    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
3811        have_isa = tcg_isa_3_10;
3812    }
3813#endif
3814
3815#ifdef PPC_FEATURE2_HAS_ISEL
3816    /* Prefer the kernel's explicit indication of ISEL support. */
3817    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
3818#else
3819    /* Fall back to knowing Power7 (2.06) has ISEL. */
3820    have_isel = have_isa_2_06;
3821#endif
3822
3823    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
3824        have_altivec = true;
3825        /* We only care about the portion of VSX that overlaps Altivec. */
3826        if (hwcap & PPC_FEATURE_HAS_VSX) {
3827            have_vsx = true;
3828        }
3829    }
3830
3831    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3832    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3833    if (have_altivec) {
3834        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3835        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3836    }
3837
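    /* Mark the call-clobbered registers: r0, r2-r12 and v0-v19. */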
3838    tcg_target_call_clobber_regs = 0;
3839    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3840    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3841    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3842    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3843    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3844    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3845    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
3846    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3847    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3848    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3849    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3850    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
3851
3852    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3853    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3854    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3855    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3856    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3857    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3858    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3859    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3860    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
3861    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
3862    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
3863    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
3864    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
3865    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
3866    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
3867    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
3868    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3869    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3870    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3871    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3872
3873    s->reserved_regs = 0;
3874    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
3875    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
3876#if defined(_CALL_SYSV)
3877    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
3878#endif
3879#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
3880    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
3881#endif
3882    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
3883    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
3884    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
3885    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
3886    if (USE_REG_TB) {
3887        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);  /* tb->tc_ptr */
3888    }
3889}
3890
3891#ifdef __ELF__
3892typedef struct {
3893    DebugFrameCIE cie;
3894    DebugFrameFDEHeader fde;
3895    uint8_t fde_def_cfa[4];
3896    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
3897} DebugFrame;
3898
3899/* We're expecting a 2 byte uleb128 encoded value.  */
3900QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3901
3902#if TCG_TARGET_REG_BITS == 64
3903# define ELF_HOST_MACHINE EM_PPC64
3904#else
3905# define ELF_HOST_MACHINE EM_PPC
3906#endif
3907
3908static DebugFrame debug_frame = {
3909    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3910    .cie.id = -1,
3911    .cie.version = 1,
3912    .cie.code_align = 1,
3913    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
3914    .cie.return_column = 65,
3915
3916    /* Total FDE size does not include the "len" member.  */
3917    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
3918
3919    .fde_def_cfa = {
3920        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
3921        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3922        (FRAME_SIZE >> 7)
3923    },
3924    .fde_reg_ofs = {
3925        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
3926        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
3927    }
3928};
3929
3930void tcg_register_jit(const void *buf, size_t buf_size)
3931{
3932    uint8_t *p = &debug_frame.fde_reg_ofs[3];
3933    int i;
3934
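    /*
     * Each callee-saved register gets a DW_CFA_offset pair: 0x80 + regno,
     * then its frame slot scaled by the CIE data alignment.
     */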
3935    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
3936        p[0] = 0x80 + tcg_target_callee_save_regs[i];
3937        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
3938    }
3939
3940    debug_frame.fde.func_start = (uintptr_t)buf;
3941    debug_frame.fde.func_len = buf_size;
3942
3943    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3944}
3945#endif /* __ELF__ */
3953