1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25#include "elf.h"
26#include "../tcg-pool.c.inc"
27#include "../tcg-ldst.c.inc"
28
29/*
30 * Standardize on the _CALL_FOO symbols used by GCC:
31 * Apple Xcode does not define _CALL_DARWIN.
32 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV or _CALL_AIX.
33 */
34#if TCG_TARGET_REG_BITS == 64
35# ifdef _CALL_AIX
36    /* ok */
37# elif defined(_CALL_ELF) && _CALL_ELF == 1
38#  define _CALL_AIX
39# elif defined(_CALL_ELF) && _CALL_ELF == 2
40    /* ok */
41# else
42#  error "Unknown ABI"
43# endif
44#else
45# if defined(_CALL_SYSV) || defined(_CALL_DARWIN)
46    /* ok */
47# elif defined(__APPLE__)
48#  define _CALL_DARWIN
49# elif defined(__ELF__)
50#  define _CALL_SYSV
51# else
52#  error "Unknown ABI"
53# endif
54#endif
55
56#if TCG_TARGET_REG_BITS == 64
57# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
58# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_NORMAL
59#else
60# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
61# define TCG_TARGET_CALL_RET_I128  TCG_CALL_RET_BY_REF
62#endif
63#ifdef _CALL_SYSV
64# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
65# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_BY_REF
66#else
67# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
68# define TCG_TARGET_CALL_ARG_I128  TCG_CALL_ARG_NORMAL
69#endif
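
/*
 * Under the 32-bit SysV ABI, 64-bit integer arguments occupy aligned
 * register pairs (r3:r4, r5:r6, ...), which is what TCG_CALL_ARG_EVEN
 * requests above, and I128 arguments are passed by reference.
 */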
70
71/* For some memory operations, we need a scratch that isn't R0.  For the AIX
72   calling convention, we can re-use the TOC register since we'll be reloading
73   it at every call.  Otherwise R12 will do nicely as neither a call-saved
74   register nor a parameter register.  */
75#ifdef _CALL_AIX
76# define TCG_REG_TMP1   TCG_REG_R2
77#else
78# define TCG_REG_TMP1   TCG_REG_R12
79#endif
80#define TCG_REG_TMP2    TCG_REG_R11
81
82#define TCG_VEC_TMP1    TCG_REG_V0
83#define TCG_VEC_TMP2    TCG_REG_V1
84
85#define TCG_REG_TB     TCG_REG_R31
86#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00)
87
88/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
89#define SZP  ((int)sizeof(void *))
90
91/* Shorthand for size of a register.  */
92#define SZR  (TCG_TARGET_REG_BITS / 8)
93
94#define TCG_CT_CONST_S16  0x100
95#define TCG_CT_CONST_U16  0x200
96#define TCG_CT_CONST_S32  0x400
97#define TCG_CT_CONST_U32  0x800
98#define TCG_CT_CONST_ZERO 0x1000
99#define TCG_CT_CONST_MONE 0x2000
100#define TCG_CT_CONST_WSZ  0x4000
101#define TCG_CT_CONST_CMP  0x8000
102
103#define ALL_GENERAL_REGS  0xffffffffu
104#define ALL_VECTOR_REGS   0xffffffff00000000ull
105
106#ifndef R_PPC64_PCREL34
107#define R_PPC64_PCREL34  132
108#endif
109
110#define have_isel  (cpuinfo & CPUINFO_ISEL)
111
112#define TCG_GUEST_BASE_REG  TCG_REG_R30
113
114#ifdef CONFIG_DEBUG_TCG
115static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
116    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
117    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
118    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
119    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
120    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
121    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
122    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
123    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
124};
125#endif
126
127static const int tcg_target_reg_alloc_order[] = {
128    TCG_REG_R14,  /* call saved registers */
129    TCG_REG_R15,
130    TCG_REG_R16,
131    TCG_REG_R17,
132    TCG_REG_R18,
133    TCG_REG_R19,
134    TCG_REG_R20,
135    TCG_REG_R21,
136    TCG_REG_R22,
137    TCG_REG_R23,
138    TCG_REG_R24,
139    TCG_REG_R25,
140    TCG_REG_R26,
141    TCG_REG_R27,
142    TCG_REG_R28,
143    TCG_REG_R29,
144    TCG_REG_R30,
145    TCG_REG_R31,
146    TCG_REG_R12,  /* call clobbered, non-arguments */
147    TCG_REG_R11,
148    TCG_REG_R2,
149    TCG_REG_R13,
150    TCG_REG_R10,  /* call clobbered, arguments */
151    TCG_REG_R9,
152    TCG_REG_R8,
153    TCG_REG_R7,
154    TCG_REG_R6,
155    TCG_REG_R5,
156    TCG_REG_R4,
157    TCG_REG_R3,
158
159    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
160    TCG_REG_V2,   /* call clobbered, vectors */
161    TCG_REG_V3,
162    TCG_REG_V4,
163    TCG_REG_V5,
164    TCG_REG_V6,
165    TCG_REG_V7,
166    TCG_REG_V8,
167    TCG_REG_V9,
168    TCG_REG_V10,
169    TCG_REG_V11,
170    TCG_REG_V12,
171    TCG_REG_V13,
172    TCG_REG_V14,
173    TCG_REG_V15,
174    TCG_REG_V16,
175    TCG_REG_V17,
176    TCG_REG_V18,
177    TCG_REG_V19,
178};
179
180static const int tcg_target_call_iarg_regs[] = {
181    TCG_REG_R3,
182    TCG_REG_R4,
183    TCG_REG_R5,
184    TCG_REG_R6,
185    TCG_REG_R7,
186    TCG_REG_R8,
187    TCG_REG_R9,
188    TCG_REG_R10
189};
190
191static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
192{
193    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
194    tcg_debug_assert(slot >= 0 && slot <= 1);
195    return TCG_REG_R3 + slot;
196}
197
198static const int tcg_target_callee_save_regs[] = {
199#ifdef _CALL_DARWIN
200    TCG_REG_R11,
201#endif
202    TCG_REG_R14,
203    TCG_REG_R15,
204    TCG_REG_R16,
205    TCG_REG_R17,
206    TCG_REG_R18,
207    TCG_REG_R19,
208    TCG_REG_R20,
209    TCG_REG_R21,
210    TCG_REG_R22,
211    TCG_REG_R23,
212    TCG_REG_R24,
213    TCG_REG_R25,
214    TCG_REG_R26,
215    TCG_REG_R27, /* currently used for the global env */
216    TCG_REG_R28,
217    TCG_REG_R29,
218    TCG_REG_R30,
219    TCG_REG_R31
220};
221
222/* For PPC, we use TB+4 instead of TB as the base. */
223static inline ptrdiff_t ppc_tbrel_diff(TCGContext *s, const void *target)
224{
225    return tcg_tbrel_diff(s, target) - 4;
226}
227
228static inline bool in_range_b(tcg_target_long target)
229{
230    return target == sextract64(target, 0, 26);
231}
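
/*
 * The I-form branch carries a 24-bit word displacement, i.e. a 26-bit
 * signed byte offset, so direct branches reach +/- 32 MiB.  E.g. a
 * displacement of 0x1fffffc is in range while 0x2000000 is not.
 */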
232
233static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
234                               const tcg_insn_unit *target)
235{
236    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
237    tcg_debug_assert(in_range_b(disp));
238    return disp & 0x3fffffc;
239}
240
241static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
242{
243    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
244    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
245
246    if (in_range_b(disp)) {
247        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
248        return true;
249    }
250    return false;
251}
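
/*
 * The ~0x3fffffc mask leaves the primary opcode (top 6 bits) and the
 * AA/LK bits (low 2 bits) of the original branch intact; only the LI
 * displacement field is rewritten.
 */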
252
253static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
254                               const tcg_insn_unit *target)
255{
256    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
257    tcg_debug_assert(disp == (int16_t) disp);
258    return disp & 0xfffc;
259}
260
261static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
262{
263    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
264    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
265
266    if (disp == (int16_t) disp) {
267        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
268        return true;
269    }
270    return false;
271}
272
273static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
274{
275    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
276    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
277
278    if (disp == sextract64(disp, 0, 34)) {
279        src_rw[0] = (src_rw[0] & ~0x3ffff) | ((disp >> 16) & 0x3ffff);
280        src_rw[1] = (src_rw[1] & ~0xffff) | (disp & 0xffff);
281        return true;
282    }
283    return false;
284}
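
/*
 * This patches a prefixed insn pair: the high 18 bits of the 34-bit
 * displacement go into the prefix word and the low 16 bits into the
 * suffix, matching the immediate split in tcg_out_8ls_d() below.
 */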
285
286static bool mask_operand(uint32_t c, int *mb, int *me);
287static bool mask64_operand(uint64_t c, int *mb, int *me);
288
289/* test if a constant matches the constraint */
290static bool tcg_target_const_match(int64_t sval, int ct,
291                                   TCGType type, TCGCond cond, int vece)
292{
293    uint64_t uval = sval;
294    int mb, me;
295
296    if (ct & TCG_CT_CONST) {
297        return 1;
298    }
299
300    if (type == TCG_TYPE_I32) {
301        uval = (uint32_t)sval;
302        sval = (int32_t)sval;
303    }
304
305    if (ct & TCG_CT_CONST_CMP) {
306        switch (cond) {
307        case TCG_COND_EQ:
308        case TCG_COND_NE:
309            ct |= TCG_CT_CONST_S16 | TCG_CT_CONST_U16;
310            break;
311        case TCG_COND_LT:
312        case TCG_COND_GE:
313        case TCG_COND_LE:
314        case TCG_COND_GT:
315            ct |= TCG_CT_CONST_S16;
316            break;
317        case TCG_COND_LTU:
318        case TCG_COND_GEU:
319        case TCG_COND_LEU:
320        case TCG_COND_GTU:
321            ct |= TCG_CT_CONST_U16;
322            break;
323        case TCG_COND_TSTEQ:
324        case TCG_COND_TSTNE:
325            if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) {
326                return 1;
327            }
328            if (uval == (uint32_t)uval && mask_operand(uval, &mb, &me)) {
329                return 1;
330            }
331            if (TCG_TARGET_REG_BITS == 64 &&
332                mask64_operand(uval << clz64(uval), &mb, &me)) {
333                return 1;
334            }
335            return 0;
336        default:
337            g_assert_not_reached();
338        }
339    }
340
341    if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) {
342        return 1;
343    }
344    if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
345        return 1;
346    }
347    if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
348        return 1;
349    }
350    if ((ct & TCG_CT_CONST_U32) && uval == (uint32_t)uval) {
351        return 1;
352    }
353    if ((ct & TCG_CT_CONST_ZERO) && sval == 0) {
354        return 1;
355    }
356    if ((ct & TCG_CT_CONST_MONE) && sval == -1) {
357        return 1;
358    }
359    if ((ct & TCG_CT_CONST_WSZ) && sval == (type == TCG_TYPE_I32 ? 32 : 64)) {
360        return 1;
361    }
362    return 0;
363}
364
365#define OPCD(opc) ((opc)<<26)
366#define XO19(opc) (OPCD(19)|((opc)<<1))
367#define MD30(opc) (OPCD(30)|((opc)<<2))
368#define MDS30(opc) (OPCD(30)|((opc)<<1))
369#define XO31(opc) (OPCD(31)|((opc)<<1))
370#define XO58(opc) (OPCD(58)|(opc))
371#define XO62(opc) (OPCD(62)|(opc))
372#define VX4(opc)  (OPCD(4)|(opc))
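
/*
 * Worked example: ADD is primary opcode 31 with extended opcode 266,
 * so XO31(266) = (31 << 26) | (266 << 1) = 0x7c000214; the register
 * fields are OR'ed in separately via the RT/RA/RB macros below.
 */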
373
374#define B      OPCD( 18)
375#define BC     OPCD( 16)
376
377#define LBZ    OPCD( 34)
378#define LHZ    OPCD( 40)
379#define LHA    OPCD( 42)
380#define LWZ    OPCD( 32)
381#define LWZUX  XO31( 55)
382#define LD     XO58(  0)
383#define LDX    XO31( 21)
384#define LDU    XO58(  1)
385#define LDUX   XO31( 53)
386#define LWA    XO58(  2)
387#define LWAX   XO31(341)
388#define LQ     OPCD( 56)
389
390#define STB    OPCD( 38)
391#define STH    OPCD( 44)
392#define STW    OPCD( 36)
393#define STD    XO62(  0)
394#define STDU   XO62(  1)
395#define STDX   XO31(149)
396#define STQ    XO62(  2)
397
398#define PLWA   OPCD( 41)
399#define PLD    OPCD( 57)
400#define PLXSD  OPCD( 42)
401#define PLXV   OPCD(25 * 2 + 1)  /* force tx=1 */
402
403#define PSTD   OPCD( 61)
404#define PSTXSD OPCD( 46)
405#define PSTXV  OPCD(27 * 2 + 1)  /* force sx=1 */
406
407#define ADDIC  OPCD( 12)
408#define ADDI   OPCD( 14)
409#define ADDIS  OPCD( 15)
410#define ORI    OPCD( 24)
411#define ORIS   OPCD( 25)
412#define XORI   OPCD( 26)
413#define XORIS  OPCD( 27)
414#define ANDI   OPCD( 28)
415#define ANDIS  OPCD( 29)
416#define MULLI  OPCD(  7)
417#define CMPLI  OPCD( 10)
418#define CMPI   OPCD( 11)
419#define SUBFIC OPCD( 8)
420
421#define LWZU   OPCD( 33)
422#define STWU   OPCD( 37)
423
424#define RLWIMI OPCD( 20)
425#define RLWINM OPCD( 21)
426#define RLWNM  OPCD( 23)
427
428#define RLDICL MD30(  0)
429#define RLDICR MD30(  1)
430#define RLDIMI MD30(  3)
431#define RLDCL  MDS30( 8)
432
433#define BCLR   XO19( 16)
434#define BCCTR  XO19(528)
435#define CRAND  XO19(257)
436#define CRANDC XO19(129)
437#define CRNAND XO19(225)
438#define CROR   XO19(449)
439#define CRNOR  XO19( 33)
440#define ADDPCIS XO19( 2)
441
442#define EXTSB  XO31(954)
443#define EXTSH  XO31(922)
444#define EXTSW  XO31(986)
445#define ADD    XO31(266)
446#define ADDE   XO31(138)
447#define ADDME  XO31(234)
448#define ADDZE  XO31(202)
449#define ADDC   XO31( 10)
450#define AND    XO31( 28)
451#define SUBF   XO31( 40)
452#define SUBFC  XO31(  8)
453#define SUBFE  XO31(136)
454#define SUBFME XO31(232)
455#define SUBFZE XO31(200)
456#define OR     XO31(444)
457#define XOR    XO31(316)
458#define MULLW  XO31(235)
459#define MULHW  XO31( 75)
460#define MULHWU XO31( 11)
461#define DIVW   XO31(491)
462#define DIVWU  XO31(459)
463#define MODSW  XO31(779)
464#define MODUW  XO31(267)
465#define CMP    XO31(  0)
466#define CMPL   XO31( 32)
467#define LHBRX  XO31(790)
468#define LWBRX  XO31(534)
469#define LDBRX  XO31(532)
470#define STHBRX XO31(918)
471#define STWBRX XO31(662)
472#define STDBRX XO31(660)
473#define MFSPR  XO31(339)
474#define MTSPR  XO31(467)
475#define SRAWI  XO31(824)
476#define NEG    XO31(104)
477#define MFCR   XO31( 19)
478#define MFOCRF (MFCR | (1u << 20))
479#define NOR    XO31(124)
480#define CNTLZW XO31( 26)
481#define CNTLZD XO31( 58)
482#define CNTTZW XO31(538)
483#define CNTTZD XO31(570)
484#define CNTPOPW XO31(378)
485#define CNTPOPD XO31(506)
486#define ANDC   XO31( 60)
487#define ORC    XO31(412)
488#define EQV    XO31(284)
489#define NAND   XO31(476)
490#define ISEL   XO31( 15)
491
492#define MULLD  XO31(233)
493#define MULHD  XO31( 73)
494#define MULHDU XO31(  9)
495#define DIVD   XO31(489)
496#define DIVDU  XO31(457)
497#define MODSD  XO31(777)
498#define MODUD  XO31(265)
499
500#define LBZX   XO31( 87)
501#define LHZX   XO31(279)
502#define LHAX   XO31(343)
503#define LWZX   XO31( 23)
504#define STBX   XO31(215)
505#define STHX   XO31(407)
506#define STWX   XO31(151)
507
508#define EIEIO  XO31(854)
509#define HWSYNC XO31(598)
510#define LWSYNC (HWSYNC | (1u << 21))
511
512#define SPR(a, b) ((((a)<<5)|(b))<<11)
513#define LR     SPR(8, 0)
514#define CTR    SPR(9, 0)
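
/*
 * The two 5-bit halves of an SPR number are swapped in the encoding:
 * SPR(a, b) places the low half 'a' at bit 16 and the high half 'b'
 * at bit 11, so LR (SPR 8) is SPR(8, 0) and CTR (SPR 9) is SPR(9, 0).
 */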
515
516#define SLW    XO31( 24)
517#define SRW    XO31(536)
518#define SRAW   XO31(792)
519
520#define SLD    XO31( 27)
521#define SRD    XO31(539)
522#define SRAD   XO31(794)
523#define SRADI  XO31(413<<1)
524
525#define BRH    XO31(219)
526#define BRW    XO31(155)
527#define BRD    XO31(187)
528
529#define TW     XO31( 4)
530#define TRAP   (TW | TO(31))
531
532#define SETBC    XO31(384)  /* v3.10 */
533#define SETBCR   XO31(416)  /* v3.10 */
534#define SETNBC   XO31(448)  /* v3.10 */
535#define SETNBCR  XO31(480)  /* v3.10 */
536
537#define NOP    ORI  /* ori 0,0,0 */
538
539#define LVX        XO31(103)
540#define LVEBX      XO31(7)
541#define LVEHX      XO31(39)
542#define LVEWX      XO31(71)
543#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
544#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
545#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
546#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
547#define LXSD       (OPCD(57) | 2)   /* v3.00 */
548#define LXVWSX     (XO31(364) | 1)  /* v3.00, force tx=1 */
549
550#define STVX       XO31(231)
551#define STVEWX     XO31(199)
552#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
553#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
554#define STXV       (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
555#define STXSD      (OPCD(61) | 2)   /* v3.00 */
556
557#define VADDSBS    VX4(768)
558#define VADDUBS    VX4(512)
559#define VADDUBM    VX4(0)
560#define VADDSHS    VX4(832)
561#define VADDUHS    VX4(576)
562#define VADDUHM    VX4(64)
563#define VADDSWS    VX4(896)
564#define VADDUWS    VX4(640)
565#define VADDUWM    VX4(128)
566#define VADDUDM    VX4(192)       /* v2.07 */
567
568#define VSUBSBS    VX4(1792)
569#define VSUBUBS    VX4(1536)
570#define VSUBUBM    VX4(1024)
571#define VSUBSHS    VX4(1856)
572#define VSUBUHS    VX4(1600)
573#define VSUBUHM    VX4(1088)
574#define VSUBSWS    VX4(1920)
575#define VSUBUWS    VX4(1664)
576#define VSUBUWM    VX4(1152)
577#define VSUBUDM    VX4(1216)      /* v2.07 */
578
579#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
580#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */
581
582#define VMAXSB     VX4(258)
583#define VMAXSH     VX4(322)
584#define VMAXSW     VX4(386)
585#define VMAXSD     VX4(450)       /* v2.07 */
586#define VMAXUB     VX4(2)
587#define VMAXUH     VX4(66)
588#define VMAXUW     VX4(130)
589#define VMAXUD     VX4(194)       /* v2.07 */
590#define VMINSB     VX4(770)
591#define VMINSH     VX4(834)
592#define VMINSW     VX4(898)
593#define VMINSD     VX4(962)       /* v2.07 */
594#define VMINUB     VX4(514)
595#define VMINUH     VX4(578)
596#define VMINUW     VX4(642)
597#define VMINUD     VX4(706)       /* v2.07 */
598
599#define VCMPEQUB   VX4(6)
600#define VCMPEQUH   VX4(70)
601#define VCMPEQUW   VX4(134)
602#define VCMPEQUD   VX4(199)       /* v2.07 */
603#define VCMPGTSB   VX4(774)
604#define VCMPGTSH   VX4(838)
605#define VCMPGTSW   VX4(902)
606#define VCMPGTSD   VX4(967)       /* v2.07 */
607#define VCMPGTUB   VX4(518)
608#define VCMPGTUH   VX4(582)
609#define VCMPGTUW   VX4(646)
610#define VCMPGTUD   VX4(711)       /* v2.07 */
611#define VCMPNEB    VX4(7)         /* v3.00 */
612#define VCMPNEH    VX4(71)        /* v3.00 */
613#define VCMPNEW    VX4(135)       /* v3.00 */
614
615#define VSLB       VX4(260)
616#define VSLH       VX4(324)
617#define VSLW       VX4(388)
618#define VSLD       VX4(1476)      /* v2.07 */
619#define VSRB       VX4(516)
620#define VSRH       VX4(580)
621#define VSRW       VX4(644)
622#define VSRD       VX4(1732)      /* v2.07 */
623#define VSRAB      VX4(772)
624#define VSRAH      VX4(836)
625#define VSRAW      VX4(900)
626#define VSRAD      VX4(964)       /* v2.07 */
627#define VRLB       VX4(4)
628#define VRLH       VX4(68)
629#define VRLW       VX4(132)
630#define VRLD       VX4(196)       /* v2.07 */
631
632#define VMULEUB    VX4(520)
633#define VMULEUH    VX4(584)
634#define VMULEUW    VX4(648)       /* v2.07 */
635#define VMULOUB    VX4(8)
636#define VMULOUH    VX4(72)
637#define VMULOUW    VX4(136)       /* v2.07 */
638#define VMULUWM    VX4(137)       /* v2.07 */
639#define VMULLD     VX4(457)       /* v3.10 */
640#define VMSUMUHM   VX4(38)
641
642#define VMRGHB     VX4(12)
643#define VMRGHH     VX4(76)
644#define VMRGHW     VX4(140)
645#define VMRGLB     VX4(268)
646#define VMRGLH     VX4(332)
647#define VMRGLW     VX4(396)
648
649#define VPKUHUM    VX4(14)
650#define VPKUWUM    VX4(78)
651
652#define VAND       VX4(1028)
653#define VANDC      VX4(1092)
654#define VNOR       VX4(1284)
655#define VOR        VX4(1156)
656#define VXOR       VX4(1220)
657#define VEQV       VX4(1668)      /* v2.07 */
658#define VNAND      VX4(1412)      /* v2.07 */
659#define VORC       VX4(1348)      /* v2.07 */
660
661#define VSPLTB     VX4(524)
662#define VSPLTH     VX4(588)
663#define VSPLTW     VX4(652)
664#define VSPLTISB   VX4(780)
665#define VSPLTISH   VX4(844)
666#define VSPLTISW   VX4(908)
667
668#define VSLDOI     VX4(44)
669
670#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
671#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
672#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
673
674#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
675#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
676#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
677#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
678#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
679#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */
680
681#define RT(r) ((r)<<21)
682#define RS(r) ((r)<<21)
683#define RA(r) ((r)<<16)
684#define RB(r) ((r)<<11)
685#define TO(t) ((t)<<21)
686#define SH(s) ((s)<<11)
687#define MB(b) ((b)<<6)
688#define ME(e) ((e)<<1)
689#define BO(o) ((o)<<21)
690#define MB64(b) ((b)<<5)
691#define FXM(b) (1 << (19 - (b)))
692
693#define VRT(r)  (((r) & 31) << 21)
694#define VRA(r)  (((r) & 31) << 16)
695#define VRB(r)  (((r) & 31) << 11)
696#define VRC(r)  (((r) & 31) <<  6)
697
698#define LK    1
699
700#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
701#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
702#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
703#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
704
705#define BF(n)    ((n)<<23)
706#define BI(n, c) (((c)+((n)*4))<<16)
707#define BT(n, c) (((c)+((n)*4))<<21)
708#define BA(n, c) (((c)+((n)*4))<<16)
709#define BB(n, c) (((c)+((n)*4))<<11)
710#define BC_(n, c) (((c)+((n)*4))<<6)
711
712#define BO_COND_TRUE  BO(12)
713#define BO_COND_FALSE BO( 4)
714#define BO_ALWAYS     BO(20)
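
/*
 * BO=12 branches if the CR bit selected by BI is set, BO=4 if it is
 * clear, and BO=20 branches unconditionally (BI is ignored).
 */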
715
716enum {
717    CR_LT,
718    CR_GT,
719    CR_EQ,
720    CR_SO
721};
722
723static const uint32_t tcg_to_bc[16] = {
724    [TCG_COND_EQ]  = BC | BI(0, CR_EQ) | BO_COND_TRUE,
725    [TCG_COND_NE]  = BC | BI(0, CR_EQ) | BO_COND_FALSE,
726    [TCG_COND_TSTEQ]  = BC | BI(0, CR_EQ) | BO_COND_TRUE,
727    [TCG_COND_TSTNE]  = BC | BI(0, CR_EQ) | BO_COND_FALSE,
728    [TCG_COND_LT]  = BC | BI(0, CR_LT) | BO_COND_TRUE,
729    [TCG_COND_GE]  = BC | BI(0, CR_LT) | BO_COND_FALSE,
730    [TCG_COND_LE]  = BC | BI(0, CR_GT) | BO_COND_FALSE,
731    [TCG_COND_GT]  = BC | BI(0, CR_GT) | BO_COND_TRUE,
732    [TCG_COND_LTU] = BC | BI(0, CR_LT) | BO_COND_TRUE,
733    [TCG_COND_GEU] = BC | BI(0, CR_LT) | BO_COND_FALSE,
734    [TCG_COND_LEU] = BC | BI(0, CR_GT) | BO_COND_FALSE,
735    [TCG_COND_GTU] = BC | BI(0, CR_GT) | BO_COND_TRUE,
736};
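
/*
 * TSTEQ/TSTNE reuse the EQ/NE encodings: tcg_out_cmp() lowers them
 * through tcg_out_test() with RC set, and the record form leaves the
 * zero/non-zero result in CR0.EQ exactly like a compare against zero.
 */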
737
738/* The low bit here is set if the RA and RB fields must be inverted.  */
739static const uint32_t tcg_to_isel[16] = {
740    [TCG_COND_EQ]  = ISEL | BC_(0, CR_EQ),
741    [TCG_COND_NE]  = ISEL | BC_(0, CR_EQ) | 1,
742    [TCG_COND_TSTEQ] = ISEL | BC_(0, CR_EQ),
743    [TCG_COND_TSTNE] = ISEL | BC_(0, CR_EQ) | 1,
744    [TCG_COND_LT]  = ISEL | BC_(0, CR_LT),
745    [TCG_COND_GE]  = ISEL | BC_(0, CR_LT) | 1,
746    [TCG_COND_LE]  = ISEL | BC_(0, CR_GT) | 1,
747    [TCG_COND_GT]  = ISEL | BC_(0, CR_GT),
748    [TCG_COND_LTU] = ISEL | BC_(0, CR_LT),
749    [TCG_COND_GEU] = ISEL | BC_(0, CR_LT) | 1,
750    [TCG_COND_LEU] = ISEL | BC_(0, CR_GT) | 1,
751    [TCG_COND_GTU] = ISEL | BC_(0, CR_GT),
752};
753
754static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
755                        intptr_t value, intptr_t addend)
756{
757    const tcg_insn_unit *target;
758    int16_t lo;
759    int32_t hi;
760
761    value += addend;
762    target = (const tcg_insn_unit *)value;
763
764    switch (type) {
765    case R_PPC_REL14:
766        return reloc_pc14(code_ptr, target);
767    case R_PPC_REL24:
768        return reloc_pc24(code_ptr, target);
769    case R_PPC64_PCREL34:
770        return reloc_pc34(code_ptr, target);
771    case R_PPC_ADDR16:
772        /*
773         * We are (slightly) abusing this relocation type.  In particular,
774         * assert that the low 2 bits are zero, and do not modify them.
775         * That way we can use this with LD et al that have opcode bits
776         * in the low 2 bits of the insn.
777         */
778        if ((value & 3) || value != (int16_t)value) {
779            return false;
780        }
781        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
782        break;
783    case R_PPC_ADDR32:
784        /*
785         * We are abusing this relocation type.  Again, this points to
786         * a pair of insns, lis + load.  This is an absolute address
787         * relocation for PPC32 so the lis cannot be removed.
788         */
789        lo = value;
790        hi = value - lo;
791        if (hi + lo != value) {
792            return false;
793        }
794        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
795        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
796        break;
797    default:
798        g_assert_not_reached();
799    }
800    return true;
801}
802
803/* Ensure that the prefixed instruction does not cross a 64-byte boundary. */
804static bool tcg_out_need_prefix_align(TCGContext *s)
805{
806    return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c;
807}
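
/*
 * Prefixed insns are 8 bytes long and may not cross a 64-byte address
 * boundary (Power ISA v3.1).  Offset 0x3c is the only word slot in a
 * 64-byte line from which the suffix word would spill into the next.
 */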
808
809static void tcg_out_prefix_align(TCGContext *s)
810{
811    if (tcg_out_need_prefix_align(s)) {
812        tcg_out32(s, NOP);
813    }
814}
815
816static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target)
817{
818    return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
819}
820
821/* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */
822static void tcg_out_8ls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
823                          unsigned ra, tcg_target_long imm, bool r)
824{
825    tcg_insn_unit p, i;
826
827    p = OPCD(1) | (r << 20) | ((imm >> 16) & 0x3ffff);
828    i = opc | TAI(rt, ra, imm);
829
830    tcg_out_prefix_align(s);
831    tcg_out32(s, p);
832    tcg_out32(s, i);
833}
834
835/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
836static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
837                          unsigned ra, tcg_target_long imm, bool r)
838{
839    tcg_insn_unit p, i;
840
841    p = OPCD(1) | (2 << 24) | (r << 20) | ((imm >> 16) & 0x3ffff);
842    i = opc | TAI(rt, ra, imm);
843
844    tcg_out_prefix_align(s);
845    tcg_out32(s, p);
846    tcg_out32(s, i);
847}
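
/*
 * Both prefix forms above carry imm[33:16] in the prefix word and
 * imm[15:0] in the suffix, forming a 34-bit signed immediate; MLS:D
 * additionally sets the prefix type field (2 << 24) and reuses the
 * un-prefixed opcode of the suffix.
 */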
848
849static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
850                             TCGReg base, tcg_target_long offset);
851
852static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
853{
854    if (ret == arg) {
855        return true;
856    }
857    switch (type) {
858    case TCG_TYPE_I64:
859        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
860        /* fallthru */
861    case TCG_TYPE_I32:
862        if (ret < TCG_REG_V0) {
863            if (arg < TCG_REG_V0) {
864                tcg_out32(s, OR | SAB(arg, ret, arg));
865                break;
866            } else if (have_isa_2_07) {
867                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
868                          | VRT(arg) | RA(ret));
869                break;
870            } else {
871                /* Altivec does not support vector->integer moves.  */
872                return false;
873            }
874        } else if (arg < TCG_REG_V0) {
875            if (have_isa_2_07) {
876                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
877                          | VRT(ret) | RA(arg));
878                break;
879            } else {
880                /* Altivec does not support integer->vector moves.  */
881                return false;
882            }
883        }
884        /* fallthru */
885    case TCG_TYPE_V64:
886    case TCG_TYPE_V128:
887        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
888        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
889        break;
890    default:
891        g_assert_not_reached();
892    }
893    return true;
894}
895
896static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
897                           int sh, int mb, bool rc)
898{
899    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
900    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
901    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
902    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc);
903}
904
905static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
906                        int sh, int mb)
907{
908    tcg_out_rld_rc(s, op, ra, rs, sh, mb, false);
909}
910
911static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
912                           int sh, int mb, int me, bool rc)
913{
914    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
915}
916
917static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
918                        int sh, int mb, int me)
919{
920    tcg_out_rlw_rc(s, op, ra, rs, sh, mb, me, false);
921}
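
/*
 * rlwinm/rlwimi rotate left by SH, then apply a mask running from bit
 * MB through bit ME in big-endian bit numbering.  E.g. tcg_out_shli32()
 * below emits rlwinm dst,src,c,0,31-c, the canonical expansion of slwi.
 */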
922
923static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
924{
925    tcg_out32(s, EXTSB | RA(dst) | RS(src));
926}
927
928static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
929{
930    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
931}
932
933static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
934{
935    tcg_out32(s, EXTSH | RA(dst) | RS(src));
936}
937
938static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
939{
940    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
941}
942
943static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
944{
945    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
946    tcg_out32(s, EXTSW | RA(dst) | RS(src));
947}
948
949static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
950{
951    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
952    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
953}
954
955static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
956{
957    tcg_out_ext32s(s, dst, src);
958}
959
960static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
961{
962    tcg_out_ext32u(s, dst, src);
963}
964
965static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
966{
967    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
968    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
969}
970
971static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
972{
973    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
974}
975
976static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
977{
978    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
979}
980
981static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
982{
983    /* Limit immediate shift count lest we create an illegal insn.  */
984    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
985}
986
987static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
988{
989    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
990}
991
992static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
993{
994    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
995}
996
997static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
998{
999    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
1000}
1001
1002static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
1003{
1004    uint32_t d0, d1, d2;
1005
1006    tcg_debug_assert((imm & 0xffff) == 0);
1007    tcg_debug_assert(imm == (int32_t)imm);
1008
1009    d2 = extract32(imm, 16, 1);
1010    d1 = extract32(imm, 17, 5);
1011    d0 = extract32(imm, 22, 10);
1012    tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
1013}
1014
1015static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
1016{
1017    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
1018
1019    if (have_isa_3_10) {
1020        tcg_out32(s, BRH | RA(dst) | RS(src));
1021        if (flags & TCG_BSWAP_OS) {
1022            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
1023        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1024            tcg_out_ext16u(s, dst, dst);
1025        }
1026        return;
1027    }
1028
1029    /*
1030     * In the following,
1031     *   dep(a, b, m) -> (a & ~m) | (b & m)
1032     *
1033     * Begin with:                              src = xxxxabcd
1034     */
1035    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
1036    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
1037    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
1038    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
1039
1040    if (flags & TCG_BSWAP_OS) {
1041        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
1042    } else {
1043        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
1044    }
1045}
1046
1047static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
1048{
1049    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
1050
1051    if (have_isa_3_10) {
1052        tcg_out32(s, BRW | RA(dst) | RS(src));
1053        if (flags & TCG_BSWAP_OS) {
1054            tcg_out_ext32s(s, dst, dst);
1055        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1056            tcg_out_ext32u(s, dst, dst);
1057        }
1058        return;
1059    }
1060
1061    /*
1062     * Stolen from gcc's builtin_bswap32.
1063     * In the following,
1064     *   dep(a, b, m) -> (a & ~m) | (b & m)
1065     *
1066     * Begin with:                              src = xxxxabcd
1067     */
1068    /* tmp = rol32(src, 8) & 0xffffffff             = 0000bcda */
1069    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
1070    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)   = 0000dcda */
1071    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
1072    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)   = 0000dcba */
1073    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
1074
1075    if (flags & TCG_BSWAP_OS) {
1076        tcg_out_ext32s(s, dst, tmp);
1077    } else {
1078        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
1079    }
1080}
1081
1082static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
1083{
1084    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
1085    TCGReg t1 = dst == src ? dst : TCG_REG_R0;
1086
1087    if (have_isa_3_10) {
1088        tcg_out32(s, BRD | RA(dst) | RS(src));
1089        return;
1090    }
1091
1092    /*
1093     * In the following,
1094     *   dep(a, b, m) -> (a & ~m) | (b & m)
1095     *
1096     * Begin with:                              src = abcdefgh
1097     */
1098    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
1099    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
1100    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
1101    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
1102    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
1103    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
1104
1105    /* t0 = rol64(t0, 32)                           = hgfe0000 */
1106    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
1107    /* t1 = rol64(src, 32)                          = efghabcd */
1108    tcg_out_rld(s, RLDICL, t1, src, 32, 0);
1109
1110    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
1111    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
1112    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
1113    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
1114    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
1115    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
1116
1117    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
1118}
1119
1120/* Emit a move into ret of arg, if it can be done in one insn.  */
1121static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
1122{
1123    if (arg == (int16_t)arg) {
1124        tcg_out32(s, ADDI | TAI(ret, 0, arg));
1125        return true;
1126    }
1127    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
1128        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
1129        return true;
1130    }
1131    return false;
1132}
1133
1134static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
1135                             tcg_target_long arg, bool in_prologue)
1136{
1137    intptr_t tb_diff;
1138    tcg_target_long tmp;
1139    int shift;
1140
1141    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1142
1143    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1144        arg = (int32_t)arg;
1145    }
1146
1147    /* Load 16-bit immediates with one insn.  */
1148    if (tcg_out_movi_one(s, ret, arg)) {
1149        return;
1150    }
1151
1152    /* Load addresses within the TB with one insn.  */
1153    tb_diff = ppc_tbrel_diff(s, (void *)arg);
1154    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
1155        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
1156        return;
1157    }
1158
1159    /*
1160     * Load values up to 34 bits, and pc-relative addresses,
1161     * with one prefixed insn.
1162     */
1163    if (have_isa_3_10) {
1164        if (arg == sextract64(arg, 0, 34)) {
1165            /* pli ret,value = paddi ret,0,value,0 */
1166            tcg_out_mls_d(s, ADDI, ret, 0, arg, 0);
1167            return;
1168        }
1169
1170        tmp = tcg_pcrel_diff_for_prefix(s, (void *)arg);
1171        if (tmp == sextract64(tmp, 0, 34)) {
1172            /* pla ret,value = paddi ret,0,value,1 */
1173            tcg_out_mls_d(s, ADDI, ret, 0, tmp, 1);
1174            return;
1175        }
1176    }
1177
1178    /* Load 32-bit immediates with two insns.  Note that we've already
1179       eliminated bare ADDIS, so we know both insns are required.  */
1180    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
1181        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
1182        tcg_out32(s, ORI | SAI(ret, ret, arg));
1183        return;
1184    }
1185    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
1186        tcg_out32(s, ADDI | TAI(ret, 0, arg));
1187        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1188        return;
1189    }
1190
1191    /* Load masked 16-bit value.  */
1192    if (arg > 0 && (arg & 0x8000)) {
1193        tmp = arg | 0x7fff;
1194        if ((tmp & (tmp + 1)) == 0) {
1195            int mb = clz64(tmp + 1) + 1;
1196            tcg_out32(s, ADDI | TAI(ret, 0, arg));
1197            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
1198            return;
1199        }
1200    }
1201
1202    /* Load common masks with 2 insns.  */
1203    shift = ctz64(arg);
1204    tmp = arg >> shift;
1205    if (tmp == (int16_t)tmp) {
1206        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
1207        tcg_out_shli64(s, ret, ret, shift);
1208        return;
1209    }
1210    shift = clz64(arg);
1211    if (tcg_out_movi_one(s, ret, arg << shift)) {
1212        tcg_out_shri64(s, ret, ret, shift);
1213        return;
1214    }
1215
1216    /* Load addresses within 2GB with 2 insns. */
1217    if (have_isa_3_00) {
1218        intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4;
1219        int16_t lo = hi;
1220
1221        hi -= lo;
1222        if (hi == (int32_t)hi) {
1223            tcg_out_addpcis(s, TCG_REG_TMP2, hi);
1224            tcg_out32(s, ADDI | TAI(ret, TCG_REG_TMP2, lo));
1225            return;
1226        }
1227    }
1228
1229    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns.  */
1230    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
1231        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
1232        return;
1233    }
1234
1235    /* Use the constant pool, if possible.  */
1236    if (!in_prologue && USE_REG_TB) {
1237        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1238                       ppc_tbrel_diff(s, NULL));
1239        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
1240        return;
1241    }
1242    if (have_isa_3_10) {
1243        tcg_out_8ls_d(s, PLD, ret, 0, 0, 1);
1244        new_pool_label(s, arg, R_PPC64_PCREL34, s->code_ptr - 2, 0);
1245        return;
1246    }
1247    if (have_isa_3_00) {
1248        tcg_out_addpcis(s, TCG_REG_TMP2, 0);
1249        new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0);
1250        tcg_out32(s, LD | TAI(ret, TCG_REG_TMP2, 0));
1251        return;
1252    }
1253
1254    tmp = arg >> 31 >> 1;
1255    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1256    if (tmp) {
1257        tcg_out_shli64(s, ret, ret, 32);
1258    }
1259    if (arg & 0xffff0000) {
1260        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1261    }
1262    if (arg & 0xffff) {
1263        tcg_out32(s, ORI | SAI(ret, ret, arg));
1264    }
1265}
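
/*
 * Worked example of the final fallback, e.g. in the prologue on a
 * pre-v3.00 cpu where the constant pool is not used:
 * arg = 0x123456789abcdef0 expands to
 *   lis ret,0x1234; ori ret,ret,0x5678; sldi ret,ret,32;
 *   oris ret,ret,0x9abc; ori ret,ret,0xdef0
 * for a total of five insns.
 */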
1266
1267static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1268                             TCGReg ret, int64_t val)
1269{
1270    uint32_t load_insn;
1271    int rel, low;
1272    intptr_t add;
1273
1274    switch (vece) {
1275    case MO_8:
1276        low = (int8_t)val;
1277        if (low >= -16 && low < 16) {
1278            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1279            return;
1280        }
1281        if (have_isa_3_00) {
1282            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1283            return;
1284        }
1285        break;
1286
1287    case MO_16:
1288        low = (int16_t)val;
1289        if (low >= -16 && low < 16) {
1290            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1291            return;
1292        }
1293        break;
1294
1295    case MO_32:
1296        low = (int32_t)val;
1297        if (low >= -16 && low < 16) {
1298            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1299            return;
1300        }
1301        break;
1302    }
1303
1304    /*
1305     * Otherwise we must load the value from the constant pool.
1306     */
1307    if (USE_REG_TB) {
1308        rel = R_PPC_ADDR16;
1309        add = ppc_tbrel_diff(s, NULL);
1310    } else if (have_isa_3_10) {
1311        if (type == TCG_TYPE_V64) {
1312            tcg_out_8ls_d(s, PLXSD, ret & 31, 0, 0, 1);
1313            new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0);
1314        } else {
1315            tcg_out_8ls_d(s, PLXV, ret & 31, 0, 0, 1);
1316            new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val);
1317        }
1318        return;
1319    } else if (have_isa_3_00) {
1320        tcg_out_addpcis(s, TCG_REG_TMP1, 0);
1321        rel = R_PPC_REL14;
1322        add = 0;
1323    } else {
1324        rel = R_PPC_ADDR32;
1325        add = 0;
1326    }
1327
1328    if (have_vsx) {
1329        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1330        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1331        if (TCG_TARGET_REG_BITS == 64) {
1332            new_pool_label(s, val, rel, s->code_ptr, add);
1333        } else {
1334            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1335        }
1336    } else {
1337        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1338        if (TCG_TARGET_REG_BITS == 64) {
1339            new_pool_l2(s, rel, s->code_ptr, add, val, val);
1340        } else {
1341            new_pool_l4(s, rel, s->code_ptr, add,
1342                        val >> 32, val, val >> 32, val);
1343        }
1344    }
1345
1346    if (USE_REG_TB) {
1347        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1348        load_insn |= RA(TCG_REG_TB);
1349    } else if (have_isa_3_00) {
1350        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1351    } else {
1352        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1353        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1354    }
1355    tcg_out32(s, load_insn);
1356}
1357
1358static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1359                         tcg_target_long arg)
1360{
1361    switch (type) {
1362    case TCG_TYPE_I32:
1363    case TCG_TYPE_I64:
1364        tcg_debug_assert(ret < TCG_REG_V0);
1365        tcg_out_movi_int(s, type, ret, arg, false);
1366        break;
1367
1368    default:
1369        g_assert_not_reached();
1370    }
1371}
1372
1373static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1374{
1375    return false;
1376}
1377
1378static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1379                             tcg_target_long imm)
1380{
1381    /* This function is only used for passing structs by reference. */
1382    g_assert_not_reached();
1383}
1384
1385static bool mask_operand(uint32_t c, int *mb, int *me)
1386{
1387    uint32_t lsb, test;
1388
1389    /* Accept a bit pattern like:
1390           0....01....1
1391           1....10....0
1392           0..01..10..0
1393       Keep track of the transitions.  */
1394    if (c == 0 || c == -1) {
1395        return false;
1396    }
1397    test = c;
1398    lsb = test & -test;
1399    test += lsb;
1400    if (test & (test - 1)) {
1401        return false;
1402    }
1403
1404    *me = clz32(lsb);
1405    *mb = test ? clz32(test & -test) + 1 : 0;
1406    return true;
1407}
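
/*
 * Example: c = 0x00ffff00 gives lsb = 0x100 and test = 0x01000000, a
 * power of two, so the mask is accepted with me = clz32(0x100) = 23
 * and mb = clz32(0x01000000) + 1 = 8: rlwinm's mask of bits 8..23.
 */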
1408
1409static bool mask64_operand(uint64_t c, int *mb, int *me)
1410{
1411    uint64_t lsb;
1412
1413    if (c == 0) {
1414        return false;
1415    }
1416
1417    lsb = c & -c;
1418    /* Accept 1..10..0.  */
1419    if (c == -lsb) {
1420        *mb = 0;
1421        *me = clz64(lsb);
1422        return true;
1423    }
1424    /* Accept 0..01..1.  */
1425    if (lsb == 1 && (c & (c + 1)) == 0) {
1426        *mb = clz64(c + 1) + 1;
1427        *me = 63;
1428        return true;
1429    }
1430    return false;
1431}
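
/*
 * Unlike the 32-bit case, only masks anchored at one end are usable,
 * since rldicr/rldicl take a single boundary.  E.g. 0xffffff0000000000
 * yields mb = 0, me = 23 (rldicr) and 0x00000000ffffffff yields
 * mb = 32, me = 63 (rldicl); an interior run such as 0x0000ffff00000000
 * is rejected here, and callers either shift the value into place
 * first (cf. tcg_out_test) or fall back to a register AND.
 */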
1432
1433static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1434{
1435    int mb, me;
1436
1437    if (mask_operand(c, &mb, &me)) {
1438        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1439    } else if ((c & 0xffff) == c) {
1440        tcg_out32(s, ANDI | SAI(src, dst, c));
1441        return;
1442    } else if ((c & 0xffff0000) == c) {
1443        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1444        return;
1445    } else {
1446        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1447        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1448    }
1449}
1450
1451static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1452{
1453    int mb, me;
1454
1455    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1456    if (mask64_operand(c, &mb, &me)) {
1457        if (mb == 0) {
1458            tcg_out_rld(s, RLDICR, dst, src, 0, me);
1459        } else {
1460            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1461        }
1462    } else if ((c & 0xffff) == c) {
1463        tcg_out32(s, ANDI | SAI(src, dst, c));
1464        return;
1465    } else if ((c & 0xffff0000) == c) {
1466        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1467        return;
1468    } else {
1469        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1470        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1471    }
1472}
1473
1474static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1475                           int op_lo, int op_hi)
1476{
1477    if (c >> 16) {
1478        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1479        src = dst;
1480    }
1481    if (c & 0xffff) {
1482        tcg_out32(s, op_lo | SAI(src, dst, c));
1483        src = dst;
1484    }
1485}
1486
1487static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1488{
1489    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1490}
1491
1492static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1493{
1494    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1495}
1496
1497static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1498{
1499    ptrdiff_t disp = tcg_pcrel_diff(s, target);
1500    if (in_range_b(disp)) {
1501        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1502    } else {
1503        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1504        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1505        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1506    }
1507}
1508
1509static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1510                             TCGReg base, tcg_target_long offset)
1511{
1512    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1513    bool is_int_store = false;
1514    TCGReg rs = TCG_REG_TMP1;
1515
1516    switch (opi) {
1517    case LD: case LWA:
1518        align = 3;
1519        /* FALLTHRU */
1520    default:
1521        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1522            rs = rt;
1523            break;
1524        }
1525        break;
1526    case LXSD:
1527    case STXSD:
1528        align = 3;
1529        break;
1530    case LXV:
1531    case STXV:
1532        align = 15;
1533        break;
1534    case STD:
1535        align = 3;
1536        /* FALLTHRU */
1537    case STB: case STH: case STW:
1538        is_int_store = true;
1539        break;
1540    }
1541
1542    /* For unaligned or large offsets, use the prefixed form. */
1543    if (have_isa_3_10
1544        && (offset != (int16_t)offset || (offset & align))
1545        && offset == sextract64(offset, 0, 34)) {
1546        /*
1547         * Note that the MLS:D insns retain their un-prefixed opcode,
1548         * while the 8LS:D insns use a different opcode space.
1549         */
1550        switch (opi) {
1551        case LBZ:
1552        case LHZ:
1553        case LHA:
1554        case LWZ:
1555        case STB:
1556        case STH:
1557        case STW:
1558        case ADDI:
1559            tcg_out_mls_d(s, opi, rt, base, offset, 0);
1560            return;
1561        case LWA:
1562            tcg_out_8ls_d(s, PLWA, rt, base, offset, 0);
1563            return;
1564        case LD:
1565            tcg_out_8ls_d(s, PLD, rt, base, offset, 0);
1566            return;
1567        case STD:
1568            tcg_out_8ls_d(s, PSTD, rt, base, offset, 0);
1569            return;
1570        case LXSD:
1571            tcg_out_8ls_d(s, PLXSD, rt & 31, base, offset, 0);
1572            return;
1573        case STXSD:
1574            tcg_out_8ls_d(s, PSTXSD, rt & 31, base, offset, 0);
1575            return;
1576        case LXV:
1577            tcg_out_8ls_d(s, PLXV, rt & 31, base, offset, 0);
1578            return;
1579        case STXV:
1580            tcg_out_8ls_d(s, PSTXV, rt & 31, base, offset, 0);
1581            return;
1582        }
1583    }
1584
1585    /* For unaligned, or very large offsets, use the indexed form.  */
1586    if (offset & align || offset != (int32_t)offset || opi == 0) {
1587        if (rs == base) {
1588            rs = TCG_REG_R0;
1589        }
1590        tcg_debug_assert(!is_int_store || rs != rt);
1591        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1592        tcg_out32(s, opx | TAB(rt & 31, base, rs));
1593        return;
1594    }
1595
1596    l0 = (int16_t)offset;
1597    offset = (offset - l0) >> 16;
1598    l1 = (int16_t)offset;
1599
1600    if (l1 < 0 && orig >= 0) {
1601        extra = 0x4000;
1602        l1 = (int16_t)(offset - 0x4000);
1603    }
1604    if (l1) {
1605        tcg_out32(s, ADDIS | TAI(rs, base, l1));
1606        base = rs;
1607    }
1608    if (extra) {
1609        tcg_out32(s, ADDIS | TAI(rs, base, extra));
1610        base = rs;
1611    }
1612    if (opi != ADDI || base != rt || l0 != 0) {
1613        tcg_out32(s, opi | TAI(rt & 31, base, l0));
1614    }
1615}
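
/*
 * Example of the addis split: offset = 0x12348000 becomes l0 = -0x8000
 * and l1 = 0x1235, i.e. addis rs,base,0x1235 followed by the operation
 * at -0x8000(rs).  The 'extra' step keeps a positive offset from
 * sign-flipping the high part: 0x7fff8000 emits addis 0x4000 twice.
 */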
1616
1617static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1618                           TCGReg va, TCGReg vb, int shb)
1619{
1620    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1621}
1622
1623static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1624                       TCGReg base, intptr_t offset)
1625{
1626    int shift;
1627
1628    switch (type) {
1629    case TCG_TYPE_I32:
1630        if (ret < TCG_REG_V0) {
1631            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1632            break;
1633        }
1634        if (have_isa_2_07 && have_vsx) {
1635            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1636            break;
1637        }
1638        tcg_debug_assert((offset & 3) == 0);
1639        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1640        shift = (offset - 4) & 0xc;
1641        if (shift) {
1642            tcg_out_vsldoi(s, ret, ret, ret, shift);
1643        }
1644        break;
1645    case TCG_TYPE_I64:
1646        if (ret < TCG_REG_V0) {
1647            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1648            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1649            break;
1650        }
1651        /* fallthru */
1652    case TCG_TYPE_V64:
1653        tcg_debug_assert(ret >= TCG_REG_V0);
1654        if (have_vsx) {
1655            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1656                             ret, base, offset);
1657            break;
1658        }
1659        tcg_debug_assert((offset & 7) == 0);
1660        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1661        if (offset & 8) {
1662            tcg_out_vsldoi(s, ret, ret, ret, 8);
1663        }
1664        break;
1665    case TCG_TYPE_V128:
1666        tcg_debug_assert(ret >= TCG_REG_V0);
1667        tcg_debug_assert((offset & 15) == 0);
1668        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1669                         LVX, ret, base, offset);
1670        break;
1671    default:
1672        g_assert_not_reached();
1673    }
1674}
1675
1676static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1677                       TCGReg base, intptr_t offset)
1678{
1679    int shift;
1680
1681    switch (type) {
1682    case TCG_TYPE_I32:
1683        if (arg < TCG_REG_V0) {
1684            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1685            break;
1686        }
1687        if (have_isa_2_07 && have_vsx) {
1688            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1689            break;
1690        }
1692        tcg_debug_assert((offset & 3) == 0);
1693        shift = (offset - 4) & 0xc;
1694        if (shift) {
1695            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1696            arg = TCG_VEC_TMP1;
1697        }
1698        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1699        break;
1700    case TCG_TYPE_I64:
1701        if (arg < TCG_REG_V0) {
1702            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1703            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1704            break;
1705        }
1706        /* fallthru */
1707    case TCG_TYPE_V64:
1708        tcg_debug_assert(arg >= TCG_REG_V0);
1709        if (have_vsx) {
1710            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1711                             STXSDX, arg, base, offset);
1712            break;
1713        }
1714        tcg_debug_assert((offset & 7) == 0);
1715        if (offset & 8) {
1716            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1717            arg = TCG_VEC_TMP1;
1718        }
1719        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1720        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1721        break;
1722    case TCG_TYPE_V128:
1723        tcg_debug_assert(arg >= TCG_REG_V0);
1724        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1725                         STVX, arg, base, offset);
1726        break;
1727    default:
1728        g_assert_not_reached();
1729    }
1730}
1731
1732static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1733                               TCGReg base, intptr_t ofs)
1734{
1735    return false;
1736}
1737
1738/*
1739 * Set dest non-zero if and only if (arg1 & arg2) is non-zero.
1740 * If RC, then also set CR0 via the record (Rc) bit.
1741 */
1742static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
1743                         bool const_arg2, TCGType type, bool rc)
1744{
1745    int mb, me;
1746
1747    if (!const_arg2) {
1748        tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc);
1749        return;
1750    }
1751
1752    if (type == TCG_TYPE_I32) {
1753        arg2 = (uint32_t)arg2;
1754    }
1755
1756    if ((arg2 & ~0xffff) == 0) {
1757        tcg_out32(s, ANDI | SAI(arg1, dest, arg2));
1758        return;
1759    }
1760    if ((arg2 & ~0xffff0000ull) == 0) {
1761        tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16));
1762        return;
1763    }
1764    if (arg2 == (uint32_t)arg2 && mask_operand(arg2, &mb, &me)) {
1765        tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
1766        return;
1767    }
1768    if (TCG_TARGET_REG_BITS == 64) {
1769        int sh = clz64(arg2);
1770        if (mask64_operand(arg2 << sh, &mb, &me)) {
1771            tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc);
1772            return;
1773        }
1774    }
1775    /* Constraints should satisfy this. */
1776    g_assert_not_reached();
1777}
1778
1779static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1780                        int const_arg2, int cr, TCGType type)
1781{
1782    int imm;
1783    uint32_t op;
1784
1785    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1786
1787    /*
1788     * Simplify the comparisons below wrt CMPI.
1789     * All of the tests are 16-bit, so a 32-bit sign extend always works.
1790     */
1791    if (type == TCG_TYPE_I32) {
1792        arg2 = (int32_t)arg2;
1793    }
1794
1795    switch (cond) {
1796    case TCG_COND_EQ:
1797    case TCG_COND_NE:
1798        if (const_arg2) {
1799            if ((int16_t) arg2 == arg2) {
1800                op = CMPI;
1801                imm = 1;
1802                break;
1803            } else if ((uint16_t) arg2 == arg2) {
1804                op = CMPLI;
1805                imm = 1;
1806                break;
1807            }
1808        }
1809        op = CMPL;
1810        imm = 0;
1811        break;
1812
1813    case TCG_COND_TSTEQ:
1814    case TCG_COND_TSTNE:
1815        tcg_debug_assert(cr == 0);
1816        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, true);
1817        return;
1818
1819    case TCG_COND_LT:
1820    case TCG_COND_GE:
1821    case TCG_COND_LE:
1822    case TCG_COND_GT:
1823        if (const_arg2) {
1824            if ((int16_t) arg2 == arg2) {
1825                op = CMPI;
1826                imm = 1;
1827                break;
1828            }
1829        }
1830        op = CMP;
1831        imm = 0;
1832        break;
1833
1834    case TCG_COND_LTU:
1835    case TCG_COND_GEU:
1836    case TCG_COND_LEU:
1837    case TCG_COND_GTU:
1838        if (const_arg2) {
1839            if ((uint16_t) arg2 == arg2) {
1840                op = CMPLI;
1841                imm = 1;
1842                break;
1843            }
1844        }
1845        op = CMPL;
1846        imm = 0;
1847        break;
1848
1849    default:
1850        g_assert_not_reached();
1851    }
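    /* The (type == TCG_TYPE_I64) << 21 term sets the L field, selecting
       a 64-bit rather than a 32-bit comparison. */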
1852    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1853
1854    if (imm) {
1855        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1856    } else {
1857        if (const_arg2) {
1858            tcg_out_movi(s, type, TCG_REG_R0, arg2);
1859            arg2 = TCG_REG_R0;
1860        }
1861        tcg_out32(s, op | RA(arg1) | RB(arg2));
1862    }
1863}
1864
1865static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1866                                TCGReg dst, TCGReg src, bool neg)
1867{
1868    if (neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
1869        /*
1870         * X != 0 implies X + -1 generates a carry.
1871         * RT = (~X + X) + CA
1872         *    = -1 + CA
1873         *    = CA ? 0 : -1
1874         */
1875        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1876        tcg_out32(s, SUBFE | TAB(dst, src, src));
1877        return;
1878    }
1879
1880    if (type == TCG_TYPE_I32) {
1881        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1882        tcg_out_shri32(s, dst, dst, 5);
1883    } else {
1884        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1885        tcg_out_shri64(s, dst, dst, 6);
1886    }
1887    if (neg) {
1888        tcg_out32(s, NEG | RT(dst) | RA(dst));
1889    }
1890}
1891
1892static void tcg_out_setcond_ne0(TCGContext *s, TCGType type,
1893                                TCGReg dst, TCGReg src, bool neg)
1894{
1895    if (!neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
1896        /*
1897         * X != 0 implies X + -1 generates a carry.  Extra addition
1898         * trickery means: R = X + ~(X-1) + C = X + (-X) + C = C.
1899         */
1900        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1901        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1902        return;
1903    }
1904    tcg_out_setcond_eq0(s, type, dst, src, false);
1905    if (neg) {
1906        tcg_out32(s, ADDI | TAI(dst, dst, -1));
1907    } else {
1908        tcg_out_xori32(s, dst, dst, 1);
1909    }
1910}
1911
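/*
 * Reduce an equality test to a test against zero:
 * arg1 ^ arg2 is zero iff arg1 == arg2.  The result lands in R0.
 */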
1912static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1913                                  bool const_arg2)
1914{
1915    if (const_arg2) {
1916        if ((uint32_t)arg2 == arg2) {
1917            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1918        } else {
1919            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1920            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1921        }
1922    } else {
1923        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1924    }
1925    return TCG_REG_R0;
1926}
1927
1928static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1929                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
1930                            int const_arg2, bool neg)
1931{
1932    int sh;
1933    bool inv;
1934
1935    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1936
1937    /* Ignore high bits of a potential constant arg2.  */
1938    if (type == TCG_TYPE_I32) {
1939        arg2 = (uint32_t)arg2;
1940    }
1941
1942    /* With SETBC/SETBCR, we can always implement with 2 insns. */
1943    if (have_isa_3_10) {
1944        tcg_insn_unit bi, opc;
1945
1946        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
1947
1948        /* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
1949        bi = tcg_to_bc[cond] & (0x1f << 16);
1950        if (tcg_to_bc[cond] & BO(8)) {
1951            opc = neg ? SETNBC : SETBC;
1952        } else {
1953            opc = neg ? SETNBCR : SETBCR;
1954        }
1955        tcg_out32(s, opc | RT(arg0) | bi);
1956        return;
1957    }
1958
1959    /* Handle common and trivial cases before handling anything else.  */
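    /* arg2 == 0 implies a constant: R0 is reserved, so register 0
       never appears as an input here. */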
1960    if (arg2 == 0) {
1961        switch (cond) {
1962        case TCG_COND_EQ:
1963            tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
1964            return;
1965        case TCG_COND_NE:
1966            tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
1967            return;
1968        case TCG_COND_GE:
1969            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1970            arg1 = arg0;
1971            /* FALLTHRU */
1972        case TCG_COND_LT:
1973            /* Extract the sign bit.  */
1974            if (type == TCG_TYPE_I32) {
1975                if (neg) {
1976                    tcg_out_sari32(s, arg0, arg1, 31);
1977                } else {
1978                    tcg_out_shri32(s, arg0, arg1, 31);
1979                }
1980            } else {
1981                if (neg) {
1982                    tcg_out_sari64(s, arg0, arg1, 63);
1983                } else {
1984                    tcg_out_shri64(s, arg0, arg1, 63);
1985                }
1986            }
1987            return;
1988        default:
1989            break;
1990        }
1991    }
1992
1993    /* If we have ISEL, we can implement everything with 3 or 4 insns.
1994       All other cases below are also at least 3 insns, so speed up the
1995       code generator by not considering them and always using ISEL.  */
1996    if (have_isel) {
1997        int isel, tab;
1998
1999        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
2000
2001        isel = tcg_to_isel[cond];
2002
2003        tcg_out_movi(s, type, arg0, neg ? -1 : 1);
2004        if (isel & 1) {
2005            /* arg0 = (bc ? 0 : 1) */
2006            tab = TAB(arg0, 0, arg0);
2007            isel &= ~1;
2008        } else {
2009            /* arg0 = (bc ? 1 : 0) */
2010            tcg_out_movi(s, type, TCG_REG_R0, 0);
2011            tab = TAB(arg0, arg0, TCG_REG_R0);
2012        }
2013        tcg_out32(s, isel | tab);
2014        return;
2015    }
2016
2017    inv = false;
2018    switch (cond) {
2019    case TCG_COND_EQ:
2020        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
2021        tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
2022        break;
2023
2024    case TCG_COND_NE:
2025        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
2026        tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
2027        break;
2028
2029    case TCG_COND_TSTEQ:
2030        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
2031        tcg_out_setcond_eq0(s, type, arg0, TCG_REG_R0, neg);
2032        break;
2033
2034    case TCG_COND_TSTNE:
2035        tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
2036        tcg_out_setcond_ne0(s, type, arg0, TCG_REG_R0, neg);
2037        break;
2038
2039    case TCG_COND_LE:
2040    case TCG_COND_LEU:
2041        inv = true;
2042        /* fall through */
2043    case TCG_COND_GT:
2044    case TCG_COND_GTU:
2045        sh = 30; /* CR7 CR_GT */
2046        goto crtest;
2047
2048    case TCG_COND_GE:
2049    case TCG_COND_GEU:
2050        inv = true;
2051        /* fall through */
2052    case TCG_COND_LT:
2053    case TCG_COND_LTU:
2054        sh = 29; /* CR7 CR_LT */
2055        goto crtest;
2056
2057    crtest:
2058        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
2059        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
2060        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
2061        if (neg && inv) {
2062            tcg_out32(s, ADDI | TAI(arg0, arg0, -1));
2063        } else if (neg) {
2064            tcg_out32(s, NEG | RT(arg0) | RA(arg0));
2065        } else if (inv) {
2066            tcg_out_xori32(s, arg0, arg0, 1);
2067        }
2068        break;
2069
2070    default:
2071        g_assert_not_reached();
2072    }
2073}
2074
2075static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
2076{
2077    tcg_out32(s, tcg_to_bc[cond] | bd);
2078}
2079
2080static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l)
2081{
2082    int bd = 0;
2083    if (l->has_value) {
2084        bd = reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
2085    } else {
2086        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
2087    }
2088    tcg_out_bc(s, cond, bd);
2089}
2090
2091static void tcg_out_brcond(TCGContext *s, TCGCond cond,
2092                           TCGArg arg1, TCGArg arg2, int const_arg2,
2093                           TCGLabel *l, TCGType type)
2094{
2095    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
2096    tcg_out_bc_lab(s, cond, l);
2097}
2098
2099static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
2100                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
2101                            TCGArg v2, bool const_c2)
2102{
2103    /* If for some reason both inputs are zero, don't produce bad code.  */
2104    if (v1 == 0 && v2 == 0) {
2105        tcg_out_movi(s, type, dest, 0);
2106        return;
2107    }
2108
2109    tcg_out_cmp(s, cond, c1, c2, const_c2, 0, type);
2110
2111    if (have_isel) {
2112        int isel = tcg_to_isel[cond];
2113
2114        /* Swap the V operands if the operation indicates inversion.  */
2115        if (isel & 1) {
2116            int t = v1;
2117            v1 = v2;
2118            v2 = t;
2119            isel &= ~1;
2120        }
2121        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
2122        if (v2 == 0) {
2123            tcg_out_movi(s, type, TCG_REG_R0, 0);
2124        }
2125        tcg_out32(s, isel | TAB(dest, v1, v2));
2126    } else {
2127        if (dest == v2) {
2128            cond = tcg_invert_cond(cond);
2129            v2 = v1;
2130        } else if (dest != v1) {
2131            if (v1 == 0) {
2132                tcg_out_movi(s, type, dest, 0);
2133            } else {
2134                tcg_out_mov(s, type, dest, v1);
2135            }
2136        }
2137        /* Branch forward over one insn */
2138        tcg_out_bc(s, cond, 8);
2139        if (v2 == 0) {
2140            tcg_out_movi(s, type, dest, 0);
2141        } else {
2142            tcg_out_mov(s, type, dest, v2);
2143        }
2144    }
2145}
2146
2147static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
2148                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
2149{
2150    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
2151        tcg_out32(s, opc | RA(a0) | RS(a1));
2152    } else {
2153        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 0, type);
2154        /* Note that the only other valid constant for a2 is 0.  */
2155        if (have_isel) {
2156            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
2157            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
2158        } else if (!const_a2 && a0 == a2) {
2159            tcg_out_bc(s, TCG_COND_EQ, 8);
2160            tcg_out32(s, opc | RA(a0) | RS(a1));
2161        } else {
2162            tcg_out32(s, opc | RA(a0) | RS(a1));
2163            tcg_out_bc(s, TCG_COND_NE, 8);
2164            if (const_a2) {
2165                tcg_out_movi(s, type, a0, 0);
2166            } else {
2167                tcg_out_mov(s, type, a0, a2);
2168            }
2169        }
2170    }
2171}
2172
2173static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
2174                         const int *const_args)
2175{
2176    static const struct { uint8_t bit1, bit2; } bits[] = {
2177        [TCG_COND_LT ] = { CR_LT, CR_LT },
2178        [TCG_COND_LE ] = { CR_LT, CR_GT },
2179        [TCG_COND_GT ] = { CR_GT, CR_GT },
2180        [TCG_COND_GE ] = { CR_GT, CR_LT },
2181        [TCG_COND_LTU] = { CR_LT, CR_LT },
2182        [TCG_COND_LEU] = { CR_LT, CR_GT },
2183        [TCG_COND_GTU] = { CR_GT, CR_GT },
2184        [TCG_COND_GEU] = { CR_GT, CR_LT },
2185    };
2186
2187    TCGCond cond = args[4], cond2;
2188    TCGArg al, ah, bl, bh;
2189    int blconst, bhconst;
2190    int op, bit1, bit2;
2191
2192    al = args[0];
2193    ah = args[1];
2194    bl = args[2];
2195    bh = args[3];
2196    blconst = const_args[2];
2197    bhconst = const_args[3];
2198
2199    switch (cond) {
2200    case TCG_COND_EQ:
2201        op = CRAND;
2202        goto do_equality;
2203    case TCG_COND_NE:
2204        op = CRNAND;
2205    do_equality:
2206        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
2207        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
2208        tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2209        break;
2210
2211    case TCG_COND_TSTEQ:
2212    case TCG_COND_TSTNE:
2213        if (blconst) {
2214            tcg_out_andi32(s, TCG_REG_R0, al, bl);
2215        } else {
2216            tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl));
2217        }
2218        if (bhconst) {
2219            tcg_out_andi32(s, TCG_REG_TMP1, ah, bh);
2220        } else {
2221            tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh));
2222        }
2223        tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1);
2224        break;
2225
2226    case TCG_COND_LT:
2227    case TCG_COND_LE:
2228    case TCG_COND_GT:
2229    case TCG_COND_GE:
2230    case TCG_COND_LTU:
2231    case TCG_COND_LEU:
2232    case TCG_COND_GTU:
2233    case TCG_COND_GEU:
2234        bit1 = bits[cond].bit1;
2235        bit2 = bits[cond].bit2;
2236        op = (bit1 != bit2 ? CRANDC : CRAND);
2237        cond2 = tcg_unsigned_cond(cond);
2238
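        /*
         * Compute cr0.EQ = (ah cond bh) || ((ah == bh) && (al cond' bl)),
         * where cond' is the unsigned variant of cond.  When the two CR
         * bits differ (LE/GE), the low-part bit must be used in inverted
         * sense, hence the CRANDC selection above.
         */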
2239        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
2240        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
2241        tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
2242        tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ));
2243        break;
2244
2245    default:
2246        g_assert_not_reached();
2247    }
2248}
2249
2250static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
2251                             const int *const_args)
2252{
2253    tcg_out_cmp2(s, args + 1, const_args + 1);
2254    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
2255    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
2256}
2257
2258static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
2259                            const int *const_args)
2260{
2261    tcg_out_cmp2(s, args, const_args);
2262    tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
2263}
2264
2265static void tcg_out_mb(TCGContext *s, TCGArg a0)
2266{
2267    uint32_t insn;
2268
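    /*
     * LWSYNC orders every combination except store-before-load;
     * only that case requires the heavier HWSYNC.
     */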
2269    if (a0 & TCG_MO_ST_LD) {
2270        insn = HWSYNC;
2271    } else {
2272        insn = LWSYNC;
2273    }
2274
2275    tcg_out32(s, insn);
2276}
2277
2278static void tcg_out_call_int(TCGContext *s, int lk,
2279                             const tcg_insn_unit *target)
2280{
2281#ifdef _CALL_AIX
2282    /* Look through the descriptor.  If the branch is in range and the
2283       TOC value is cheap to build, call the target directly.  */
2284    const void *tgt = ((const void * const *)target)[0];
2285    uintptr_t toc = ((const uintptr_t *)target)[1];
2286    intptr_t diff = tcg_pcrel_diff(s, tgt);
2287
2288    if (in_range_b(diff) && toc == (uint32_t)toc) {
2289        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
2290        tcg_out_b(s, lk, tgt);
2291    } else {
2292        /* Fold the low bits of the constant into the addresses below.  */
2293        intptr_t arg = (intptr_t)target;
2294        int ofs = (int16_t)arg;
2295
2296        if (ofs + 8 < 0x8000) {
2297            arg -= ofs;
2298        } else {
2299            ofs = 0;
2300        }
2301        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
2302        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
2303        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
2304        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
2305        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
2306    }
2307#elif defined(_CALL_ELF) && _CALL_ELF == 2
2308    intptr_t diff;
2309
2310    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
2311       address, which the callee uses to compute its TOC address.  */
2312    /* FIXME: when the branch is in range, we could avoid r12 load if we
2313       knew that the destination uses the same TOC, and what its local
2314       entry point offset is.  */
2315    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
2316
2317    diff = tcg_pcrel_diff(s, target);
2318    if (in_range_b(diff)) {
2319        tcg_out_b(s, lk, target);
2320    } else {
2321        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
2322        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
2323    }
2324#else
2325    tcg_out_b(s, lk, target);
2326#endif
2327}
2328
2329static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
2330                         const TCGHelperInfo *info)
2331{
2332    tcg_out_call_int(s, LK, target);
2333}
2334
2335static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
2336    [MO_UB] = LBZX,
2337    [MO_UW] = LHZX,
2338    [MO_UL] = LWZX,
2339    [MO_UQ] = LDX,
2340    [MO_SW] = LHAX,
2341    [MO_SL] = LWAX,
2342    [MO_BSWAP | MO_UB] = LBZX,
2343    [MO_BSWAP | MO_UW] = LHBRX,
2344    [MO_BSWAP | MO_UL] = LWBRX,
2345    [MO_BSWAP | MO_UQ] = LDBRX,
2346};
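/* Entries missing above (e.g. byte-swapped sign-extending loads) are left
   zero; tcg_out_qemu_ld handles them with a zero-extending load followed
   by an explicit sign-extension. */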
2347
2348static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
2349    [MO_UB] = STBX,
2350    [MO_UW] = STHX,
2351    [MO_UL] = STWX,
2352    [MO_UQ] = STDX,
2353    [MO_BSWAP | MO_UB] = STBX,
2354    [MO_BSWAP | MO_UW] = STHBRX,
2355    [MO_BSWAP | MO_UL] = STWBRX,
2356    [MO_BSWAP | MO_UQ] = STDBRX,
2357};
2358
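/* Recover the return address from LR for the slow-path helper call. */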
2359static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
2360{
2361    if (arg < 0) {
2362        arg = TCG_REG_TMP1;
2363    }
2364    tcg_out32(s, MFSPR | RT(arg) | LR);
2365    return arg;
2366}
2367
2368/*
2369 * For the purposes of ppc32 sorting 4 input registers into 4 argument
2370 * registers, there is an outside chance we would require 3 temps.
2371 */
2372static const TCGLdstHelperParam ldst_helper_param = {
2373    .ra_gen = ldst_ra_gen,
2374    .ntmp = 3,
2375    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
2376};
2377
2378static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2379{
2380    MemOp opc = get_memop(lb->oi);
2381
2382    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2383        return false;
2384    }
2385
2386    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
2387    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
2388    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
2389
2390    tcg_out_b(s, 0, lb->raddr);
2391    return true;
2392}
2393
2394static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2395{
2396    MemOp opc = get_memop(lb->oi);
2397
2398    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2399        return false;
2400    }
2401
2402    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
2403    tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);
2404
2405    tcg_out_b(s, 0, lb->raddr);
2406    return true;
2407}
2408
2409typedef struct {
2410    TCGReg base;
2411    TCGReg index;
2412    TCGAtomAlign aa;
2413} HostAddress;
2414
2415bool tcg_target_has_memory_bswap(MemOp memop)
2416{
2417    TCGAtomAlign aa;
2418
2419    if ((memop & MO_SIZE) <= MO_64) {
2420        return true;
2421    }
2422
2423    /*
2424     * Reject 16-byte memop with 16-byte atomicity,
2425     * but do allow a pair of 64-bit operations.
2426     */
2427    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
2428    return aa.atom <= MO_64;
2429}
2430
2431/* We expect to use a 16-bit negative offset from ENV.  */
2432#define MIN_TLB_MASK_TABLE_OFS  -32768
2433
2434/*
2435 * For system-mode, perform the TLB load and compare.
2436 * For user-mode, perform any required alignment tests.
2437 * In both cases, return a TCGLabelQemuLdst structure if the slow path
2438 * is required and fill in @h with the host address for the fast path.
2439 */
2440static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
2441                                           TCGReg addrlo, TCGReg addrhi,
2442                                           MemOpIdx oi, bool is_ld)
2443{
2444    TCGType addr_type = s->addr_type;
2445    TCGLabelQemuLdst *ldst = NULL;
2446    MemOp opc = get_memop(oi);
2447    MemOp a_bits, s_bits;
2448
2449    /*
2450     * Book II, Section 1.4, Single-Copy Atomicity, specifies:
2451     *
2452     * Before 3.0, "An access that is not atomic is performed as a set of
2453     * smaller disjoint atomic accesses. In general, the number and alignment
2454     * of these accesses are implementation-dependent."  Thus MO_ATOM_IFALIGN.
2455     *
2456     * As of 3.0, "the non-atomic access is performed as described in
2457     * the corresponding list", which matches MO_ATOM_SUBALIGN.
2458     */
2459    s_bits = opc & MO_SIZE;
2460    h->aa = atom_and_align_for_opc(s, opc,
2461                                   have_isa_3_00 ? MO_ATOM_SUBALIGN
2462                                                 : MO_ATOM_IFALIGN,
2463                                   s_bits == MO_128);
2464    a_bits = h->aa.align;
2465
2466    if (tcg_use_softmmu) {
2467        int mem_index = get_mmuidx(oi);
2468        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
2469                            : offsetof(CPUTLBEntry, addr_write);
2470        int fast_off = tlb_mask_table_ofs(s, mem_index);
2471        int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2472        int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2473
2474        ldst = new_ldst_label(s);
2475        ldst->is_ld = is_ld;
2476        ldst->oi = oi;
2477        ldst->addrlo_reg = addrlo;
2478        ldst->addrhi_reg = addrhi;
2479
2480        /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
2481        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
2482        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
2483
2484        /* Extract the page index, shifted into place for tlb index.  */
2485        if (TCG_TARGET_REG_BITS == 32) {
2486            tcg_out_shri32(s, TCG_REG_R0, addrlo,
2487                           s->page_bits - CPU_TLB_ENTRY_BITS);
2488        } else {
2489            tcg_out_shri64(s, TCG_REG_R0, addrlo,
2490                           s->page_bits - CPU_TLB_ENTRY_BITS);
2491        }
2492        tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
2493
2494        /*
2495         * Load the (low part) TLB comparator into TMP2.
2496         * For 64-bit host, always load the entire 64-bit slot for simplicity.
2497         * We will ignore the high bits with tcg_out_cmp(..., addr_type).
2498         */
2499        if (TCG_TARGET_REG_BITS == 64) {
2500            if (cmp_off == 0) {
2501                tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
2502                                        TCG_REG_TMP1, TCG_REG_TMP2));
2503            } else {
2504                tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
2505                                       TCG_REG_TMP1, TCG_REG_TMP2));
2506                tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
2507                           TCG_REG_TMP1, cmp_off);
2508            }
2509        } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
2510            tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
2511                                     TCG_REG_TMP1, TCG_REG_TMP2));
2512        } else {
2513            tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2514            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2515                       cmp_off + 4 * HOST_BIG_ENDIAN);
2516        }
2517
2518        /*
2519         * Load the TLB addend for use on the fast path.
2520         * Do this asap to minimize any load use delay.
2521         */
2522        if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
2523            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2524                       offsetof(CPUTLBEntry, addend));
2525        }
2526
2527        /* Clear the non-page, non-alignment bits from the address in R0. */
2528        if (TCG_TARGET_REG_BITS == 32) {
2529            /*
2530             * We don't support unaligned accesses on 32-bit hosts.
2531             * Preserve the bottom bits and thus trigger a comparison
2532             * failure on unaligned accesses.
2533             */
2534            if (a_bits < s_bits) {
2535                a_bits = s_bits;
2536            }
2537            tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2538                        (32 - a_bits) & 31, 31 - s->page_bits);
2539        } else {
2540            TCGReg t = addrlo;
2541
2542            /*
2543             * If the access is unaligned, we need to make sure we fail if we
2544             * cross a page boundary.  The trick is to add the access size-1
2545             * to the address before masking the low bits.  That will make the
2546             * address overflow to the next page if we cross a page boundary,
2547             * which will then force a mismatch of the TLB compare.
2548             */
2549            if (a_bits < s_bits) {
2550                unsigned a_mask = (1 << a_bits) - 1;
2551                unsigned s_mask = (1 << s_bits) - 1;
2552                tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2553                t = TCG_REG_R0;
2554            }
2555
2556            /* Mask the address for the requested alignment.  */
2557            if (addr_type == TCG_TYPE_I32) {
2558                tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2559                            (32 - a_bits) & 31, 31 - s->page_bits);
2560            } else if (a_bits == 0) {
2561                tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
2562            } else {
2563                tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
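                /*
                 * Rotate the in-page bits to the top, clear those above
                 * the alignment, and rotate back.  The low a_bits are
                 * preserved, so an unaligned address cannot match the
                 * page-aligned comparator.
                 */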
2564                            64 - s->page_bits, s->page_bits - a_bits);
2565                tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
2566            }
2567        }
2568
2569        if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
2570            /* Low part comparison into cr7. */
2571            tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2572                        0, 7, TCG_TYPE_I32);
2573
2574            /* Load the high part TLB comparator into TMP2.  */
2575            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2576                       cmp_off + 4 * !HOST_BIG_ENDIAN);
2577
2578            /* Load addend, deferred for this case. */
2579            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2580                       offsetof(CPUTLBEntry, addend));
2581
2582            /* High part comparison into cr6. */
2583            tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
2584                        0, 6, TCG_TYPE_I32);
2585
2586            /* Combine comparisons into cr0. */
2587            tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2588        } else {
2589            /* Full comparison into cr0. */
2590            tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2591                        0, 0, addr_type);
2592        }
2593
2594        /* Branch and link to the slow path; LR then holds the fast path address. */
2595        ldst->label_ptr[0] = s->code_ptr;
2596        tcg_out_bc(s, TCG_COND_NE, LK);
2597
2598        h->base = TCG_REG_TMP1;
2599    } else {
2600        if (a_bits) {
2601            ldst = new_ldst_label(s);
2602            ldst->is_ld = is_ld;
2603            ldst->oi = oi;
2604            ldst->addrlo_reg = addrlo;
2605            ldst->addrhi_reg = addrhi;
2606
2607            /* a_bits maxes out at 7, well within ANDI's 16-bit immediate. */
2608            tcg_debug_assert(a_bits < 16);
2609            tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
2610
2611            ldst->label_ptr[0] = s->code_ptr;
2612            tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
2613        }
2614
2615        h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2616    }
2617
2618    if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
2619        /* Zero-extend the guest address for use in the host address. */
2620        tcg_out_ext32u(s, TCG_REG_TMP2, addrlo);
2621        h->index = TCG_REG_TMP2;
2622    } else {
2623        h->index = addrlo;
2624    }
2625
2626    return ldst;
2627}
2628
2629static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
2630                            TCGReg addrlo, TCGReg addrhi,
2631                            MemOpIdx oi, TCGType data_type)
2632{
2633    MemOp opc = get_memop(oi);
2634    TCGLabelQemuLdst *ldst;
2635    HostAddress h;
2636
2637    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
2638
2639    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2640        if (opc & MO_BSWAP) {
2641            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2642            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2643            tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
2644        } else if (h.base != 0) {
2645            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2646            tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
2647            tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
2648        } else if (h.index == datahi) {
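            /* The address would be clobbered by the first load;
               load the low word first. */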
2649            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2650            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2651        } else {
2652            tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2653            tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2654        }
2655    } else {
2656        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2657        if (!have_isa_2_06 && insn == LDBRX) {
2658            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2659            tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2660            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
2661            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2662        } else if (insn) {
2663            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2664        } else {
2665            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2666            tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2667            tcg_out_movext(s, TCG_TYPE_REG, datalo,
2668                           TCG_TYPE_REG, opc & MO_SSIZE, datalo);
2669        }
2670    }
2671
2672    if (ldst) {
2673        ldst->type = data_type;
2674        ldst->datalo_reg = datalo;
2675        ldst->datahi_reg = datahi;
2676        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2677    }
2678}
2679
2680static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
2681                            TCGReg addrlo, TCGReg addrhi,
2682                            MemOpIdx oi, TCGType data_type)
2683{
2684    MemOp opc = get_memop(oi);
2685    TCGLabelQemuLdst *ldst;
2686    HostAddress h;
2687
2688    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
2689
2690    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2691        if (opc & MO_BSWAP) {
2692            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2693            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2694            tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
2695        } else if (h.base != 0) {
2696            tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2697            tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
2698            tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
2699        } else {
2700            tcg_out32(s, STW | TAI(datahi, h.index, 0));
2701            tcg_out32(s, STW | TAI(datalo, h.index, 4));
2702        }
2703    } else {
2704        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2705        if (!have_isa_2_06 && insn == STDBRX) {
2706            tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2707            tcg_out32(s, ADDI | TAI(TCG_REG_TMP2, h.index, 4));
2708            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2709            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP2));
2710        } else {
2711            tcg_out32(s, insn | SAB(datalo, h.base, h.index));
2712        }
2713    }
2714
2715    if (ldst) {
2716        ldst->type = data_type;
2717        ldst->datalo_reg = datalo;
2718        ldst->datahi_reg = datahi;
2719        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2720    }
2721}
2722
2723static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
2724                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
2725{
2726    TCGLabelQemuLdst *ldst;
2727    HostAddress h;
2728    bool need_bswap;
2729    uint32_t insn;
2730    TCGReg index;
2731
2732    ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
2733
2734    /* Compose the final address, as LQ/STQ have no indexing. */
2735    index = h.index;
2736    if (h.base != 0) {
2737        index = TCG_REG_TMP1;
2738        tcg_out32(s, ADD | TAB(index, h.base, h.index));
2739    }
2740    need_bswap = get_memop(oi) & MO_BSWAP;
2741
2742    if (h.aa.atom == MO_128) {
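        /* LQ/STQ operate on an even/odd register pair, addressed via
           the even register. */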
2743        tcg_debug_assert(!need_bswap);
2744        tcg_debug_assert(datalo & 1);
2745        tcg_debug_assert(datahi == datalo - 1);
2746        tcg_debug_assert(!is_ld || datahi != index);
2747        insn = is_ld ? LQ : STQ;
2748        tcg_out32(s, insn | TAI(datahi, index, 0));
2749    } else {
2750        TCGReg d1, d2;
2751
2752        if (HOST_BIG_ENDIAN ^ need_bswap) {
2753            d1 = datahi, d2 = datalo;
2754        } else {
2755            d1 = datalo, d2 = datahi;
2756        }
2757
2758        if (need_bswap) {
2759            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 8);
2760            insn = is_ld ? LDBRX : STDBRX;
2761            tcg_out32(s, insn | TAB(d1, 0, index));
2762            tcg_out32(s, insn | TAB(d2, index, TCG_REG_R0));
2763        } else {
2764            insn = is_ld ? LD : STD;
2765            tcg_out32(s, insn | TAI(d1, index, 0));
2766            tcg_out32(s, insn | TAI(d2, index, 8));
2767        }
2768    }
2769
2770    if (ldst) {
2771        ldst->type = TCG_TYPE_I128;
2772        ldst->datalo_reg = datalo;
2773        ldst->datahi_reg = datahi;
2774        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2775    }
2776}
2777
2778static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2779{
2780    int i;
2781    for (i = 0; i < count; ++i) {
2782        p[i] = NOP;
2783    }
2784}
2785
2786/* Parameters for function call generation, used in tcg.c.  */
2787#define TCG_TARGET_STACK_ALIGN       16
2788
2789#ifdef _CALL_AIX
2790# define LINK_AREA_SIZE                (6 * SZR)
2791# define LR_OFFSET                     (1 * SZR)
2792# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
2793#elif defined(_CALL_DARWIN)
2794# define LINK_AREA_SIZE                (6 * SZR)
2795# define LR_OFFSET                     (2 * SZR)
2796#elif TCG_TARGET_REG_BITS == 64
2797# if defined(_CALL_ELF) && _CALL_ELF == 2
2798#  define LINK_AREA_SIZE               (4 * SZR)
2799#  define LR_OFFSET                    (1 * SZR)
2800# endif
2801#else /* TCG_TARGET_REG_BITS == 32 */
2802# if defined(_CALL_SYSV)
2803#  define LINK_AREA_SIZE               (2 * SZR)
2804#  define LR_OFFSET                    (1 * SZR)
2805# endif
2806#endif
2807#ifndef LR_OFFSET
2808# error "Unhandled abi"
2809#endif
2810#ifndef TCG_TARGET_CALL_STACK_OFFSET
2811# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
2812#endif
2813
2814#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2815#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2816
2817#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
2818                     + TCG_STATIC_CALL_ARGS_SIZE    \
2819                     + CPU_TEMP_BUF_SIZE            \
2820                     + REG_SAVE_SIZE                \
2821                     + TCG_TARGET_STACK_ALIGN - 1)  \
2822                    & -TCG_TARGET_STACK_ALIGN)
2823
2824#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
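/*
 * Frame layout, from low to high addresses: the link area, then
 * TCG_STATIC_CALL_ARGS_SIZE of outgoing call arguments, the CPU
 * temporary buffer ending at REG_SAVE_BOT, and finally the
 * callee-saved registers from REG_SAVE_BOT up to FRAME_SIZE.
 */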
2825
2826static void tcg_target_qemu_prologue(TCGContext *s)
2827{
2828    int i;
2829
2830#ifdef _CALL_AIX
2831    const void **desc = (const void **)s->code_ptr;
2832    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
2833    desc[1] = 0;                            /* environment pointer */
2834    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
2835#endif
2836
2837    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2838                  CPU_TEMP_BUF_SIZE);
2839
2840    /* Prologue */
2841    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2842    tcg_out32(s, (SZR == 8 ? STDU : STWU)
2843              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2844
2845    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2846        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2847                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2848    }
2849    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2850
2851    if (!tcg_use_softmmu && guest_base) {
2852        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2853        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2854    }
2855
2856    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2857    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2858    tcg_out32(s, BCCTR | BO_ALWAYS);
2859
2860    /* Epilogue */
2861    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2862
2863    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2864    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2865        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2866                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2867    }
2868    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2869    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2870    tcg_out32(s, BCLR | BO_ALWAYS);
2871}
2872
2873static void tcg_out_tb_start(TCGContext *s)
2874{
2875    /* Load TCG_REG_TB. */
2876    if (USE_REG_TB) {
2877        if (have_isa_3_00) {
2878            /* lnia REG_TB */
2879            tcg_out_addpcis(s, TCG_REG_TB, 0);
2880        } else {
2881            /* bcl 20,31,$+4 (preferred form for getting nia) */
2882            tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
2883            tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
2884        }
2885    }
2886}
2887
2888static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2889{
2890    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2891    tcg_out_b(s, 0, tcg_code_gen_epilogue);
2892}
2893
2894static void tcg_out_goto_tb(TCGContext *s, int which)
2895{
2896    uintptr_t ptr = get_jmp_target_addr(s, which);
2897    int16_t lo;
2898
2899    /* Direct branch will be patched by tb_target_set_jmp_target. */
2900    set_jmp_insn_offset(s, which);
2901    tcg_out32(s, NOP);
2902
2903    /* When branch is out of range, fall through to indirect. */
2904    if (USE_REG_TB) {
2905        ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
2906        tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
2907    } else if (have_isa_3_10) {
2908        ptrdiff_t offset = tcg_pcrel_diff_for_prefix(s, (void *)ptr);
2909        tcg_out_8ls_d(s, PLD, TCG_REG_TMP1, 0, offset, 1);
2910    } else if (have_isa_3_00) {
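        /* ADDPCIS is relative to the next instruction's address, hence
           the -4 adjustment. */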
2911        ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
2912        lo = offset;
2913        tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo);
2914        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
2915    } else {
2916        lo = ptr;
2917        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo);
2918        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
2919    }
2920
2921    tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2922    tcg_out32(s, BCCTR | BO_ALWAYS);
2923    set_jmp_reset_offset(s, which);
2924}
2925
2926void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2927                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2928{
2929    uintptr_t addr = tb->jmp_target_addr[n];
2930    intptr_t diff = addr - jmp_rx;
2931    tcg_insn_unit insn;
2932
2933    if (in_range_b(diff)) {
2934        insn = B | (diff & 0x3fffffc);
2935    } else {
2936        insn = NOP;
2937    }
2938
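    /* A single aligned 4-byte store is atomic wrt concurrent execution. */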
2939    qatomic_set((uint32_t *)jmp_rw, insn);
2940    flush_idcache_range(jmp_rx, jmp_rw, 4);
2941}
2942
2943static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2944                       const TCGArg args[TCG_MAX_OP_ARGS],
2945                       const int const_args[TCG_MAX_OP_ARGS])
2946{
2947    TCGArg a0, a1, a2;
2948
2949    switch (opc) {
2950    case INDEX_op_goto_ptr:
2951        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2952        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2953        tcg_out32(s, BCCTR | BO_ALWAYS);
2954        break;
2955    case INDEX_op_br:
2956        {
2957            TCGLabel *l = arg_label(args[0]);
2958            uint32_t insn = B;
2959
2960            if (l->has_value) {
2961                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2962                                       l->u.value_ptr);
2963            } else {
2964                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2965            }
2966            tcg_out32(s, insn);
2967        }
2968        break;
2969    case INDEX_op_ld8u_i32:
2970    case INDEX_op_ld8u_i64:
2971        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2972        break;
2973    case INDEX_op_ld8s_i32:
2974    case INDEX_op_ld8s_i64:
2975        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2976        tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2977        break;
2978    case INDEX_op_ld16u_i32:
2979    case INDEX_op_ld16u_i64:
2980        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2981        break;
2982    case INDEX_op_ld16s_i32:
2983    case INDEX_op_ld16s_i64:
2984        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2985        break;
2986    case INDEX_op_ld_i32:
2987    case INDEX_op_ld32u_i64:
2988        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2989        break;
2990    case INDEX_op_ld32s_i64:
2991        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2992        break;
2993    case INDEX_op_ld_i64:
2994        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2995        break;
2996    case INDEX_op_st8_i32:
2997    case INDEX_op_st8_i64:
2998        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2999        break;
3000    case INDEX_op_st16_i32:
3001    case INDEX_op_st16_i64:
3002        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
3003        break;
3004    case INDEX_op_st_i32:
3005    case INDEX_op_st32_i64:
3006        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
3007        break;
3008    case INDEX_op_st_i64:
3009        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
3010        break;
3011
3012    case INDEX_op_add_i32:
3013        a0 = args[0], a1 = args[1], a2 = args[2];
3014        if (const_args[2]) {
3015        do_addi_32:
3016            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
3017        } else {
3018            tcg_out32(s, ADD | TAB(a0, a1, a2));
3019        }
3020        break;
3021    case INDEX_op_sub_i32:
3022        a0 = args[0], a1 = args[1], a2 = args[2];
3023        if (const_args[1]) {
3024            if (const_args[2]) {
3025                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
3026            } else {
3027                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
3028            }
3029        } else if (const_args[2]) {
3030            a2 = -a2;
3031            goto do_addi_32;
3032        } else {
3033            tcg_out32(s, SUBF | TAB(a0, a2, a1));
3034        }
3035        break;
3036
3037    case INDEX_op_and_i32:
3038        a0 = args[0], a1 = args[1], a2 = args[2];
3039        if (const_args[2]) {
3040            tcg_out_andi32(s, a0, a1, a2);
3041        } else {
3042            tcg_out32(s, AND | SAB(a1, a0, a2));
3043        }
3044        break;
3045    case INDEX_op_and_i64:
3046        a0 = args[0], a1 = args[1], a2 = args[2];
3047        if (const_args[2]) {
3048            tcg_out_andi64(s, a0, a1, a2);
3049        } else {
3050            tcg_out32(s, AND | SAB(a1, a0, a2));
3051        }
3052        break;
3053    case INDEX_op_or_i64:
3054    case INDEX_op_or_i32:
3055        a0 = args[0], a1 = args[1], a2 = args[2];
3056        if (const_args[2]) {
3057            tcg_out_ori32(s, a0, a1, a2);
3058        } else {
3059            tcg_out32(s, OR | SAB(a1, a0, a2));
3060        }
3061        break;
3062    case INDEX_op_xor_i64:
3063    case INDEX_op_xor_i32:
3064        a0 = args[0], a1 = args[1], a2 = args[2];
3065        if (const_args[2]) {
3066            tcg_out_xori32(s, a0, a1, a2);
3067        } else {
3068            tcg_out32(s, XOR | SAB(a1, a0, a2));
3069        }
3070        break;
3071    case INDEX_op_andc_i32:
3072        a0 = args[0], a1 = args[1], a2 = args[2];
3073        if (const_args[2]) {
3074            tcg_out_andi32(s, a0, a1, ~a2);
3075        } else {
3076            tcg_out32(s, ANDC | SAB(a1, a0, a2));
3077        }
3078        break;
3079    case INDEX_op_andc_i64:
3080        a0 = args[0], a1 = args[1], a2 = args[2];
3081        if (const_args[2]) {
3082            tcg_out_andi64(s, a0, a1, ~a2);
3083        } else {
3084            tcg_out32(s, ANDC | SAB(a1, a0, a2));
3085        }
3086        break;
3087    case INDEX_op_orc_i32:
3088        if (const_args[2]) {
3089            tcg_out_ori32(s, args[0], args[1], ~args[2]);
3090            break;
3091        }
3092        /* FALLTHRU */
3093    case INDEX_op_orc_i64:
3094        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
3095        break;
3096    case INDEX_op_eqv_i32:
3097        if (const_args[2]) {
3098            tcg_out_xori32(s, args[0], args[1], ~args[2]);
3099            break;
3100        }
3101        /* FALLTHRU */
3102    case INDEX_op_eqv_i64:
3103        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
3104        break;
3105    case INDEX_op_nand_i32:
3106    case INDEX_op_nand_i64:
3107        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
3108        break;
3109    case INDEX_op_nor_i32:
3110    case INDEX_op_nor_i64:
3111        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
3112        break;
3113
3114    case INDEX_op_clz_i32:
3115        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
3116                      args[2], const_args[2]);
3117        break;
3118    case INDEX_op_ctz_i32:
3119        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
3120                      args[2], const_args[2]);
3121        break;
3122    case INDEX_op_ctpop_i32:
3123        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
3124        break;
3125
3126    case INDEX_op_clz_i64:
3127        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
3128                      args[2], const_args[2]);
3129        break;
3130    case INDEX_op_ctz_i64:
3131        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
3132                      args[2], const_args[2]);
3133        break;
3134    case INDEX_op_ctpop_i64:
3135        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
3136        break;
3137
3138    case INDEX_op_mul_i32:
3139        a0 = args[0], a1 = args[1], a2 = args[2];
3140        if (const_args[2]) {
3141            tcg_out32(s, MULLI | TAI(a0, a1, a2));
3142        } else {
3143            tcg_out32(s, MULLW | TAB(a0, a1, a2));
3144        }
3145        break;
3146
3147    case INDEX_op_div_i32:
3148        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
3149        break;
3150
3151    case INDEX_op_divu_i32:
3152        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
3153        break;
3154
3155    case INDEX_op_rem_i32:
3156        tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
3157        break;
3158
3159    case INDEX_op_remu_i32:
3160        tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
3161        break;
3162
3163    case INDEX_op_shl_i32:
3164        if (const_args[2]) {
3165            /* Limit immediate shift count lest we create an illegal insn.  */
3166            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
3167        } else {
3168            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
3169        }
3170        break;
3171    case INDEX_op_shr_i32:
3172        if (const_args[2]) {
3173            /* Limit immediate shift count lest we create an illegal insn.  */
3174            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
3175        } else {
3176            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
3177        }
3178        break;
3179    case INDEX_op_sar_i32:
3180        if (const_args[2]) {
3181            tcg_out_sari32(s, args[0], args[1], args[2]);
3182        } else {
3183            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
3184        }
3185        break;
3186    case INDEX_op_rotl_i32:
3187        if (const_args[2]) {
3188            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
3189        } else {
3190            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
3191                         | MB(0) | ME(31));
3192        }
3193        break;
3194    case INDEX_op_rotr_i32:
3195        if (const_args[2]) {
3196            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
3197        } else {
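            /* Rotate right by n is rotate left by 32 - n. */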
3198            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
3199            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
3200                         | MB(0) | ME(31));
3201        }
3202        break;
3203
3204    case INDEX_op_brcond_i32:
3205        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
3206                       arg_label(args[3]), TCG_TYPE_I32);
3207        break;
3208    case INDEX_op_brcond_i64:
3209        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
3210                       arg_label(args[3]), TCG_TYPE_I64);
3211        break;
3212    case INDEX_op_brcond2_i32:
3213        tcg_out_brcond2(s, args, const_args);
3214        break;
3215
3216    case INDEX_op_neg_i32:
3217    case INDEX_op_neg_i64:
3218        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
3219        break;
3220
3221    case INDEX_op_not_i32:
3222    case INDEX_op_not_i64:
3223        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
3224        break;
3225
3226    case INDEX_op_add_i64:
3227        a0 = args[0], a1 = args[1], a2 = args[2];
3228        if (const_args[2]) {
3229        do_addi_64:
3230            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
3231        } else {
3232            tcg_out32(s, ADD | TAB(a0, a1, a2));
3233        }
3234        break;
3235    case INDEX_op_sub_i64:
3236        a0 = args[0], a1 = args[1], a2 = args[2];
3237        if (const_args[1]) {
3238            if (const_args[2]) {
3239                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
3240            } else {
3241                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
3242            }
3243        } else if (const_args[2]) {
3244            a2 = -a2;
3245            goto do_addi_64;
3246        } else {
3247            tcg_out32(s, SUBF | TAB(a0, a2, a1));
3248        }
3249        break;
3250
3251    case INDEX_op_shl_i64:
3252        if (const_args[2]) {
3253            /* Limit immediate shift count lest we create an illegal insn.  */
3254            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
3255        } else {
3256            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
3257        }
3258        break;
3259    case INDEX_op_shr_i64:
3260        if (const_args[2]) {
3261            /* Limit immediate shift count lest we create an illegal insn.  */
3262            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
3263        } else {
3264            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
3265        }
3266        break;
3267    case INDEX_op_sar_i64:
3268        if (const_args[2]) {
3269            tcg_out_sari64(s, args[0], args[1], args[2]);
3270        } else {
3271            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
3272        }
3273        break;
3274    case INDEX_op_rotl_i64:
3275        if (const_args[2]) {
3276            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
3277        } else {
3278            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
3279        }
3280        break;
3281    case INDEX_op_rotr_i64:
3282        if (const_args[2]) {
3283            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
3284        } else {
3285            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
3286            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
3287        }
3288        break;
3289
3290    case INDEX_op_mul_i64:
3291        a0 = args[0], a1 = args[1], a2 = args[2];
3292        if (const_args[2]) {
3293            tcg_out32(s, MULLI | TAI(a0, a1, a2));
3294        } else {
3295            tcg_out32(s, MULLD | TAB(a0, a1, a2));
3296        }
3297        break;
3298    case INDEX_op_div_i64:
3299        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
3300        break;
3301    case INDEX_op_divu_i64:
3302        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
3303        break;
3304    case INDEX_op_rem_i64:
3305        tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
3306        break;
3307    case INDEX_op_remu_i64:
3308        tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
3309        break;
3310
3311    case INDEX_op_qemu_ld_a64_i32:
3312        if (TCG_TARGET_REG_BITS == 32) {
3313            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
3314                            args[3], TCG_TYPE_I32);
3315            break;
3316        }
3317        /* fall through */
3318    case INDEX_op_qemu_ld_a32_i32:
3319        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
3320        break;
3321    case INDEX_op_qemu_ld_a32_i64:
3322        if (TCG_TARGET_REG_BITS == 64) {
3323            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
3324                            args[2], TCG_TYPE_I64);
3325        } else {
3326            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
3327                            args[3], TCG_TYPE_I64);
3328        }
3329        break;
3330    case INDEX_op_qemu_ld_a64_i64:
3331        if (TCG_TARGET_REG_BITS == 64) {
3332            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
3333                            args[2], TCG_TYPE_I64);
3334        } else {
3335            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
3336                            args[4], TCG_TYPE_I64);
3337        }
3338        break;
3339    case INDEX_op_qemu_ld_a32_i128:
3340    case INDEX_op_qemu_ld_a64_i128:
3341        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
3342        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
3343        break;
3344
3345    case INDEX_op_qemu_st_a64_i32:
3346        if (TCG_TARGET_REG_BITS == 32) {
3347            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
3348                            args[3], TCG_TYPE_I32);
3349            break;
3350        }
3351        /* fall through */
3352    case INDEX_op_qemu_st_a32_i32:
3353        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
3354        break;
3355    case INDEX_op_qemu_st_a32_i64:
3356        if (TCG_TARGET_REG_BITS == 64) {
3357            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
3358                            args[2], TCG_TYPE_I64);
3359        } else {
3360            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
3361                            args[3], TCG_TYPE_I64);
3362        }
3363        break;
3364    case INDEX_op_qemu_st_a64_i64:
3365        if (TCG_TARGET_REG_BITS == 64) {
3366            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
3367                            args[2], TCG_TYPE_I64);
3368        } else {
3369            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
3370                            args[4], TCG_TYPE_I64);
3371        }
3372        break;
3373    case INDEX_op_qemu_st_a32_i128:
3374    case INDEX_op_qemu_st_a64_i128:
3375        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
3376        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
3377        break;
3378
3379    case INDEX_op_setcond_i32:
3380        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
3381                        const_args[2], false);
3382        break;
3383    case INDEX_op_setcond_i64:
3384        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
3385                        const_args[2], false);
3386        break;
3387    case INDEX_op_negsetcond_i32:
3388        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
3389                        const_args[2], true);
3390        break;
3391    case INDEX_op_negsetcond_i64:
3392        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
3393                        const_args[2], true);
3394        break;
3395    case INDEX_op_setcond2_i32:
3396        tcg_out_setcond2(s, args, const_args);
3397        break;
3398
3399    case INDEX_op_bswap16_i32:
3400    case INDEX_op_bswap16_i64:
3401        tcg_out_bswap16(s, args[0], args[1], args[2]);
3402        break;
3403    case INDEX_op_bswap32_i32:
3404        tcg_out_bswap32(s, args[0], args[1], 0);
3405        break;
3406    case INDEX_op_bswap32_i64:
3407        tcg_out_bswap32(s, args[0], args[1], args[2]);
3408        break;
3409    case INDEX_op_bswap64_i64:
3410        tcg_out_bswap64(s, args[0], args[1]);
3411        break;
3412
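    /*
     * The constraints permit a constant arg2 only if it is zero (rZ),
     * so the constant case is a field clear: the mask is len one-bits
     * at pos, written ((2 << (len - 1)) - 1) << pos to avoid an
     * undefined shift when len equals the word size.  E.g. pos=16,
     * len=8 clears with dst &= ~(0xff << 16); the register case is a
     * single rlwimi/rldimi insert.
     */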
3413    case INDEX_op_deposit_i32:
3414        if (const_args[2]) {
3415            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
3416            tcg_out_andi32(s, args[0], args[0], ~mask);
3417        } else {
3418            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
3419                        32 - args[3] - args[4], 31 - args[3]);
3420        }
3421        break;
3422    case INDEX_op_deposit_i64:
3423        if (const_args[2]) {
3424            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
3425            tcg_out_andi64(s, args[0], args[0], ~mask);
3426        } else {
3427            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
3428                        64 - args[3] - args[4]);
3429        }
3430        break;
3431
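    /*
     * Unsigned field extract: rotate left by (width - pos) to bring
     * the field down to bit 0, then mask the low len bits.  E.g. for
     * i32 with pos=8, len=4 this is rlwinm dst,src,24,28,31.
     */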
3432    case INDEX_op_extract_i32:
3433        tcg_out_rlw(s, RLWINM, args[0], args[1],
3434                    32 - args[2], 32 - args[3], 31);
3435        break;
3436    case INDEX_op_extract_i64:
3437        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
3438        break;
3439
3440    case INDEX_op_movcond_i32:
3441        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
3442                        args[3], args[4], const_args[2]);
3443        break;
3444    case INDEX_op_movcond_i64:
3445        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
3446                        args[3], args[4], const_args[2]);
3447        break;
3448
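    /*
     * Double-word addition: addc/addic produce the low half and set
     * CA, and adde folds CA into the high half.  A constant high
     * operand is limited to 0 or -1 (constraint rZM), matching
     * addze/addme.  The low result is diverted through R0 whenever
     * writing it directly would clobber an input still needed for
     * the high half.
     */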
3449#if TCG_TARGET_REG_BITS == 64
3450    case INDEX_op_add2_i64:
3451#else
3452    case INDEX_op_add2_i32:
3453#endif
3454        /* Note that the CA bit is defined based on the word size of the
3455           environment.  So in 64-bit mode it's always carry-out of bit 63.
3456           The fallback code using deposit works just as well for 32-bit.  */
3457        a0 = args[0], a1 = args[1];
3458        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
3459            a0 = TCG_REG_R0;
3460        }
3461        if (const_args[4]) {
3462            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
3463        } else {
3464            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
3465        }
3466        if (const_args[5]) {
3467            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
3468        } else {
3469            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
3470        }
3471        if (a0 != args[0]) {
3472            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3473        }
3474        break;
3475
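    /*
     * Double-word subtraction, mirrored: subfc/subfic compute
     * "subtract from", i.e. rb - ra, hence the reversed operand order
     * below.  The high half completes with subfe, or with
     * subfze/subfme when the constant high half of the minuend is
     * 0 or -1.
     */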
3476#if TCG_TARGET_REG_BITS == 64
3477    case INDEX_op_sub2_i64:
3478#else
3479    case INDEX_op_sub2_i32:
3480#endif
3481        a0 = args[0], a1 = args[1];
3482        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
3483            a0 = TCG_REG_R0;
3484        }
3485        if (const_args[2]) {
3486            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
3487        } else {
3488            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
3489        }
3490        if (const_args[3]) {
3491            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
3492        } else {
3493            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
3494        }
3495        if (a0 != args[0]) {
3496            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3497        }
3498        break;
3499
3500    case INDEX_op_muluh_i32:
3501        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
3502        break;
3503    case INDEX_op_mulsh_i32:
3504        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
3505        break;
3506    case INDEX_op_muluh_i64:
3507        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
3508        break;
3509    case INDEX_op_mulsh_i64:
3510        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
3511        break;
3512
3513    case INDEX_op_mb:
3514        tcg_out_mb(s, args[0]);
3515        break;
3516
3517    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
3518    case INDEX_op_mov_i64:
3519    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
3520    case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb.  */
3521    case INDEX_op_goto_tb:   /* Always emitted via tcg_out_goto_tb.  */
3522    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
3523    case INDEX_op_ext8s_i64:
3524    case INDEX_op_ext8u_i32:
3525    case INDEX_op_ext8u_i64:
3526    case INDEX_op_ext16s_i32:
3527    case INDEX_op_ext16s_i64:
3528    case INDEX_op_ext16u_i32:
3529    case INDEX_op_ext16u_i64:
3530    case INDEX_op_ext32s_i64:
3531    case INDEX_op_ext32u_i64:
3532    case INDEX_op_ext_i32_i64:
3533    case INDEX_op_extu_i32_i64:
3534    case INDEX_op_extrl_i64_i32:
3535    default:
3536        g_assert_not_reached();
3537    }
3538}
3539
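/*
 * Report how a vector opcode can be emitted for the given element
 * size: 1 if supported directly, 0 if unsupported, -1 if it can be
 * synthesized from other operations via tcg_expand_vec_op below.
 */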
3540int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3541{
3542    switch (opc) {
3543    case INDEX_op_and_vec:
3544    case INDEX_op_or_vec:
3545    case INDEX_op_xor_vec:
3546    case INDEX_op_andc_vec:
3547    case INDEX_op_not_vec:
3548    case INDEX_op_nor_vec:
3549    case INDEX_op_eqv_vec:
3550    case INDEX_op_nand_vec:
3551        return 1;
3552    case INDEX_op_orc_vec:
3553        return have_isa_2_07;
3554    case INDEX_op_add_vec:
3555    case INDEX_op_sub_vec:
3556    case INDEX_op_smax_vec:
3557    case INDEX_op_smin_vec:
3558    case INDEX_op_umax_vec:
3559    case INDEX_op_umin_vec:
3560    case INDEX_op_shlv_vec:
3561    case INDEX_op_shrv_vec:
3562    case INDEX_op_sarv_vec:
3563    case INDEX_op_rotlv_vec:
3564        return vece <= MO_32 || have_isa_2_07;
3565    case INDEX_op_ssadd_vec:
3566    case INDEX_op_sssub_vec:
3567    case INDEX_op_usadd_vec:
3568    case INDEX_op_ussub_vec:
3569        return vece <= MO_32;
3570    case INDEX_op_shli_vec:
3571    case INDEX_op_shri_vec:
3572    case INDEX_op_sari_vec:
3573    case INDEX_op_rotli_vec:
3574        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3575    case INDEX_op_cmp_vec:
3576    case INDEX_op_cmpsel_vec:
3577        return vece <= MO_32 || have_isa_2_07 ? 1 : 0;
3578    case INDEX_op_neg_vec:
3579        return vece >= MO_32 && have_isa_3_00;
3580    case INDEX_op_mul_vec:
3581        switch (vece) {
3582        case MO_8:
3583        case MO_16:
3584            return -1;
3585        case MO_32:
3586            return have_isa_2_07 ? 1 : -1;
3587        case MO_64:
3588            return have_isa_3_10;
3589        }
3590        return 0;
3591    case INDEX_op_bitsel_vec:
3592        return have_vsx;
3593    case INDEX_op_rotrv_vec:
3594        return -1;
3595    default:
3596        return 0;
3597    }
3598}
3599
3600static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3601                            TCGReg dst, TCGReg src)
3602{
3603    tcg_debug_assert(dst >= TCG_REG_V0);
3604
3605    /* Splat from integer reg allowed via constraints for v3.00.  */
3606    if (src < TCG_REG_V0) {
3607        tcg_debug_assert(have_isa_3_00);
3608        switch (vece) {
3609        case MO_64:
3610            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3611            return true;
3612        case MO_32:
3613            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3614            return true;
3615        default:
3616            /* Fail, so that we fall back on either dupm or mov+dup.  */
3617            return false;
3618        }
3619    }
3620
3621    /*
3622     * Recall we use (or emulate) VSX integer loads, so the integer is
3623     * right justified within the left (zero-index) double-word.
3624     */
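    /*
     * Hence, in big-endian element numbering, the datum is the last
     * element of doubleword 0: byte 7 for MO_8, halfword 3 for MO_16,
     * word 1 for MO_32; these are the UIM values selected below.
     */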
3625    switch (vece) {
3626    case MO_8:
3627        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3628        break;
3629    case MO_16:
3630        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3631        break;
3632    case MO_32:
3633        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3634        break;
3635    case MO_64:
3636        if (have_vsx) {
3637            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3638            break;
3639        }
3640        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3641        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3642        break;
3643    default:
3644        g_assert_not_reached();
3645    }
3646    return true;
3647}
3648
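/*
 * Duplicate an element loaded from memory.  ISA 3.00 and VSX provide
 * direct load-and-splat for words and doublewords (lxvwsx/lxvdsx);
 * otherwise load the enclosing 16-byte block, or a single element
 * into its natural lane (lve*x), derive the element index from the
 * low offset bits, and splat.  Little-endian hosts flip the index
 * (elt ^= 15/7/3) since the VSPLT* UIM counts from the big-endian left.
 */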
3649static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3650                             TCGReg out, TCGReg base, intptr_t offset)
3651{
3652    int elt;
3653
3654    tcg_debug_assert(out >= TCG_REG_V0);
3655    switch (vece) {
3656    case MO_8:
3657        if (have_isa_3_00) {
3658            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3659        } else {
3660            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3661        }
3662        elt = extract32(offset, 0, 4);
3663#if !HOST_BIG_ENDIAN
3664        elt ^= 15;
3665#endif
3666        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3667        break;
3668    case MO_16:
3669        tcg_debug_assert((offset & 1) == 0);
3670        if (have_isa_3_00) {
3671            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3672        } else {
3673            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3674        }
3675        elt = extract32(offset, 1, 3);
3676#if !HOST_BIG_ENDIAN
3677        elt ^= 7;
3678#endif
3679        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3680        break;
3681    case MO_32:
3682        if (have_isa_3_00) {
3683            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3684            break;
3685        }
3686        tcg_debug_assert((offset & 3) == 0);
3687        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3688        elt = extract32(offset, 2, 2);
3689#if !HOST_BIG_ENDIAN
3690        elt ^= 3;
3691#endif
3692        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3693        break;
3694    case MO_64:
3695        if (have_vsx) {
3696            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3697            break;
3698        }
3699        tcg_debug_assert((offset & 7) == 0);
3700        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3701        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3702        elt = extract32(offset, 3, 1);
3703#if !HOST_BIG_ENDIAN
3704        elt = !elt;
3705#endif
3706        if (elt) {
3707            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3708        } else {
3709            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3710        }
3711        break;
3712    default:
3713        g_assert_not_reached();
3714    }
3715    return true;
3716}
3717
3718static void tcg_out_not_vec(TCGContext *s, TCGReg a0, TCGReg a1)
3719{
3720    tcg_out32(s, VNOR | VRT(a0) | VRA(a1) | VRB(a1));
3721}
3722
3723static void tcg_out_or_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
3724{
3725    tcg_out32(s, VOR | VRT(a0) | VRA(a1) | VRB(a2));
3726}
3727
3728static void tcg_out_orc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
3729{
3730    tcg_out32(s, VORC | VRT(a0) | VRA(a1) | VRB(a2));
3731}
3732
3733static void tcg_out_and_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
3734{
3735    tcg_out32(s, VAND | VRT(a0) | VRA(a1) | VRB(a2));
3736}
3737
3738static void tcg_out_andc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2)
3739{
3740    tcg_out32(s, VANDC | VRT(a0) | VRA(a1) | VRB(a2));
3741}
3742
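/*
 * Bit select: d = (t & c) | (f & ~c), per bit of the control mask c.
 * Without xxsel this is open-coded; t & c goes through TCG_VEC_TMP2
 * so the sequence remains correct when d aliases one of the inputs.
 */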
3743static void tcg_out_bitsel_vec(TCGContext *s, TCGReg d,
3744                               TCGReg c, TCGReg t, TCGReg f)
3745{
3746    if (TCG_TARGET_HAS_bitsel_vec) {
3747        tcg_out32(s, XXSEL | VRT(d) | VRC(c) | VRB(t) | VRA(f));
3748    } else {
3749        tcg_out_and_vec(s, TCG_VEC_TMP2, t, c);
3750        tcg_out_andc_vec(s, d, f, c);
3751        tcg_out_or_vec(s, d, d, TCG_VEC_TMP2);
3752    }
3753}
3754
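/*
 * Emit a vector comparison, minus any final inversion.  The hardware
 * provides only EQ, signed GT and unsigned GT (plus NE for small
 * elements on ISA 3.00), so the other conditions are canonicalized:
 * LT/LTU swap the operands, LE/LEU (and NE without vcmpne*) invert
 * the sense, and GE/GEU do both.  Returns true if the caller still
 * owes a NOT of the result.
 */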
3755static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0,
3756                                  TCGReg a1, TCGReg a2, TCGCond cond)
3757{
3758    static const uint32_t
3759        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3760        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3761        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3762        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD };
3763    uint32_t insn;
3764
3765    bool need_swap = false, need_inv = false;
3766
3767    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3768
3769    switch (cond) {
3770    case TCG_COND_EQ:
3771    case TCG_COND_GT:
3772    case TCG_COND_GTU:
3773        break;
3774    case TCG_COND_NE:
3775        if (have_isa_3_00 && vece <= MO_32) {
3776            break;
3777        }
3778        /* fall through */
3779    case TCG_COND_LE:
3780    case TCG_COND_LEU:
3781        need_inv = true;
3782        break;
3783    case TCG_COND_LT:
3784    case TCG_COND_LTU:
3785        need_swap = true;
3786        break;
3787    case TCG_COND_GE:
3788    case TCG_COND_GEU:
3789        need_swap = need_inv = true;
3790        break;
3791    default:
3792        g_assert_not_reached();
3793    }
3794
3795    if (need_inv) {
3796        cond = tcg_invert_cond(cond);
3797    }
3798    if (need_swap) {
3799        TCGReg swap = a1;
3800        a1 = a2;
3801        a2 = swap;
3802        cond = tcg_swap_cond(cond);
3803    }
3804
3805    switch (cond) {
3806    case TCG_COND_EQ:
3807        insn = eq_op[vece];
3808        break;
3809    case TCG_COND_NE:
3810        insn = ne_op[vece];
3811        break;
3812    case TCG_COND_GT:
3813        insn = gts_op[vece];
3814        break;
3815    case TCG_COND_GTU:
3816        insn = gtu_op[vece];
3817        break;
3818    default:
3819        g_assert_not_reached();
3820    }
3821    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3822
3823    return need_inv;
3824}
3825
3826static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0,
3827                            TCGReg a1, TCGReg a2, TCGCond cond)
3828{
3829    if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) {
3830        tcg_out_not_vec(s, a0, a0);
3831    }
3832}
3833
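/*
 * Select v3/v4 by the comparison of c1 with c2.  A constant v3 is
 * restricted to 0 or -1 (constraint vZM), so those cases collapse to
 * one AND/ANDC or OR/ORC with the compare mask; an inverted mask from
 * tcg_out_cmp_vec_noinv is absorbed by swapping the select operands
 * or switching to the complemented logical op rather than emitting
 * an extra vnor.
 */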
3834static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0,
3835                               TCGReg c1, TCGReg c2, TCGArg v3, int const_v3,
3836                               TCGReg v4, TCGCond cond)
3837{
3838    bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP1, c1, c2, cond);
3839
3840    if (!const_v3) {
3841        if (inv) {
3842            tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v4, v3);
3843        } else {
3844            tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v3, v4);
3845        }
3846    } else if (v3) {
3847        if (inv) {
3848            tcg_out_orc_vec(s, a0, v4, TCG_VEC_TMP1);
3849        } else {
3850            tcg_out_or_vec(s, a0, v4, TCG_VEC_TMP1);
3851        }
3852    } else {
3853        if (inv) {
3854            tcg_out_and_vec(s, a0, v4, TCG_VEC_TMP1);
3855        } else {
3856            tcg_out_andc_vec(s, a0, v4, TCG_VEC_TMP1);
3857        }
3858    }
3859}
3860
3861static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3862                           unsigned vecl, unsigned vece,
3863                           const TCGArg args[TCG_MAX_OP_ARGS],
3864                           const int const_args[TCG_MAX_OP_ARGS])
3865{
3866    static const uint32_t
3867        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3868        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3869        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3870        neg_op[4] = { 0, 0, VNEGW, VNEGD },
3871        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3872        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3873        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3874        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3875        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3876        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3877        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3878        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3879        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3880        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3881        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3882        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3883        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3884        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3885        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3886        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3887        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3888
3889    TCGType type = vecl + TCG_TYPE_V64;
3890    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3891    uint32_t insn;
3892
3893    switch (opc) {
3894    case INDEX_op_ld_vec:
3895        tcg_out_ld(s, type, a0, a1, a2);
3896        return;
3897    case INDEX_op_st_vec:
3898        tcg_out_st(s, type, a0, a1, a2);
3899        return;
3900    case INDEX_op_dupm_vec:
3901        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3902        return;
3903
3904    case INDEX_op_add_vec:
3905        insn = add_op[vece];
3906        break;
3907    case INDEX_op_sub_vec:
3908        insn = sub_op[vece];
3909        break;
3910    case INDEX_op_neg_vec:
3911        insn = neg_op[vece];
3912        a2 = a1;
3913        a1 = 0;
3914        break;
3915    case INDEX_op_mul_vec:
3916        insn = mul_op[vece];
3917        break;
3918    case INDEX_op_ssadd_vec:
3919        insn = ssadd_op[vece];
3920        break;
3921    case INDEX_op_sssub_vec:
3922        insn = sssub_op[vece];
3923        break;
3924    case INDEX_op_usadd_vec:
3925        insn = usadd_op[vece];
3926        break;
3927    case INDEX_op_ussub_vec:
3928        insn = ussub_op[vece];
3929        break;
3930    case INDEX_op_smin_vec:
3931        insn = smin_op[vece];
3932        break;
3933    case INDEX_op_umin_vec:
3934        insn = umin_op[vece];
3935        break;
3936    case INDEX_op_smax_vec:
3937        insn = smax_op[vece];
3938        break;
3939    case INDEX_op_umax_vec:
3940        insn = umax_op[vece];
3941        break;
3942    case INDEX_op_shlv_vec:
3943        insn = shlv_op[vece];
3944        break;
3945    case INDEX_op_shrv_vec:
3946        insn = shrv_op[vece];
3947        break;
3948    case INDEX_op_sarv_vec:
3949        insn = sarv_op[vece];
3950        break;
3951    case INDEX_op_and_vec:
3952        tcg_out_and_vec(s, a0, a1, a2);
3953        return;
3954    case INDEX_op_or_vec:
3955        tcg_out_or_vec(s, a0, a1, a2);
3956        return;
3957    case INDEX_op_xor_vec:
3958        insn = VXOR;
3959        break;
3960    case INDEX_op_andc_vec:
3961        tcg_out_andc_vec(s, a0, a1, a2);
3962        return;
3963    case INDEX_op_not_vec:
3964        tcg_out_not_vec(s, a0, a1);
3965        return;
3966    case INDEX_op_orc_vec:
3967        tcg_out_orc_vec(s, a0, a1, a2);
3968        return;
3969    case INDEX_op_nand_vec:
3970        insn = VNAND;
3971        break;
3972    case INDEX_op_nor_vec:
3973        insn = VNOR;
3974        break;
3975    case INDEX_op_eqv_vec:
3976        insn = VEQV;
3977        break;
3978
3979    case INDEX_op_cmp_vec:
3980        tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]);
3981        return;
3982    case INDEX_op_cmpsel_vec:
3983        tcg_out_cmpsel_vec(s, vece, a0, a1, a2,
3984                           args[3], const_args[3], args[4], args[5]);
3985        return;
3986    case INDEX_op_bitsel_vec:
3987        tcg_out_bitsel_vec(s, a0, a1, a2, args[3]);
3988        return;
3989
3990    case INDEX_op_dup2_vec:
3991        assert(TCG_TARGET_REG_BITS == 32);
3992        /* With inputs a1 = xLxx, a2 = xHxx  */
3993        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0  = xxHL */
3994        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
3995        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0  = HLHL */
3996        return;
3997
3998    case INDEX_op_ppc_mrgh_vec:
3999        insn = mrgh_op[vece];
4000        break;
4001    case INDEX_op_ppc_mrgl_vec:
4002        insn = mrgl_op[vece];
4003        break;
4004    case INDEX_op_ppc_muleu_vec:
4005        insn = muleu_op[vece];
4006        break;
4007    case INDEX_op_ppc_mulou_vec:
4008        insn = mulou_op[vece];
4009        break;
4010    case INDEX_op_ppc_pkum_vec:
4011        insn = pkum_op[vece];
4012        break;
4013    case INDEX_op_rotlv_vec:
4014        insn = rotl_op[vece];
4015        break;
4016    case INDEX_op_ppc_msum_vec:
4017        tcg_debug_assert(vece == MO_16);
4018        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
4019        return;
4020
4021    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
4022    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
4023    default:
4024        g_assert_not_reached();
4025    }
4026
4027    tcg_debug_assert(insn != 0);
4028    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
4029}
4030
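/*
 * Expand a shift-by-immediate into a shift by a splatted count vector.
 * The variable-shift insns consume only the low log2(element bits)
 * bits of each count element, so the count is splatted as bytes and
 * can usually be materialized cheaply (vspltisb, or xxspltib on ISA
 * 3.00, else a constant-pool load).
 */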
4031static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
4032                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
4033{
4034    TCGv_vec t1;
4035
4036    if (vece == MO_32) {
4037        /*
4038         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
4039         * So using negative numbers gets us the 4th bit easily.
4040         */
4041        imm = sextract32(imm, 0, 5);
4042    } else {
4043        imm &= (8 << vece) - 1;
4044    }
4045
4046    /* Splat as bytes, so xxspltib can cover the MO_64 counts 2.07 allows. */
4047    t1 = tcg_constant_vec(type, MO_8, imm);
4048    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
4049              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
4050}
4051
4052static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
4053                           TCGv_vec v1, TCGv_vec v2)
4054{
4055    TCGv_vec t1 = tcg_temp_new_vec(type);
4056    TCGv_vec t2 = tcg_temp_new_vec(type);
4057    TCGv_vec c0, c16;
4058
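    /*
     * There is no direct element multiply below MO_32: synthesize it
     * from the even/odd widening multiplies.  vmuleu/vmulou produce
     * double-width products of the even/odd elements; the merges
     * restore element order and vpkum keeps the low half of each
     * product, i.e. the modulo result.
     */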
4059    switch (vece) {
4060    case MO_8:
4061    case MO_16:
4062        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
4063                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
4064        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
4065                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
4066        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
4067                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
4068        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
4069                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
4070        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
4071                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
4072        break;
4073
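    /*
     * Compose a 32-bit multiply from 16-bit pieces.  Splitting each
     * lane as a = aH:aL, b = bH:bL,
     *   a * b mod 2^32 = ((aH*bL + aL*bH) << 16) + aL*bL.
     * Rotating b by 16 swaps its halves, letting vmsumuhm form both
     * cross products at once, while vmulouh supplies aL*bL.
     */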
4074    case MO_32:
4075        tcg_debug_assert(!have_isa_2_07);
4076        /*
4077         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
4078         * So using -16 is a quick way to represent 16.
4079         */
4080        c16 = tcg_constant_vec(type, MO_8, -16);
4081        c0 = tcg_constant_vec(type, MO_8, 0);
4082
4083        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
4084                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
4085        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
4086                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
4087        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
4088                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
4089        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
4090                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
4091        tcg_gen_add_vec(MO_32, v0, t1, t2);
4092        break;
4093
4094    default:
4095        g_assert_not_reached();
4096    }
4097    tcg_temp_free_vec(t1);
4098    tcg_temp_free_vec(t2);
4099}
4100
4101void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
4102                       TCGArg a0, ...)
4103{
4104    va_list va;
4105    TCGv_vec v0, v1, v2, t0;
4106    TCGArg a2;
4107
4108    va_start(va, a0);
4109    v0 = temp_tcgv_vec(arg_temp(a0));
4110    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
4111    a2 = va_arg(va, TCGArg);
4112
4113    switch (opc) {
4114    case INDEX_op_shli_vec:
4115        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
4116        break;
4117    case INDEX_op_shri_vec:
4118        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
4119        break;
4120    case INDEX_op_sari_vec:
4121        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
4122        break;
4123    case INDEX_op_rotli_vec:
4124        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
4125        break;
4126    case INDEX_op_mul_vec:
4127        v2 = temp_tcgv_vec(arg_temp(a2));
4128        expand_vec_mul(type, vece, v0, v1, v2);
4129        break;
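    /*
     * Rotate counts are modulo the element width, so a right rotate
     * is simply a left rotate by the negated count.
     */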
4130    case INDEX_op_rotrv_vec:
4131        v2 = temp_tcgv_vec(arg_temp(a2));
4132        t0 = tcg_temp_new_vec(type);
4133        tcg_gen_neg_vec(vece, t0, v2);
4134        tcg_gen_rotlv_vec(vece, v0, v1, t0);
4135        tcg_temp_free_vec(t0);
4136        break;
4137    default:
4138        g_assert_not_reached();
4139    }
4140    va_end(va);
4141}
4142
4143static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
4144{
4145    switch (op) {
4146    case INDEX_op_goto_ptr:
4147        return C_O0_I1(r);
4148
4149    case INDEX_op_ld8u_i32:
4150    case INDEX_op_ld8s_i32:
4151    case INDEX_op_ld16u_i32:
4152    case INDEX_op_ld16s_i32:
4153    case INDEX_op_ld_i32:
4154    case INDEX_op_ctpop_i32:
4155    case INDEX_op_neg_i32:
4156    case INDEX_op_not_i32:
4157    case INDEX_op_ext8s_i32:
4158    case INDEX_op_ext16s_i32:
4159    case INDEX_op_bswap16_i32:
4160    case INDEX_op_bswap32_i32:
4161    case INDEX_op_extract_i32:
4162    case INDEX_op_ld8u_i64:
4163    case INDEX_op_ld8s_i64:
4164    case INDEX_op_ld16u_i64:
4165    case INDEX_op_ld16s_i64:
4166    case INDEX_op_ld32u_i64:
4167    case INDEX_op_ld32s_i64:
4168    case INDEX_op_ld_i64:
4169    case INDEX_op_ctpop_i64:
4170    case INDEX_op_neg_i64:
4171    case INDEX_op_not_i64:
4172    case INDEX_op_ext8s_i64:
4173    case INDEX_op_ext16s_i64:
4174    case INDEX_op_ext32s_i64:
4175    case INDEX_op_ext_i32_i64:
4176    case INDEX_op_extu_i32_i64:
4177    case INDEX_op_bswap16_i64:
4178    case INDEX_op_bswap32_i64:
4179    case INDEX_op_bswap64_i64:
4180    case INDEX_op_extract_i64:
4181        return C_O1_I1(r, r);
4182
4183    case INDEX_op_st8_i32:
4184    case INDEX_op_st16_i32:
4185    case INDEX_op_st_i32:
4186    case INDEX_op_st8_i64:
4187    case INDEX_op_st16_i64:
4188    case INDEX_op_st32_i64:
4189    case INDEX_op_st_i64:
4190        return C_O0_I2(r, r);
4191
4192    case INDEX_op_add_i32:
4193    case INDEX_op_and_i32:
4194    case INDEX_op_or_i32:
4195    case INDEX_op_xor_i32:
4196    case INDEX_op_andc_i32:
4197    case INDEX_op_orc_i32:
4198    case INDEX_op_eqv_i32:
4199    case INDEX_op_shl_i32:
4200    case INDEX_op_shr_i32:
4201    case INDEX_op_sar_i32:
4202    case INDEX_op_rotl_i32:
4203    case INDEX_op_rotr_i32:
4204    case INDEX_op_and_i64:
4205    case INDEX_op_andc_i64:
4206    case INDEX_op_shl_i64:
4207    case INDEX_op_shr_i64:
4208    case INDEX_op_sar_i64:
4209    case INDEX_op_rotl_i64:
4210    case INDEX_op_rotr_i64:
4211        return C_O1_I2(r, r, ri);
4212
4213    case INDEX_op_mul_i32:
4214    case INDEX_op_mul_i64:
4215        return C_O1_I2(r, r, rI);
4216
4217    case INDEX_op_div_i32:
4218    case INDEX_op_divu_i32:
4219    case INDEX_op_rem_i32:
4220    case INDEX_op_remu_i32:
4221    case INDEX_op_nand_i32:
4222    case INDEX_op_nor_i32:
4223    case INDEX_op_muluh_i32:
4224    case INDEX_op_mulsh_i32:
4225    case INDEX_op_orc_i64:
4226    case INDEX_op_eqv_i64:
4227    case INDEX_op_nand_i64:
4228    case INDEX_op_nor_i64:
4229    case INDEX_op_div_i64:
4230    case INDEX_op_divu_i64:
4231    case INDEX_op_rem_i64:
4232    case INDEX_op_remu_i64:
4233    case INDEX_op_mulsh_i64:
4234    case INDEX_op_muluh_i64:
4235        return C_O1_I2(r, r, r);
4236
4237    case INDEX_op_sub_i32:
4238        return C_O1_I2(r, rI, ri);
4239    case INDEX_op_add_i64:
4240        return C_O1_I2(r, r, rT);
4241    case INDEX_op_or_i64:
4242    case INDEX_op_xor_i64:
4243        return C_O1_I2(r, r, rU);
4244    case INDEX_op_sub_i64:
4245        return C_O1_I2(r, rI, rT);
4246    case INDEX_op_clz_i32:
4247    case INDEX_op_ctz_i32:
4248    case INDEX_op_clz_i64:
4249    case INDEX_op_ctz_i64:
4250        return C_O1_I2(r, r, rZW);
4251
4252    case INDEX_op_brcond_i32:
4253    case INDEX_op_brcond_i64:
4254        return C_O0_I2(r, rC);
4255    case INDEX_op_setcond_i32:
4256    case INDEX_op_setcond_i64:
4257    case INDEX_op_negsetcond_i32:
4258    case INDEX_op_negsetcond_i64:
4259        return C_O1_I2(r, r, rC);
4260    case INDEX_op_movcond_i32:
4261    case INDEX_op_movcond_i64:
4262        return C_O1_I4(r, r, rC, rZ, rZ);
4263
4264    case INDEX_op_deposit_i32:
4265    case INDEX_op_deposit_i64:
4266        return C_O1_I2(r, 0, rZ);
4267    case INDEX_op_brcond2_i32:
4268        return C_O0_I4(r, r, ri, ri);
4269    case INDEX_op_setcond2_i32:
4270        return C_O1_I4(r, r, r, ri, ri);
4271    case INDEX_op_add2_i64:
4272    case INDEX_op_add2_i32:
4273        return C_O2_I4(r, r, r, r, rI, rZM);
4274    case INDEX_op_sub2_i64:
4275    case INDEX_op_sub2_i32:
4276        return C_O2_I4(r, r, rI, rZM, r, r);
4277
4278    case INDEX_op_qemu_ld_a32_i32:
4279        return C_O1_I1(r, r);
4280    case INDEX_op_qemu_ld_a64_i32:
4281        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
4282    case INDEX_op_qemu_ld_a32_i64:
4283        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
4284    case INDEX_op_qemu_ld_a64_i64:
4285        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
4286
4287    case INDEX_op_qemu_st_a32_i32:
4288        return C_O0_I2(r, r);
4289    case INDEX_op_qemu_st_a64_i32:
4290        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
4291    case INDEX_op_qemu_st_a32_i64:
4292        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
4293    case INDEX_op_qemu_st_a64_i64:
4294        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
4295
4296    case INDEX_op_qemu_ld_a32_i128:
4297    case INDEX_op_qemu_ld_a64_i128:
4298        return C_N1O1_I1(o, m, r);
4299    case INDEX_op_qemu_st_a32_i128:
4300    case INDEX_op_qemu_st_a64_i128:
4301        return C_O0_I3(o, m, r);
4302
4303    case INDEX_op_add_vec:
4304    case INDEX_op_sub_vec:
4305    case INDEX_op_mul_vec:
4306    case INDEX_op_and_vec:
4307    case INDEX_op_or_vec:
4308    case INDEX_op_xor_vec:
4309    case INDEX_op_andc_vec:
4310    case INDEX_op_orc_vec:
4311    case INDEX_op_nor_vec:
4312    case INDEX_op_eqv_vec:
4313    case INDEX_op_nand_vec:
4314    case INDEX_op_cmp_vec:
4315    case INDEX_op_ssadd_vec:
4316    case INDEX_op_sssub_vec:
4317    case INDEX_op_usadd_vec:
4318    case INDEX_op_ussub_vec:
4319    case INDEX_op_smax_vec:
4320    case INDEX_op_smin_vec:
4321    case INDEX_op_umax_vec:
4322    case INDEX_op_umin_vec:
4323    case INDEX_op_shlv_vec:
4324    case INDEX_op_shrv_vec:
4325    case INDEX_op_sarv_vec:
4326    case INDEX_op_rotlv_vec:
4327    case INDEX_op_rotrv_vec:
4328    case INDEX_op_ppc_mrgh_vec:
4329    case INDEX_op_ppc_mrgl_vec:
4330    case INDEX_op_ppc_muleu_vec:
4331    case INDEX_op_ppc_mulou_vec:
4332    case INDEX_op_ppc_pkum_vec:
4333    case INDEX_op_dup2_vec:
4334        return C_O1_I2(v, v, v);
4335
4336    case INDEX_op_not_vec:
4337    case INDEX_op_neg_vec:
4338        return C_O1_I1(v, v);
4339
4340    case INDEX_op_dup_vec:
4341        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
4342
4343    case INDEX_op_ld_vec:
4344    case INDEX_op_dupm_vec:
4345        return C_O1_I1(v, r);
4346
4347    case INDEX_op_st_vec:
4348        return C_O0_I2(v, r);
4349
4350    case INDEX_op_bitsel_vec:
4351    case INDEX_op_ppc_msum_vec:
4352        return C_O1_I3(v, v, v, v);
4353    case INDEX_op_cmpsel_vec:
4354        return C_O1_I4(v, v, v, vZM, v);
4355
4356    default:
4357        g_assert_not_reached();
4358    }
4359}
4360
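/*
 * Per the PowerPC calling conventions, r0 and r3-r12 (plus r2, the
 * TOC/environment pointer) are volatile across calls while r14-r31
 * are callee-saved; likewise v0-v19 are volatile and v20-v31 are
 * preserved.  Only the volatile half is marked call-clobbered below.
 */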
4361static void tcg_target_init(TCGContext *s)
4362{
4363    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
4364    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
4365    if (have_altivec) {
4366        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
4367        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
4368    }
4369
4370    tcg_target_call_clobber_regs = 0;
4371    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
4372    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
4373    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
4374    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
4375    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
4376    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
4377    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
4378    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
4379    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
4380    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
4381    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
4382    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
4383
4384    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
4385    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
4386    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
4387    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
4388    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
4389    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
4390    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
4391    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
4392    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
4393    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
4394    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
4395    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
4396    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
4397    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
4398    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
4399    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
4400    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
4401    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
4402    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
4403    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
4404
4405    s->reserved_regs = 0;
4406    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
4407    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
4408#if defined(_CALL_SYSV)
4409    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
4410#endif
4411#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
4412    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
4413#endif
4414    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
4415    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
4416    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
4417    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
4418    if (USE_REG_TB) {
4419        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);  /* tb->tc_ptr */
4420    }
4421}
4422
4423#ifdef __ELF__
4424typedef struct {
4425    DebugFrameCIE cie;
4426    DebugFrameFDEHeader fde;
4427    uint8_t fde_def_cfa[4];
4428    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
4429} DebugFrame;
4430
4431/* We're expecting a 2 byte uleb128 encoded value.  */
4432QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
4433
4434#if TCG_TARGET_REG_BITS == 64
4435# define ELF_HOST_MACHINE EM_PPC64
4436#else
4437# define ELF_HOST_MACHINE EM_PPC
4438#endif
4439
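/*
 * DWARF register 65 is the link register, hence the return_column and
 * the LR offset record below.  FRAME_SIZE fits in 14 bits (checked
 * above), so its uleb128 encoding is exactly two bytes: the low seven
 * bits with the continuation bit set, then the remaining high bits.
 */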
4440static DebugFrame debug_frame = {
4441    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4442    .cie.id = -1,
4443    .cie.version = 1,
4444    .cie.code_align = 1,
4445    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
4446    .cie.return_column = 65,
4447
4448    /* Total FDE size does not include the "len" member.  */
4449    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
4450
4451    .fde_def_cfa = {
4452        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
4453        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
4454        (FRAME_SIZE >> 7)
4455    },
4456    .fde_reg_ofs = {
4457        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
4458        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
4459    }
4460};
4461
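/*
 * Emit one DW_CFA_offset record (0x80 | regno, then a uleb128 offset
 * factored by .cie.data_align, i.e. -SZR) per callee-saved register,
 * then hand the completed frame description to the JIT interface.
 */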
4462void tcg_register_jit(const void *buf, size_t buf_size)
4463{
4464    uint8_t *p = &debug_frame.fde_reg_ofs[3];
4465    int i;
4466
4467    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
4468        p[0] = 0x80 + tcg_target_callee_save_regs[i];
4469        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
4470    }
4471
4472    debug_frame.fde.func_start = (uintptr_t)buf;
4473    debug_frame.fde.func_len = buf_size;
4474
4475    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
4476}
4477#endif /* __ELF__ */
4485