/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "elf.h"

#define TCG_CT_CONST_S16        (1 << 8)
#define TCG_CT_CONST_S32        (1 << 9)
#define TCG_CT_CONST_U32        (1 << 10)
#define TCG_CT_CONST_ZERO       (1 << 11)
#define TCG_CT_CONST_P32        (1 << 12)
#define TCG_CT_CONST_INV        (1 << 13)
#define TCG_CT_CONST_INVRISBG   (1 << 14)

#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS      MAKE_64BIT_MASK(32, 32)

/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R1

#define TCG_GUEST_BASE_REG TCG_REG_R13

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
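/* For example, RIEf_RISBG = 0xec55 contributes 0xec as the first byte
   and 0x55 as the last byte of the 6-byte instruction; see
   tcg_out_risbg() for how the two opcode halves are recombined.  */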
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,
    RI_TMLL     = 0xa701,

    RIEb_CGRJ    = 0xec64,
    RIEb_CLGRJ   = 0xec65,
    RIEb_CLRJ    = 0xec77,
    RIEb_CRJ     = 0xec76,

    RIEc_CGIJ    = 0xec7c,
    RIEc_CIJ     = 0xec7e,
    RIEc_CLGIJ   = 0xec7d,
    RIEc_CLIJ    = 0xec7f,

    RIEf_RISBG   = 0xec55,

    RIEg_LOCGHI  = 0xec46,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_ALGFR   = 0xb91a,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRFa_MGRK   = 0xb9ec,
    RRFa_MSRKC  = 0xb9fd,
    RRFa_MSGRKC = 0xb9ed,
    RRFa_NCRK   = 0xb9f5,
    RRFa_NCGRK  = 0xb9e5,
    RRFa_NNRK   = 0xb974,
    RRFa_NNGRK  = 0xb964,
    RRFa_NORK   = 0xb976,
    RRFa_NOGRK  = 0xb966,
    RRFa_NRK    = 0xb9f4,
    RRFa_NGRK   = 0xb9e4,
    RRFa_NXRK   = 0xb977,
    RRFa_NXGRK  = 0xb967,
    RRFa_OCRK   = 0xb975,
    RRFa_OCGRK  = 0xb965,
    RRFa_ORK    = 0xb9f6,
    RRFa_OGRK   = 0xb9e6,
    RRFa_SRK    = 0xb9f9,
    RRFa_SGRK   = 0xb9e9,
    RRFa_SLRK   = 0xb9fb,
    RRFa_SLGRK  = 0xb9eb,
    RRFa_XRK    = 0xb9f7,
    RRFa_XGRK   = 0xb9e7,

    RRFam_SELGR = 0xb9e3,

    RRFc_LOCR   = 0xb9f2,
    RRFc_LOCGR  = 0xb9e2,
    RRFc_POPCNT = 0xb9e1,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LPQ     = 0xe38f,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STPQ    = 0xe38e,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    VRIa_VGBM   = 0xe744,
    VRIa_VREPI  = 0xe745,
    VRIb_VGM    = 0xe746,
    VRIc_VREP   = 0xe74d,

    VRRa_VLC    = 0xe7de,
    VRRa_VLP    = 0xe7df,
    VRRa_VLR    = 0xe756,
    VRRc_VA     = 0xe7f3,
    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
    VRRc_VCH    = 0xe7fb,   /* " */
    VRRc_VCHL   = 0xe7f9,   /* " */
    VRRc_VERLLV = 0xe773,
    VRRc_VESLV  = 0xe770,
    VRRc_VESRAV = 0xe77a,
    VRRc_VESRLV = 0xe778,
    VRRc_VML    = 0xe7a2,
    VRRc_VMN    = 0xe7fe,
    VRRc_VMNL   = 0xe7fc,
    VRRc_VMX    = 0xe7ff,
    VRRc_VMXL   = 0xe7fd,
    VRRc_VN     = 0xe768,
    VRRc_VNC    = 0xe769,
    VRRc_VNN    = 0xe76e,
    VRRc_VNO    = 0xe76b,
    VRRc_VNX    = 0xe76c,
    VRRc_VO     = 0xe76a,
    VRRc_VOC    = 0xe76f,
    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
    VRRc_VS     = 0xe7f7,
    VRRa_VUPH   = 0xe7d7,
    VRRa_VUPL   = 0xe7d6,
    VRRc_VX     = 0xe76d,
    VRRe_VSEL   = 0xe78d,
    VRRf_VLVGP  = 0xe762,

    VRSa_VERLL  = 0xe733,
    VRSa_VESL   = 0xe730,
    VRSa_VESRA  = 0xe73a,
    VRSa_VESRL  = 0xe738,
    VRSb_VLVG   = 0xe722,
    VRSc_VLGV   = 0xe721,

    VRX_VL      = 0xe706,
    VRX_VLLEZ   = 0xe704,
    VRX_VLREP   = 0xe705,
    VRX_VST     = 0xe70e,
    VRX_VSTEF   = 0xe70b,
    VRX_VSTEG   = 0xe70a,

    NOP         = 0x0707,
} S390Opcode;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
    "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,

    /* V8-V15 are call saved, and omitted. */
    TCG_REG_V0,
    TCG_REG_V1,
    TCG_REG_V2,
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
    TCG_REG_V20,
    TCG_REG_V21,
    TCG_REG_V22,
    TCG_REG_V23,
    TCG_REG_V24,
    TCG_REG_V25,
    TCG_REG_V26,
    TCG_REG_V27,
    TCG_REG_V28,
    TCG_REG_V29,
    TCG_REG_V30,
    TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot == 0);
    return TCG_REG_R2;
}

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here there is no
   unsigned instruction variant; however, since the test is against zero,
   we can remap the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[3];

static inline bool is_general_reg(TCGReg r)
{
    return r <= TCG_REG_R15;
}

static inline bool is_vector_reg(TCGReg r)
{
    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
}

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - src_rx;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_20:
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)src_rw & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(src_rw, old);
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

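/* Return the index (0-3) of the only nonzero 16-bit halfword of VAL,
   or -1 if VAL does not fit in a single halfword.  For example,
   is_const_p16(0x0012000000000000ull) == 3.  */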
static int is_const_p16(uint64_t val)
{
    for (int i = 0; i < 4; ++i) {
        uint64_t mask = 0xffffull << (i * 16);
        if ((val & ~mask) == 0) {
            return i;
        }
    }
    return -1;
}

static int is_const_p32(uint64_t val)
{
    if ((val & 0xffffffff00000000ull) == 0) {
        return 0;
    }
    if ((val & 0x00000000ffffffffull) == 0) {
        return 1;
    }
    return -1;
}

/*
 * Accept bit patterns like these:
 *  0....01....1
 *  1....10....0
 *  1..10..01..1
 *  0..01..10..0
 * Copied from gcc sources.
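 *
 * E.g. risbg_mask(0x0ff0) is true (a single run of ones), while
 * 0x0ff00ff0 (two separate runs) is rejected.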
 */
static bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }

    if (ct & TCG_CT_CONST_INV) {
        val = ~val;
    }
    /*
     * Note that is_const_p16 is a subset of is_const_p32,
     * so we don't need both constraints.
     */
    if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
        return true;
    }
    return false;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF-a without the m4 field */
static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
}

/* RRF-a with the m4 field */
static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
                               TCGReg r1, TCGReg r2, TCGReg r3, int m4)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
                             int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

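/* E.g. RXB(TCG_REG_V17, 0, 0, 0) == 0x800: bit 4 of the register
   number lands in the 0x800 bit of the last halfword emitted, next to
   the m field in bits 12-15 (see the VRI/VRR emitters below).  */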
static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    /*
     * Shift bit 4 of each regno to its corresponding bit of RXB.
     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
     * is the left-shift of the 4th operand.
     */
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}

static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_debug_assert(is_vector_reg(v4));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
}

static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg r2, TCGReg r3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_general_reg(r2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
    tcg_out16(s, r3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}

static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_general_reg(r1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(0, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(x2));
    tcg_debug_assert(is_general_reg(b2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
    tcg_out16(s, (b2 << 12) | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
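
/* E.g. tcg_out_insn(s, RRE, LGR, dst, src) expands to
   tcg_out_insn_RRE(s, RRE_LGR, dst, src); pairing an opcode with the
   wrong format fails to compile, since the glued name does not exist.  */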


/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src == dst) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(dst) && is_general_reg(src))) {
            tcg_out_insn(s, RR, LR, dst, src);
            break;
        }
        /* fallthru */

    case TCG_TYPE_I64:
        if (likely(is_general_reg(dst))) {
            if (likely(is_general_reg(src))) {
                tcg_out_insn(s, RRE, LGR, dst, src);
            } else {
                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
            }
            break;
        } else if (is_general_reg(src)) {
            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static const S390Opcode li_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};
static const S390Opcode oi_insns[4] = {
    RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
};
static const S390Opcode lif_insns[2] = {
    RIL_LLILF, RIL_LLIHF,
};

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    ptrdiff_t pc_off;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
        return;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (sval == (int32_t)sval) {
        tcg_out_insn(s, RIL, LGFI, ret, sval);
        return;
    }

    i = is_const_p32(uval);
    if (i >= 0) {
        tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
        return;
    }

    /* Try for PC-relative address load.  For odd addresses, add one. */
    pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
    if (pc_off == (int32_t)pc_off) {
        tcg_out_insn(s, RIL, LARL, ret, pc_off);
        if (sval & 1) {
            tcg_out_insn(s, RI, AGHI, ret, 1);
        }
        return;
    }

    /* Otherwise, load it by parts. */
    i = is_const_p16((uint32_t)uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, LLILF, ret, uval);
    }
    uval >>= 32;
    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, OIHF, ret, uval);
    }
}
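
/* E.g. sval = 0x123456789abcdef0, which fits no single-insn form and
   is normally out of LARL range, is loaded by parts as
   LLILF 0x9abcdef0 followed by OIHF 0x12345678.  */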

/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
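        /* ((x & 0xfffff) ^ 0x80000) - 0x80000 sign-extends bits 0-19.  */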
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}

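/* As above, but for the VRX format, whose displacement is only 12 bits
   and unsigned: negative or large offsets must first be folded into
   the base/index registers.  */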
static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
                            TCGReg data, TCGReg base, TCGReg index,
                            tcg_target_long ofs, int m3)
{
    if (ofs < 0 || ofs >= 0x1000) {
        if (ofs >= -0x80000 && ofs < 0x80000) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
            base = TCG_TMP0;
            index = TCG_REG_NONE;
            ofs = 0;
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
            if (index != TCG_REG_NONE) {
                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
            }
            index = TCG_TMP0;
            ofs = 0;
        }
    }
    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
}

/* load data without address translation or endianness conversion */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
            break;
        }
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
        }
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGBR, dest, src);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGCR, dest, src);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGHR, dest, src);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGHR, dest, src);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32s(s, dest, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_mov(s, TCG_TYPE_I32, dest, src);
}

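/* E.g. val = 0xffff00000000ffff takes the wraparound case: with
   ~val = 0x0000ffffffff0000, msb = 64 - ctz64(~val) = 48 and
   lsb = clz64(~val) - 1 = 15; RISBG with msb > lsb selects the
   wrapped range of bits 48-15 (IBM bit numbering).  */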
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}

static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tcg_out_ext32u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xff) {
        tcg_out_ext8u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xffff) {
        tcg_out_ext16u(s, dest, dest);
        return;
    }

    i = is_const_p16(~val & valid);
    if (i >= 0) {
        tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(~val & valid);
    tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
    if (i >= 0) {
        tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
        return;
    }

    if (risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    g_assert_not_reached();
}

static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
{
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    i = is_const_p16(val);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(val);
    if (i >= 0) {
        tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
        return;
    }

    g_assert_not_reached();
}

static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
{
    switch (is_const_p32(val)) {
    case 0:
        tcg_out_insn(s, RIL, XILF, dest, val);
        break;
    case 1:
        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        break;
    default:
        g_assert_not_reached();
    }
}

static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                     TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
{
    bool is_unsigned = is_unsigned_cond(c);
    TCGCond inv_c = tcg_invert_cond(c);
    S390Opcode op;

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                *inv_cc = tcg_cond_to_ltr_cond[inv_c];
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            op = (is_unsigned ? RIL_CLFI : RIL_CFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /*
         * Constraints are for a signed 33-bit operand, which is a
         * convenient superset of this signed/unsigned test.
         */
        if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
            op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /* Load everything else into a register. */
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
        c2 = TCG_TMP0;
    }

    if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    *inv_cc = tcg_cond_to_s390_cond[inv_c];
    return tcg_cond_to_s390_cond[c];
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    int inv_cc;
    return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2,
                         bool c2const, bool neg)
{
    int cc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (HAVE_FACILITY(LOAD_ON_COND2)) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIEg, LOCGHI, dest, neg ? -1 : 1, cc);
        return;
    }

    switch (cond) {
    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (!c2const) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            cond = tcg_swap_cond(cond);
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /*
         * The result of a compare has CC=2 for GT and CC=3 unused.
         * ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.
         */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        if (neg) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LCR, dest, dest);
            } else {
                tcg_out_insn(s, RRE, LCGR, dest, dest);
            }
        }
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /*
         * As above, but we're looking for borrow, or !carry.
         * The second insn computes d - d - borrow, or -1 for true
         * and 0 for false.  So we must mask to 1 bit afterward.
         */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        if (!neg) {
            tgen_andi(s, type, dest, 1);
        }
        return;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
    tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
    tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, neg ? -1 : 1);
    tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
}

static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
                             TCGArg v3, int v3const, TCGReg v4,
                             int cc, int inv_cc)
{
    TCGReg src;

    if (v3const) {
        if (dest == v4) {
            if (HAVE_FACILITY(LOAD_ON_COND2)) {
                /* Emit: if (cc) dest = v3. */
                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
                return;
            }
            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
            src = TCG_TMP0;
        } else {
            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
            tcg_out_insn(s, RI, LGHI, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    } else {
        if (HAVE_FACILITY(MISC_INSN_EXT3)) {
            /* Emit: dest = cc ? v3 : v4. */
            tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
            return;
        }
        if (dest == v4) {
            src = v3;
        } else {
            tcg_out_mov(s, type, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    }

    /* Emit: if (cc) dest = src. */
    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const, TCGReg v4)
{
    int cc, inv_cc;

    cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}

static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
        return;
    }

    /*
     * Conditions from FLOGR are:
     *   2 -> one bit found
     *   8 -> no one bit found
     */
    tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
}

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    /* With MIE3, and bit 0 of m4 set, we get the complete result. */
    if (HAVE_FACILITY(MISC_INSN_EXT3)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, dest, src);
            src = dest;
        }
        tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
        return;
    }

    /* Without MIE3, each byte gets the count of bits for the byte. */
    tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);

    /* Multiply to sum each byte at the top of the word. */
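    /* E.g. for I32, (dest * 0x01010101) >> 24 == b0 + b1 + b2 + b3,
       the four per-byte counts summed into the top byte.  */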
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
        tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
        tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
        tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}

static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}

static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    /* Format RIE-b */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, 0);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    /* Format RIE-c */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, 0);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;
    bool is_unsigned = is_unsigned_cond(c);
    bool in_range;
    S390Opcode opc;

    cc = tcg_cond_to_s390_cond[c];

    if (!c2const) {
        opc = (type == TCG_TYPE_I32
               ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
               : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
        tgen_compare_branch(s, opc, cc, r1, c2, l);
        return;
    }

    /*
     * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
     * If the immediate we've been given does not fit that range, we'll
     * fall back to separate compare and branch instructions using the
     * larger comparison range afforded by COMPARE IMMEDIATE.
     */
    if (type == TCG_TYPE_I32) {
        if (is_unsigned) {
            opc = RIEc_CLIJ;
            in_range = (uint32_t)c2 == (uint8_t)c2;
        } else {
            opc = RIEc_CIJ;
            in_range = (int32_t)c2 == (int8_t)c2;
        }
    } else {
        if (is_unsigned) {
            opc = RIEc_CLGIJ;
            in_range = (uint64_t)c2 == (uint8_t)c2;
        } else {
            opc = RIEc_CGIJ;
            in_range = (int64_t)c2 == (int8_t)c2;
        }
    }
    if (in_range) {
        tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
        return;
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, dest);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    int disp;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    TCGAtomAlign aa;

    if ((memop & MO_SIZE) <= MO_64) {
        return true;
    }

    /*
     * Reject 16-byte memop with 16-byte atomicity,
     * but do allow a pair of 64-bit operations.
     */
    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
    return aa.atom <= MO_64;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   HostAddress h)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
        tcg_out_ext16u(s, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
        tcg_out_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
        tcg_out_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   HostAddress h)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
        break;
    case MO_UW:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
        break;
    case MO_UL:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
        break;

    default:
        g_assert_not_reached();
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}

/* We're expecting to use a 20-bit negative offset on the tlb memory ops.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 19)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_mask = (1 << s_bits) - 1;
        int mem_index = get_mmuidx(oi);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
        int table_off = fast_off + offsetof(CPUTLBDescFast, table);
        int ofs, a_off;
        uint64_t tlb_mask;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

1784        tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
1785                     s->page_bits - CPU_TLB_ENTRY_BITS);
1786
1787        tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
1788        tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
1789
1790        /*
1791         * For aligned accesses, we check the first byte and include the
1792         * alignment bits within the address.  For unaligned accesses, we
1793         * check that we don't cross pages using the address of the last
1794         * byte of the access.
1795         */
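        /*
         * E.g. a naturally-aligned MO_64 access has a_mask == s_mask
         * == 7: a_off is 0 and the alignment bits are kept in tlb_mask,
         * so a misaligned address fails the TLB compare.  With no
         * alignment requirement, a_off is 7 and only the page bits of
         * the last byte's address are compared.
         */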
1796        a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
1797        tlb_mask = (uint64_t)s->page_mask | a_mask;
1798        if (a_off == 0) {
1799            tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
1800        } else {
1801            tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
1802            tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
1803        }
1804
1805        if (is_ld) {
1806            ofs = offsetof(CPUTLBEntry, addr_read);
1807        } else {
1808            ofs = offsetof(CPUTLBEntry, addr_write);
1809        }
1810        if (addr_type == TCG_TYPE_I32) {
1811            ofs += HOST_BIG_ENDIAN * 4;
1812            tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1813        } else {
1814            tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1815        }
1816
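        /*
         * Emit only the opcode/mask halfword of the BRC; the halfword
         * reserved for the displacement is patched by the slow path
         * via patch_reloc.
         */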
1817        tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1818        ldst->label_ptr[0] = s->code_ptr++;
1819
1820        h->index = TCG_TMP0;
1821        tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
1822                     offsetof(CPUTLBEntry, addend));
1823
1824        if (addr_type == TCG_TYPE_I32) {
1825            tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
1826            h->base = TCG_REG_NONE;
1827        } else {
1828            h->base = addr_reg;
1829        }
1830        h->disp = 0;
1831    } else {
1832        if (a_mask) {
1833            ldst = new_ldst_label(s);
1834            ldst->is_ld = is_ld;
1835            ldst->oi = oi;
1836            ldst->addrlo_reg = addr_reg;
1837
1838            /* We expect a_bits to max out at 7, well within TMLL's 16-bit immediate. */
1839            tcg_debug_assert(a_mask <= 0xffff);
1840            tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
1841
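            /*
             * TMLL sets CC 0 only when all of the tested bits are
             * zero, so a taken branch on CC 1-3 means the address is
             * misaligned.
             */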
1842            tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
1843            ldst->label_ptr[0] = s->code_ptr++;
1844        }
1845
1846        h->base = addr_reg;
1847        if (addr_type == TCG_TYPE_I32) {
1848            tcg_out_ext32u(s, TCG_TMP0, addr_reg);
1849            h->base = TCG_TMP0;
1850        }
1851        if (guest_base < 0x80000) {
1852            h->index = TCG_REG_NONE;
1853            h->disp = guest_base;
1854        } else {
1855            h->index = TCG_GUEST_BASE_REG;
1856            h->disp = 0;
1857        }
1858    }
1859
1860    return ldst;
1861}
1862
1863static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1864                            MemOpIdx oi, TCGType data_type)
1865{
1866    TCGLabelQemuLdst *ldst;
1867    HostAddress h;
1868
1869    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1870    tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
1871
1872    if (ldst) {
1873        ldst->type = data_type;
1874        ldst->datalo_reg = data_reg;
1875        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1876    }
1877}
1878
1879static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1880                            MemOpIdx oi, TCGType data_type)
1881{
1882    TCGLabelQemuLdst *ldst;
1883    HostAddress h;
1884
1885    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1886    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1887
1888    if (ldst) {
1889        ldst->type = data_type;
1890        ldst->datalo_reg = data_reg;
1891        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1892    }
1893}
1894
1895static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
1896                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1897{
1898    TCGLabel *l1 = NULL, *l2 = NULL;
1899    TCGLabelQemuLdst *ldst;
1900    HostAddress h;
1901    bool need_bswap;
1902    bool use_pair;
1903    S390Opcode insn;
1904
1905    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1906
1907    use_pair = h.aa.atom < MO_128;
1908    need_bswap = get_memop(oi) & MO_BSWAP;
1909
1910    if (!use_pair) {
1911        /*
1912         * Atomicity requires we use LPQ.  If we've already checked for
1913         * 16-byte alignment, that's all we need.  If we arrive with
1914         * lesser alignment, test at runtime: a 16-aligned address must
1915         * use LPQ; otherwise two 8-byte accesses are known to suffice.
1916         */
1917        if (h.aa.align < MO_128) {
1918            use_pair = true;
1919            l1 = gen_new_label();
1920            l2 = gen_new_label();
1921
1922            tcg_out_insn(s, RI, TMLL, addr_reg, 15);
1923            tgen_branch(s, 7, l1); /* CC in {1,2,3} */
1924        }
1925
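        /*
         * LPQ/STPQ operate on an even/odd register pair, with the even
         * register holding the most-significant doubleword; hence the
         * register constraints asserted here.
         */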
1926        tcg_debug_assert(!need_bswap);
1927        tcg_debug_assert(datalo & 1);
1928        tcg_debug_assert(datahi == datalo - 1);
1929        insn = is_ld ? RXY_LPQ : RXY_STPQ;
1930        tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp);
1931
1932        if (use_pair) {
1933            tgen_branch(s, S390_CC_ALWAYS, l2);
1934            tcg_out_label(s, l1);
1935        }
1936    }
1937    if (use_pair) {
1938        TCGReg d1, d2;
1939
1940        if (need_bswap) {
1941            d1 = datalo, d2 = datahi;
1942            insn = is_ld ? RXY_LRVG : RXY_STRVG;
1943        } else {
1944            d1 = datahi, d2 = datalo;
1945            insn = is_ld ? RXY_LG : RXY_STG;
1946        }
1947
1948        if (h.base == d1 || h.index == d1) {
1949            tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp);
1950            h.base = TCG_TMP0;
1951            h.index = TCG_REG_NONE;
1952            h.disp = 0;
1953        }
1954        tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp);
1955        tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8);
1956    }
1957    if (l2) {
1958        tcg_out_label(s, l2);
1959    }
1960
1961    if (ldst) {
1962        ldst->type = TCG_TYPE_I128;
1963        ldst->datalo_reg = datalo;
1964        ldst->datahi_reg = datahi;
1965        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1966    }
1967}
1968
1969static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1970{
1971    /* Reuse the zeroing that exists for goto_ptr.  */
1972    if (a0 == 0) {
1973        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
1974    } else {
1975        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1976        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1977    }
1978}
1979
1980static void tcg_out_goto_tb(TCGContext *s, int which)
1981{
1982    /*
1983     * The 4-byte BRCL displacement (at code_ptr + 1, in halfwords)
1984     * must be 4-aligned for atomic patching; add a nop if needed.
1985     */
1986    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1987        tcg_out16(s, NOP);
1988    }
1989    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1990    set_jmp_insn_offset(s, which);
1991    s->code_ptr += 2;
1992    set_jmp_reset_offset(s, which);
1993}
1994
1995void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1996                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1997{
1998    if (!HAVE_FACILITY(GEN_INST_EXT)) {
1999        return;
2000    }
2001    /* patch the branch destination */
2002    uintptr_t addr = tb->jmp_target_addr[n];
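    /*
     * jmp_rx points at the 4-byte displacement field, 2 bytes past the
     * start of the BRCL instruction; the displacement is measured from
     * the start of the instruction and counted in halfwords, hence the
     * -2 and the division by 2.
     */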
2003    intptr_t disp = addr - (jmp_rx - 2);
2004    qatomic_set((int32_t *)jmp_rw, disp / 2);
2005    /* no need to flush icache explicitly */
2006}
2007
2008# define OP_32_64(x) \
2009        case glue(glue(INDEX_op_,x),_i32): \
2010        case glue(glue(INDEX_op_,x),_i64)
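/* E.g. OP_32_64(ld8u) expands to
   "case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64:".  */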
2011
2012static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2013                              const TCGArg args[TCG_MAX_OP_ARGS],
2014                              const int const_args[TCG_MAX_OP_ARGS])
2015{
2016    S390Opcode op, op2;
2017    TCGArg a0, a1, a2;
2018
2019    switch (opc) {
2020    case INDEX_op_goto_ptr:
2021        a0 = args[0];
2022        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
2023        break;
2024
2025    OP_32_64(ld8u):
2026        /* ??? LLC (RXY format) is only present with the extended-immediate
2027           facility, whereas LLGC is always present.  */
2028        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
2029        break;
2030
2031    OP_32_64(ld8s):
2032        /* ??? LB is no smaller than LGB, so no point to using it.  */
2033        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
2034        break;
2035
2036    OP_32_64(ld16u):
2037        /* ??? LLH (RXY format) is only present with the extended-immediate
2038           facility, whereas LLGH is always present.  */
2039        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
2040        break;
2041
2042    case INDEX_op_ld16s_i32:
2043        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
2044        break;
2045
2046    case INDEX_op_ld_i32:
2047        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2048        break;
2049
2050    OP_32_64(st8):
2051        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
2052                    TCG_REG_NONE, args[2]);
2053        break;
2054
2055    OP_32_64(st16):
2056        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
2057                    TCG_REG_NONE, args[2]);
2058        break;
2059
2060    case INDEX_op_st_i32:
2061        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2062        break;
2063
2064    case INDEX_op_add_i32:
2065        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2066        if (const_args[2]) {
2067        do_addi_32:
2068            if (a0 == a1) {
2069                if (a2 == (int16_t)a2) {
2070                    tcg_out_insn(s, RI, AHI, a0, a2);
2071                    break;
2072                }
2073                tcg_out_insn(s, RIL, AFI, a0, a2);
2074                break;
2075            }
2076            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2077        } else if (a0 == a1) {
2078            tcg_out_insn(s, RR, AR, a0, a2);
2079        } else {
2080            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2081        }
2082        break;
2083    case INDEX_op_sub_i32:
2084        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2085        if (const_args[2]) {
2086            a2 = -a2;
2087            goto do_addi_32;
2088        } else if (a0 == a1) {
2089            tcg_out_insn(s, RR, SR, a0, a2);
2090        } else {
2091            tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
2092        }
2093        break;
2094
2095    case INDEX_op_and_i32:
2096        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2097        if (const_args[2]) {
2098            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2099            tgen_andi(s, TCG_TYPE_I32, a0, a2);
2100        } else if (a0 == a1) {
2101            tcg_out_insn(s, RR, NR, a0, a2);
2102        } else {
2103            tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
2104        }
2105        break;
2106    case INDEX_op_or_i32:
2107        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2108        if (const_args[2]) {
2109            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2110            tgen_ori(s, a0, a2);
2111        } else if (a0 == a1) {
2112            tcg_out_insn(s, RR, OR, a0, a2);
2113        } else {
2114            tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
2115        }
2116        break;
2117    case INDEX_op_xor_i32:
2118        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2119        if (const_args[2]) {
2120            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2121            tcg_out_insn(s, RIL, XILF, a0, a2);
2122        } else if (a0 == a1) {
2123            tcg_out_insn(s, RR, XR, args[0], args[2]);
2124        } else {
2125            tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
2126        }
2127        break;
2128
2129    case INDEX_op_andc_i32:
2130        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2131        if (const_args[2]) {
2132            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2133            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
2134        } else {
2135            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
2136        }
2137        break;
2138    case INDEX_op_orc_i32:
2139        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2140        if (const_args[2]) {
2141            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2142            tgen_ori(s, a0, (uint32_t)~a2);
2143        } else {
2144            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
2145        }
2146        break;
2147    case INDEX_op_eqv_i32:
2148        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2149        if (const_args[2]) {
2150            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2151            tcg_out_insn(s, RIL, XILF, a0, ~a2);
2152        } else {
2153            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
2154        }
2155        break;
2156    case INDEX_op_nand_i32:
2157        tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
2158        break;
2159    case INDEX_op_nor_i32:
2160        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
2161        break;
2162
2163    case INDEX_op_neg_i32:
2164        tcg_out_insn(s, RR, LCR, args[0], args[1]);
2165        break;
2166    case INDEX_op_not_i32:
2167        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
2168        break;
2169
2170    case INDEX_op_mul_i32:
2171        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2172        if (const_args[2]) {
2173            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2174            if (a2 == (int16_t)a2) {
2175                tcg_out_insn(s, RI, MHI, a0, a2);
2176            } else {
2177                tcg_out_insn(s, RIL, MSFI, a0, a2);
2178            }
2179        } else if (a0 == a1) {
2180            tcg_out_insn(s, RRE, MSR, a0, a2);
2181        } else {
2182            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
2183        }
2184        break;
2185
2186    case INDEX_op_div2_i32:
2187        tcg_debug_assert(args[0] == args[2]);
2188        tcg_debug_assert(args[1] == args[3]);
2189        tcg_debug_assert((args[1] & 1) == 0);
2190        tcg_debug_assert(args[0] == args[1] + 1);
2191        tcg_out_insn(s, RR, DR, args[1], args[4]);
2192        break;
2193    case INDEX_op_divu2_i32:
2194        tcg_debug_assert(args[0] == args[2]);
2195        tcg_debug_assert(args[1] == args[3]);
2196        tcg_debug_assert((args[1] & 1) == 0);
2197        tcg_debug_assert(args[0] == args[1] + 1);
2198        tcg_out_insn(s, RRE, DLR, args[1], args[4]);
2199        break;
2200
2201    case INDEX_op_shl_i32:
2202        op = RS_SLL;
2203        op2 = RSY_SLLK;
2204    do_shift32:
2205        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2206        if (a0 == a1) {
2207            if (const_args[2]) {
2208                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
2209            } else {
2210                tcg_out_sh32(s, op, a0, a2, 0);
2211            }
2212        } else {
2213            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift.  */
2214            if (const_args[2]) {
2215                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
2216            } else {
2217                tcg_out_sh64(s, op2, a0, a1, a2, 0);
2218            }
2219        }
2220        break;
2221    case INDEX_op_shr_i32:
2222        op = RS_SRL;
2223        op2 = RSY_SRLK;
2224        goto do_shift32;
2225    case INDEX_op_sar_i32:
2226        op = RS_SRA;
2227        op2 = RSY_SRAK;
2228        goto do_shift32;
2229
2230    case INDEX_op_rotl_i32:
2231        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
2232        if (const_args[2]) {
2233            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
2234        } else {
2235            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
2236        }
2237        break;
2238    case INDEX_op_rotr_i32:
2239        if (const_args[2]) {
2240            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
2241                         TCG_REG_NONE, (32 - args[2]) & 31);
2242        } else {
2243            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2244            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
2245        }
2246        break;
2247
2248    case INDEX_op_bswap16_i32:
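    /*
     * Byte-reverse the whole register, leaving the swapped halfword in
     * the high bits, then shift it back down: arithmetically to
     * sign-extend (TCG_BSWAP_OS), or logically to zero-extend.
     */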
2249        a0 = args[0], a1 = args[1], a2 = args[2];
2250        tcg_out_insn(s, RRE, LRVR, a0, a1);
2251        if (a2 & TCG_BSWAP_OS) {
2252            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
2253        } else {
2254            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
2255        }
2256        break;
2257    case INDEX_op_bswap16_i64:
2258        a0 = args[0], a1 = args[1], a2 = args[2];
2259        tcg_out_insn(s, RRE, LRVGR, a0, a1);
2260        if (a2 & TCG_BSWAP_OS) {
2261            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
2262        } else {
2263            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
2264        }
2265        break;
2266
2267    case INDEX_op_bswap32_i32:
2268        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2269        break;
2270    case INDEX_op_bswap32_i64:
2271        a0 = args[0], a1 = args[1], a2 = args[2];
2272        tcg_out_insn(s, RRE, LRVR, a0, a1);
2273        if (a2 & TCG_BSWAP_OS) {
2274            tcg_out_ext32s(s, a0, a0);
2275        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2276            tcg_out_ext32u(s, a0, a0);
2277        }
2278        break;
2279
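    /*
     * Double-word arithmetic: the low-part add/subtract sets the
     * carry/borrow in the CC, which ALCR/SLBR then fold into the
     * high part.
     */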
2280    case INDEX_op_add2_i32:
2281        if (const_args[4]) {
2282            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2283        } else {
2284            tcg_out_insn(s, RR, ALR, args[0], args[4]);
2285        }
2286        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2287        break;
2288    case INDEX_op_sub2_i32:
2289        if (const_args[4]) {
2290            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2291        } else {
2292            tcg_out_insn(s, RR, SLR, args[0], args[4]);
2293        }
2294        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2295        break;
2296
2297    case INDEX_op_br:
2298        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2299        break;
2300
2301    case INDEX_op_brcond_i32:
2302        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2303                    args[1], const_args[1], arg_label(args[3]));
2304        break;
2305    case INDEX_op_setcond_i32:
2306        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2307                     args[2], const_args[2], false);
2308        break;
2309    case INDEX_op_negsetcond_i32:
2310        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2311                     args[2], const_args[2], true);
2312        break;
2313    case INDEX_op_movcond_i32:
2314        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2315                     args[2], const_args[2], args[3], const_args[3], args[4]);
2316        break;
2317
2318    case INDEX_op_qemu_ld_a32_i32:
2319    case INDEX_op_qemu_ld_a64_i32:
2320        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
2321        break;
2322    case INDEX_op_qemu_ld_a32_i64:
2323    case INDEX_op_qemu_ld_a64_i64:
2324        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
2325        break;
2326    case INDEX_op_qemu_st_a32_i32:
2327    case INDEX_op_qemu_st_a64_i32:
2328        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
2329        break;
2330    case INDEX_op_qemu_st_a32_i64:
2331    case INDEX_op_qemu_st_a64_i64:
2332        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
2333        break;
2334    case INDEX_op_qemu_ld_a32_i128:
2335    case INDEX_op_qemu_ld_a64_i128:
2336        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
2337        break;
2338    case INDEX_op_qemu_st_a32_i128:
2339    case INDEX_op_qemu_st_a64_i128:
2340        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
2341        break;
2342
2343    case INDEX_op_ld16s_i64:
2344        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2345        break;
2346    case INDEX_op_ld32u_i64:
2347        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2348        break;
2349    case INDEX_op_ld32s_i64:
2350        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2351        break;
2352    case INDEX_op_ld_i64:
2353        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2354        break;
2355
2356    case INDEX_op_st32_i64:
2357        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2358        break;
2359    case INDEX_op_st_i64:
2360        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2361        break;
2362
2363    case INDEX_op_add_i64:
2364        a0 = args[0], a1 = args[1], a2 = args[2];
2365        if (const_args[2]) {
2366        do_addi_64:
2367            if (a0 == a1) {
2368                if (a2 == (int16_t)a2) {
2369                    tcg_out_insn(s, RI, AGHI, a0, a2);
2370                    break;
2371                }
2372                if (a2 == (int32_t)a2) {
2373                    tcg_out_insn(s, RIL, AGFI, a0, a2);
2374                    break;
2375                }
2376                if (a2 == (uint32_t)a2) {
2377                    tcg_out_insn(s, RIL, ALGFI, a0, a2);
2378                    break;
2379                }
2380                if (-a2 == (uint32_t)-a2) {
2381                    tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2382                    break;
2383                }
2384            }
2385            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2386        } else if (a0 == a1) {
2387            tcg_out_insn(s, RRE, AGR, a0, a2);
2388        } else {
2389            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2390        }
2391        break;
2392    case INDEX_op_sub_i64:
2393        a0 = args[0], a1 = args[1], a2 = args[2];
2394        if (const_args[2]) {
2395            a2 = -a2;
2396            goto do_addi_64;
2397        } else {
2398            tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
2399        }
2400        break;
2401
2402    case INDEX_op_and_i64:
2403        a0 = args[0], a1 = args[1], a2 = args[2];
2404        if (const_args[2]) {
2405            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2406            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2407        } else {
2408            tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
2409        }
2410        break;
2411    case INDEX_op_or_i64:
2412        a0 = args[0], a1 = args[1], a2 = args[2];
2413        if (const_args[2]) {
2414            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2415            tgen_ori(s, a0, a2);
2416        } else {
2417            tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
2418        }
2419        break;
2420    case INDEX_op_xor_i64:
2421        a0 = args[0], a1 = args[1], a2 = args[2];
2422        if (const_args[2]) {
2423            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2424            tgen_xori(s, a0, a2);
2425        } else {
2426            tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
2427        }
2428        break;
2429
2430    case INDEX_op_andc_i64:
2431        a0 = args[0], a1 = args[1], a2 = args[2];
2432        if (const_args[2]) {
2433            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2434            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
2435        } else {
2436            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
2437        }
2438        break;
2439    case INDEX_op_orc_i64:
2440        a0 = args[0], a1 = args[1], a2 = args[2];
2441        if (const_args[2]) {
2442            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2443            tgen_ori(s, a0, ~a2);
2444        } else {
2445            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
2446        }
2447        break;
2448    case INDEX_op_eqv_i64:
2449        a0 = args[0], a1 = args[1], a2 = args[2];
2450        if (const_args[2]) {
2451            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2452            tgen_xori(s, a0, ~a2);
2453        } else {
2454            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
2455        }
2456        break;
2457    case INDEX_op_nand_i64:
2458        tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
2459        break;
2460    case INDEX_op_nor_i64:
2461        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
2462        break;
2463
2464    case INDEX_op_neg_i64:
2465        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2466        break;
2467    case INDEX_op_not_i64:
2468        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
2469        break;
2470    case INDEX_op_bswap64_i64:
2471        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2472        break;
2473
2474    case INDEX_op_mul_i64:
2475        a0 = args[0], a1 = args[1], a2 = args[2];
2476        if (const_args[2]) {
2477            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2478            if (a2 == (int16_t)a2) {
2479                tcg_out_insn(s, RI, MGHI, a0, a2);
2480            } else {
2481                tcg_out_insn(s, RIL, MSGFI, a0, a2);
2482            }
2483        } else if (a0 == a1) {
2484            tcg_out_insn(s, RRE, MSGR, a0, a2);
2485        } else {
2486            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
2487        }
2488        break;
2489
2490    case INDEX_op_div2_i64:
2491        /*
2492         * ??? We get an unnecessary sign-extension of the dividend
2493         * into op0 with this definition; but since we always produce
2494         * both quotient and remainder anyway, using INDEX_op_div_i64
2495         * instead would require jumping through even more hoops.
2496         */
2497        tcg_debug_assert(args[0] == args[2]);
2498        tcg_debug_assert(args[1] == args[3]);
2499        tcg_debug_assert((args[1] & 1) == 0);
2500        tcg_debug_assert(args[0] == args[1] + 1);
2501        tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
2502        break;
2503    case INDEX_op_divu2_i64:
2504        tcg_debug_assert(args[0] == args[2]);
2505        tcg_debug_assert(args[1] == args[3]);
2506        tcg_debug_assert((args[1] & 1) == 0);
2507        tcg_debug_assert(args[0] == args[1] + 1);
2508        tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
2509        break;
2510    case INDEX_op_mulu2_i64:
2511        tcg_debug_assert(args[0] == args[2]);
2512        tcg_debug_assert((args[1] & 1) == 0);
2513        tcg_debug_assert(args[0] == args[1] + 1);
2514        tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
2515        break;
2516    case INDEX_op_muls2_i64:
2517        tcg_debug_assert((args[1] & 1) == 0);
2518        tcg_debug_assert(args[0] == args[1] + 1);
2519        tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
2520        break;
2521
2522    case INDEX_op_shl_i64:
2523        op = RSY_SLLG;
2524    do_shift64:
2525        if (const_args[2]) {
2526            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2527        } else {
2528            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2529        }
2530        break;
2531    case INDEX_op_shr_i64:
2532        op = RSY_SRLG;
2533        goto do_shift64;
2534    case INDEX_op_sar_i64:
2535        op = RSY_SRAG;
2536        goto do_shift64;
2537
2538    case INDEX_op_rotl_i64:
2539        if (const_args[2]) {
2540            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2541                         TCG_REG_NONE, args[2]);
2542        } else {
2543            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2544        }
2545        break;
2546    case INDEX_op_rotr_i64:
2547        if (const_args[2]) {
2548            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2549                         TCG_REG_NONE, (64 - args[2]) & 63);
2550        } else {
2551            /* We can use the smaller 32-bit negate because only the
2552               low 6 bits are examined for the rotate.  */
2553            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2554            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2555        }
2556        break;
2557
2558    case INDEX_op_add2_i64:
2559        if (const_args[4]) {
2560            if ((int64_t)args[4] >= 0) {
2561                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2562            } else {
2563                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2564            }
2565        } else {
2566            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2567        }
2568        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2569        break;
2570    case INDEX_op_sub2_i64:
2571        if (const_args[4]) {
2572            if ((int64_t)args[4] >= 0) {
2573                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2574            } else {
2575                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2576            }
2577        } else {
2578            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2579        }
2580        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2581        break;
2582
2583    case INDEX_op_brcond_i64:
2584        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2585                    args[1], const_args[1], arg_label(args[3]));
2586        break;
2587    case INDEX_op_setcond_i64:
2588        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2589                     args[2], const_args[2], false);
2590        break;
2591    case INDEX_op_negsetcond_i64:
2592        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2593                     args[2], const_args[2], true);
2594        break;
2595    case INDEX_op_movcond_i64:
2596        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2597                     args[2], const_args[2], args[3], const_args[3], args[4]);
2598        break;
2599
2600    OP_32_64(deposit):
2601        a0 = args[0], a1 = args[1], a2 = args[2];
2602        if (const_args[1]) {
2603            tgen_deposit(s, a0, a2, args[3], args[4], 1);
2604        } else {
2605            /* Since we can't support "0Z" as a constraint, we allow a1 in
2606               any register.  Fix things up as if it were a matching constraint.  */
2607            if (a0 != a1) {
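                /* This relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1. */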
2608                TCGType type = (opc == INDEX_op_deposit_i64);
2609                if (a0 == a2) {
2610                    tcg_out_mov(s, type, TCG_TMP0, a2);
2611                    a2 = TCG_TMP0;
2612                }
2613                tcg_out_mov(s, type, a0, a1);
2614            }
2615            tgen_deposit(s, a0, a2, args[3], args[4], 0);
2616        }
2617        break;
2618
2619    OP_32_64(extract):
2620        tgen_extract(s, args[0], args[1], args[2], args[3]);
2621        break;
2622
2623    case INDEX_op_clz_i64:
2624        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2625        break;
2626
2627    case INDEX_op_ctpop_i32:
2628        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
2629        break;
2630    case INDEX_op_ctpop_i64:
2631        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
2632        break;
2633
2634    case INDEX_op_mb:
2635        /* The host memory model is quite strong; we simply need to
2636           serialize the instruction stream.  */
2637        if (args[0] & TCG_MO_ST_LD) {
2638            /* fast-bcr-serialization facility (45) is present */
2639            tcg_out_insn(s, RR, BCR, 14, 0);
2640        }
2641        break;
2642
2643    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2644    case INDEX_op_mov_i64:
2645    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2646    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2647    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2648    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2649    case INDEX_op_ext8s_i64:
2650    case INDEX_op_ext8u_i32:
2651    case INDEX_op_ext8u_i64:
2652    case INDEX_op_ext16s_i32:
2653    case INDEX_op_ext16s_i64:
2654    case INDEX_op_ext16u_i32:
2655    case INDEX_op_ext16u_i64:
2656    case INDEX_op_ext32s_i64:
2657    case INDEX_op_ext32u_i64:
2658    case INDEX_op_ext_i32_i64:
2659    case INDEX_op_extu_i32_i64:
2660    case INDEX_op_extrl_i64_i32:
2661    default:
2662        g_assert_not_reached();
2663    }
2664}
2665
2666static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2667                            TCGReg dst, TCGReg src)
2668{
2669    if (is_general_reg(src)) {
2670        /* Replicate general register into two MO_64. */
2671        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
2672        if (vece == MO_64) {
2673            return true;
2674        }
2675        src = dst;
2676    }
2677
2678    /*
2679     * Recall that the "standard" integer, within a vector, is the
2680     * rightmost element of the leftmost doubleword, a-la VLLEZ.
2681     */
2682    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
2683    return true;
2684}
2685
2686static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2687                             TCGReg dst, TCGReg base, intptr_t offset)
2688{
2689    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
2690    return true;
2691}
2692
2693static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2694                             TCGReg dst, int64_t val)
2695{
2696    int i, mask, msb, lsb;
2697
2698    /* Look for int16_t elements.  */
2699    if (vece <= MO_16 ||
2700        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
2701        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
2702        return;
2703    }
2704
2705    /* Look for bit masks.  */
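    /*
     * A contiguous run of ones can be generated with VGM.  E.g.
     * 0x00ffff00 yields msb 8, lsb 23; a wrapped run such as
     * 0xff0000ff is encoded with msb 24 > lsb 7.
     */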
2706    if (vece == MO_32) {
2707        if (risbg_mask((int32_t)val)) {
2708            /* Handle wraparound by swapping msb and lsb.  */
2709            if ((val & 0x80000001u) == 0x80000001u) {
2710                msb = 32 - ctz32(~val);
2711                lsb = clz32(~val) - 1;
2712            } else {
2713                msb = clz32(val);
2714                lsb = 31 - ctz32(val);
2715            }
2716            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
2717            return;
2718        }
2719    } else {
2720        if (risbg_mask(val)) {
2721            /* Handle wraparound by swapping msb and lsb.  */
2722            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
2724                msb = 64 - ctz64(~val);
2725                lsb = clz64(~val) - 1;
2726            } else {
2727                msb = clz64(val);
2728                lsb = 63 - ctz64(val);
2729            }
2730            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
2731            return;
2732        }
2733    }
2734
2735    /* Look for all bytes 0x00 or 0xff.  */
2736    for (i = mask = 0; i < 8; i++) {
2737        uint8_t byte = val >> (i * 8);
2738        if (byte == 0xff) {
2739            mask |= 1 << i;
2740        } else if (byte != 0) {
2741            break;
2742        }
2743    }
2744    if (i == 8) {
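        /*
         * VGBM expands each of its 16 mask bits to a byte of 0xff;
         * multiplying the per-doubleword mask by 0x0101 replicates it
         * to both halves of the vector.
         */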
2745        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
2746        return;
2747    }
2748
2749    /* Otherwise, stuff it in the constant pool.  */
2750    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
2751    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
2752    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
2753}
2754
2755static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2756                           unsigned vecl, unsigned vece,
2757                           const TCGArg args[TCG_MAX_OP_ARGS],
2758                           const int const_args[TCG_MAX_OP_ARGS])
2759{
2760    TCGType type = vecl + TCG_TYPE_V64;
2761    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
2762
2763    switch (opc) {
2764    case INDEX_op_ld_vec:
2765        tcg_out_ld(s, type, a0, a1, a2);
2766        break;
2767    case INDEX_op_st_vec:
2768        tcg_out_st(s, type, a0, a1, a2);
2769        break;
2770    case INDEX_op_dupm_vec:
2771        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2772        break;
2773
2774    case INDEX_op_abs_vec:
2775        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
2776        break;
2777    case INDEX_op_neg_vec:
2778        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
2779        break;
2780    case INDEX_op_not_vec:
2781        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
2782        break;
2783
2784    case INDEX_op_add_vec:
2785        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
2786        break;
2787    case INDEX_op_sub_vec:
2788        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
2789        break;
2790    case INDEX_op_and_vec:
2791        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
2792        break;
2793    case INDEX_op_andc_vec:
2794        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
2795        break;
2796    case INDEX_op_mul_vec:
2797        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
2798        break;
2799    case INDEX_op_or_vec:
2800        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
2801        break;
2802    case INDEX_op_orc_vec:
2803        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
2804        break;
2805    case INDEX_op_xor_vec:
2806        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
2807        break;
2808    case INDEX_op_nand_vec:
2809        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
2810        break;
2811    case INDEX_op_nor_vec:
2812        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
2813        break;
2814    case INDEX_op_eqv_vec:
2815        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
2816        break;
2817
2818    case INDEX_op_shli_vec:
2819        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
2820        break;
2821    case INDEX_op_shri_vec:
2822        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
2823        break;
2824    case INDEX_op_sari_vec:
2825        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
2826        break;
2827    case INDEX_op_rotli_vec:
2828        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
2829        break;
2830    case INDEX_op_shls_vec:
2831        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
2832        break;
2833    case INDEX_op_shrs_vec:
2834        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
2835        break;
2836    case INDEX_op_sars_vec:
2837        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
2838        break;
2839    case INDEX_op_rotls_vec:
2840        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
2841        break;
2842    case INDEX_op_shlv_vec:
2843        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
2844        break;
2845    case INDEX_op_shrv_vec:
2846        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
2847        break;
2848    case INDEX_op_sarv_vec:
2849        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
2850        break;
2851    case INDEX_op_rotlv_vec:
2852        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
2853        break;
2854
2855    case INDEX_op_smin_vec:
2856        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
2857        break;
2858    case INDEX_op_smax_vec:
2859        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
2860        break;
2861    case INDEX_op_umin_vec:
2862        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
2863        break;
2864    case INDEX_op_umax_vec:
2865        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
2866        break;
2867
2868    case INDEX_op_bitsel_vec:
2869        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
2870        break;
2871
2872    case INDEX_op_cmp_vec:
2873        switch ((TCGCond)args[3]) {
2874        case TCG_COND_EQ:
2875            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
2876            break;
2877        case TCG_COND_GT:
2878            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
2879            break;
2880        case TCG_COND_GTU:
2881            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
2882            break;
2883        default:
2884            g_assert_not_reached();
2885        }
2886        break;
2887
2888    case INDEX_op_s390_vuph_vec:
2889        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
2890        break;
2891    case INDEX_op_s390_vupl_vec:
2892        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
2893        break;
2894    case INDEX_op_s390_vpks_vec:
2895        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
2896        break;
2897
2898    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov.  */
2899    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec.  */
2900    default:
2901        g_assert_not_reached();
2902    }
2903}
2904
2905int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2906{
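    /* Return 1 if the op is supported directly, -1 if supported via
       expansion in tcg_expand_vec_op, and 0 if unsupported.  */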
2907    switch (opc) {
2908    case INDEX_op_abs_vec:
2909    case INDEX_op_add_vec:
2910    case INDEX_op_and_vec:
2911    case INDEX_op_andc_vec:
2912    case INDEX_op_bitsel_vec:
2913    case INDEX_op_eqv_vec:
2914    case INDEX_op_nand_vec:
2915    case INDEX_op_neg_vec:
2916    case INDEX_op_nor_vec:
2917    case INDEX_op_not_vec:
2918    case INDEX_op_or_vec:
2919    case INDEX_op_orc_vec:
2920    case INDEX_op_rotli_vec:
2921    case INDEX_op_rotls_vec:
2922    case INDEX_op_rotlv_vec:
2923    case INDEX_op_sari_vec:
2924    case INDEX_op_sars_vec:
2925    case INDEX_op_sarv_vec:
2926    case INDEX_op_shli_vec:
2927    case INDEX_op_shls_vec:
2928    case INDEX_op_shlv_vec:
2929    case INDEX_op_shri_vec:
2930    case INDEX_op_shrs_vec:
2931    case INDEX_op_shrv_vec:
2932    case INDEX_op_smax_vec:
2933    case INDEX_op_smin_vec:
2934    case INDEX_op_sub_vec:
2935    case INDEX_op_umax_vec:
2936    case INDEX_op_umin_vec:
2937    case INDEX_op_xor_vec:
2938        return 1;
2939    case INDEX_op_cmp_vec:
2940    case INDEX_op_cmpsel_vec:
2941    case INDEX_op_rotrv_vec:
2942        return -1;
2943    case INDEX_op_mul_vec:
2944        return vece < MO_64;
2945    case INDEX_op_ssadd_vec:
2946    case INDEX_op_sssub_vec:
2947        return vece < MO_64 ? -1 : 0;
2948    default:
2949        return 0;
2950    }
2951}
2952
2953static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
2954                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2955{
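    /*
     * The hardware only provides VCEQ, VCH and VCHL, i.e. EQ, GT and
     * GTU.  Everything else is derived by swapping the operands
     * (LT -> GT), inverting the result (NE -> !EQ), or both
     * (GE -> !(swapped GT)); the return value tells the caller whether
     * an inversion is still required.
     */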
2956    bool need_swap = false, need_inv = false;
2957
2958    switch (cond) {
2959    case TCG_COND_EQ:
2960    case TCG_COND_GT:
2961    case TCG_COND_GTU:
2962        break;
2963    case TCG_COND_NE:
2964    case TCG_COND_LE:
2965    case TCG_COND_LEU:
2966        need_inv = true;
2967        break;
2968    case TCG_COND_LT:
2969    case TCG_COND_LTU:
2970        need_swap = true;
2971        break;
2972    case TCG_COND_GE:
2973    case TCG_COND_GEU:
2974        need_swap = need_inv = true;
2975        break;
2976    default:
2977        g_assert_not_reached();
2978    }
2979
2980    if (need_inv) {
2981        cond = tcg_invert_cond(cond);
2982    }
2983    if (need_swap) {
2984        TCGv_vec t1;
2985        t1 = v1, v1 = v2, v2 = t1;
2986        cond = tcg_swap_cond(cond);
2987    }
2988
2989    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
2990              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
2991
2992    return need_inv;
2993}
2994
2995static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
2996                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2997{
2998    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
2999        tcg_gen_not_vec(vece, v0, v0);
3000    }
3001}
3002
3003static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
3004                              TCGv_vec c1, TCGv_vec c2,
3005                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
3006{
3007    TCGv_vec t = tcg_temp_new_vec(type);
3008
3009    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
3010        /* Invert the sense of the compare by swapping arguments.  */
3011        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
3012    } else {
3013        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
3014    }
3015    tcg_temp_free_vec(t);
3016}
3017
3018static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
3019                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
3020{
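    /*
     * Expand a saturating add/sub at element size vece by unpacking
     * both operands to double-width elements, performing exact
     * arithmetic there, and re-packing with signed saturation.  E.g.
     * for MO_8 ssadd: VUPH/VUPL to MO_16, VA at MO_16, then VPKS back
     * down to MO_8.
     */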
3021    TCGv_vec h1 = tcg_temp_new_vec(type);
3022    TCGv_vec h2 = tcg_temp_new_vec(type);
3023    TCGv_vec l1 = tcg_temp_new_vec(type);
3024    TCGv_vec l2 = tcg_temp_new_vec(type);
3025
3026    tcg_debug_assert(vece < MO_64);
3027
3028    /* Unpack with sign-extension. */
3029    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3030              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
3031    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3032              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
3033
3034    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3035              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
3036    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3037              tcgv_vec_arg(l2), tcgv_vec_arg(v2));
3038
3039    /* Arithmetic on a wider element size. */
3040    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
3041              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
3042    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
3043              tcgv_vec_arg(l1), tcgv_vec_arg(l2));
3044
3045    /* Pack with saturation. */
3046    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
3047              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));
3048
3049    tcg_temp_free_vec(h1);
3050    tcg_temp_free_vec(h2);
3051    tcg_temp_free_vec(l1);
3052    tcg_temp_free_vec(l2);
3053}
3054
3055void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3056                       TCGArg a0, ...)
3057{
3058    va_list va;
3059    TCGv_vec v0, v1, v2, v3, v4, t0;
3060
3061    va_start(va, a0);
3062    v0 = temp_tcgv_vec(arg_temp(a0));
3063    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3064    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3065
3066    switch (opc) {
3067    case INDEX_op_cmp_vec:
3068        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3069        break;
3070
3071    case INDEX_op_cmpsel_vec:
3072        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3073        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3074        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
3075        break;
3076
3077    case INDEX_op_rotrv_vec:
3078        t0 = tcg_temp_new_vec(type);
3079        tcg_gen_neg_vec(vece, t0, v2);
3080        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3081        tcg_temp_free_vec(t0);
3082        break;
3083
3084    case INDEX_op_ssadd_vec:
3085        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
3086        break;
3087    case INDEX_op_sssub_vec:
3088        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
3089        break;
3090
3091    default:
3092        g_assert_not_reached();
3093    }
3094    va_end(va);
3095}
3096
3097static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3098{
3099    switch (op) {
3100    case INDEX_op_goto_ptr:
3101        return C_O0_I1(r);
3102
3103    case INDEX_op_ld8u_i32:
3104    case INDEX_op_ld8u_i64:
3105    case INDEX_op_ld8s_i32:
3106    case INDEX_op_ld8s_i64:
3107    case INDEX_op_ld16u_i32:
3108    case INDEX_op_ld16u_i64:
3109    case INDEX_op_ld16s_i32:
3110    case INDEX_op_ld16s_i64:
3111    case INDEX_op_ld_i32:
3112    case INDEX_op_ld32u_i64:
3113    case INDEX_op_ld32s_i64:
3114    case INDEX_op_ld_i64:
3115        return C_O1_I1(r, r);
3116
3117    case INDEX_op_st8_i32:
3118    case INDEX_op_st8_i64:
3119    case INDEX_op_st16_i32:
3120    case INDEX_op_st16_i64:
3121    case INDEX_op_st_i32:
3122    case INDEX_op_st32_i64:
3123    case INDEX_op_st_i64:
3124        return C_O0_I2(r, r);
3125
3126    case INDEX_op_add_i32:
3127    case INDEX_op_add_i64:
3128    case INDEX_op_shl_i64:
3129    case INDEX_op_shr_i64:
3130    case INDEX_op_sar_i64:
3131    case INDEX_op_rotl_i32:
3132    case INDEX_op_rotl_i64:
3133    case INDEX_op_rotr_i32:
3134    case INDEX_op_rotr_i64:
3135    case INDEX_op_setcond_i32:
3136    case INDEX_op_negsetcond_i32:
3137        return C_O1_I2(r, r, ri);
3138    case INDEX_op_setcond_i64:
3139    case INDEX_op_negsetcond_i64:
3140        return C_O1_I2(r, r, rJU);
3141
3142    case INDEX_op_clz_i64:
3143        return C_O1_I2(r, r, rI);
3144
3145    case INDEX_op_sub_i32:
3146    case INDEX_op_sub_i64:
3147    case INDEX_op_and_i32:
3148    case INDEX_op_or_i32:
3149    case INDEX_op_xor_i32:
3150        return C_O1_I2(r, r, ri);
3151    case INDEX_op_and_i64:
3152        return C_O1_I2(r, r, rNKR);
3153    case INDEX_op_or_i64:
3154    case INDEX_op_xor_i64:
3155        return C_O1_I2(r, r, rK);
3156
3157    case INDEX_op_andc_i32:
3158    case INDEX_op_orc_i32:
3159    case INDEX_op_eqv_i32:
3160        return C_O1_I2(r, r, ri);
3161    case INDEX_op_andc_i64:
3162        return C_O1_I2(r, r, rKR);
3163    case INDEX_op_orc_i64:
3164    case INDEX_op_eqv_i64:
3165        return C_O1_I2(r, r, rNK);
3166
3167    case INDEX_op_nand_i32:
3168    case INDEX_op_nand_i64:
3169    case INDEX_op_nor_i32:
3170    case INDEX_op_nor_i64:
3171        return C_O1_I2(r, r, r);
3172
3173    case INDEX_op_mul_i32:
3174        return (HAVE_FACILITY(MISC_INSN_EXT2)
3175                ? C_O1_I2(r, r, ri)
3176                : C_O1_I2(r, 0, ri));
3177    case INDEX_op_mul_i64:
3178        return (HAVE_FACILITY(MISC_INSN_EXT2)
3179                ? C_O1_I2(r, r, rJ)
3180                : C_O1_I2(r, 0, rJ));
3181
3182    case INDEX_op_shl_i32:
3183    case INDEX_op_shr_i32:
3184    case INDEX_op_sar_i32:
3185        return C_O1_I2(r, r, ri);
3186
3187    case INDEX_op_brcond_i32:
3188        return C_O0_I2(r, ri);
3189    case INDEX_op_brcond_i64:
3190        return C_O0_I2(r, rJU);
3191
3192    case INDEX_op_bswap16_i32:
3193    case INDEX_op_bswap16_i64:
3194    case INDEX_op_bswap32_i32:
3195    case INDEX_op_bswap32_i64:
3196    case INDEX_op_bswap64_i64:
3197    case INDEX_op_neg_i32:
3198    case INDEX_op_neg_i64:
3199    case INDEX_op_not_i32:
3200    case INDEX_op_not_i64:
3201    case INDEX_op_ext8s_i32:
3202    case INDEX_op_ext8s_i64:
3203    case INDEX_op_ext8u_i32:
3204    case INDEX_op_ext8u_i64:
3205    case INDEX_op_ext16s_i32:
3206    case INDEX_op_ext16s_i64:
3207    case INDEX_op_ext16u_i32:
3208    case INDEX_op_ext16u_i64:
3209    case INDEX_op_ext32s_i64:
3210    case INDEX_op_ext32u_i64:
3211    case INDEX_op_ext_i32_i64:
3212    case INDEX_op_extu_i32_i64:
3213    case INDEX_op_extract_i32:
3214    case INDEX_op_extract_i64:
3215    case INDEX_op_ctpop_i32:
3216    case INDEX_op_ctpop_i64:
3217        return C_O1_I1(r, r);
3218
3219    case INDEX_op_qemu_ld_a32_i32:
3220    case INDEX_op_qemu_ld_a64_i32:
3221    case INDEX_op_qemu_ld_a32_i64:
3222    case INDEX_op_qemu_ld_a64_i64:
3223        return C_O1_I1(r, r);
3224    case INDEX_op_qemu_st_a32_i64:
3225    case INDEX_op_qemu_st_a64_i64:
3226    case INDEX_op_qemu_st_a32_i32:
3227    case INDEX_op_qemu_st_a64_i32:
3228        return C_O0_I2(r, r);
3229    case INDEX_op_qemu_ld_a32_i128:
3230    case INDEX_op_qemu_ld_a64_i128:
3231        return C_O2_I1(o, m, r);
3232    case INDEX_op_qemu_st_a32_i128:
3233    case INDEX_op_qemu_st_a64_i128:
3234        return C_O0_I3(o, m, r);
3235
3236    case INDEX_op_deposit_i32:
3237    case INDEX_op_deposit_i64:
3238        return C_O1_I2(r, rZ, r);
3239
3240    case INDEX_op_movcond_i32:
3241        return C_O1_I4(r, r, ri, rI, r);
3242    case INDEX_op_movcond_i64:
3243        return C_O1_I4(r, r, rJU, rI, r);
3244
3245    case INDEX_op_div2_i32:
3246    case INDEX_op_div2_i64:
3247    case INDEX_op_divu2_i32:
3248    case INDEX_op_divu2_i64:
3249        return C_O2_I3(o, m, 0, 1, r);
3250
3251    case INDEX_op_mulu2_i64:
3252        return C_O2_I2(o, m, 0, r);
3253    case INDEX_op_muls2_i64:
3254        return C_O2_I2(o, m, r, r);
3255
3256    case INDEX_op_add2_i32:
3257    case INDEX_op_sub2_i32:
3258        return C_N1_O1_I4(r, r, 0, 1, ri, r);
3259
3260    case INDEX_op_add2_i64:
3261    case INDEX_op_sub2_i64:
3262        return C_N1_O1_I4(r, r, 0, 1, rJU, r);
3263
3264    case INDEX_op_st_vec:
3265        return C_O0_I2(v, r);
3266    case INDEX_op_ld_vec:
3267    case INDEX_op_dupm_vec:
3268        return C_O1_I1(v, r);
3269    case INDEX_op_dup_vec:
3270        return C_O1_I1(v, vr);
3271    case INDEX_op_abs_vec:
3272    case INDEX_op_neg_vec:
3273    case INDEX_op_not_vec:
3274    case INDEX_op_rotli_vec:
3275    case INDEX_op_sari_vec:
3276    case INDEX_op_shli_vec:
3277    case INDEX_op_shri_vec:
3278    case INDEX_op_s390_vuph_vec:
3279    case INDEX_op_s390_vupl_vec:
3280        return C_O1_I1(v, v);
3281    case INDEX_op_add_vec:
3282    case INDEX_op_sub_vec:
3283    case INDEX_op_and_vec:
3284    case INDEX_op_andc_vec:
3285    case INDEX_op_or_vec:
3286    case INDEX_op_orc_vec:
3287    case INDEX_op_xor_vec:
3288    case INDEX_op_nand_vec:
3289    case INDEX_op_nor_vec:
3290    case INDEX_op_eqv_vec:
3291    case INDEX_op_cmp_vec:
3292    case INDEX_op_mul_vec:
3293    case INDEX_op_rotlv_vec:
3294    case INDEX_op_rotrv_vec:
3295    case INDEX_op_shlv_vec:
3296    case INDEX_op_shrv_vec:
3297    case INDEX_op_sarv_vec:
3298    case INDEX_op_smax_vec:
3299    case INDEX_op_smin_vec:
3300    case INDEX_op_umax_vec:
3301    case INDEX_op_umin_vec:
3302    case INDEX_op_s390_vpks_vec:
3303        return C_O1_I2(v, v, v);
3304    case INDEX_op_rotls_vec:
3305    case INDEX_op_shls_vec:
3306    case INDEX_op_shrs_vec:
3307    case INDEX_op_sars_vec:
3308        return C_O1_I2(v, v, r);
3309    case INDEX_op_bitsel_vec:
3310        return C_O1_I3(v, v, v, v);
3311
3312    default:
3313        g_assert_not_reached();
3314    }
3315}
3316
3317/*
3318 * Mainline glibc added HWCAP_S390_VX before it was part of the kernel ABI.
3319 * Some distros have fixed this up locally, others have not.
3320 */
3321#ifndef HWCAP_S390_VXRS
3322#define HWCAP_S390_VXRS 2048
3323#endif
3324
3325static void query_s390_facilities(void)
3326{
3327    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3328    const char *which;
3329
3330    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
3331       is present on all 64-bit systems, but let's check for it anyway.  */
3332    if (hwcap & HWCAP_S390_STFLE) {
3333        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
3334        register void *r1 __asm__("1") = s390_facilities;
3335
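        /*
         * STFLE expects r0 to hold the number of doublewords provided
         * minus one, and stores the facility bits at the second-operand
         * address; the opcode is hand-encoded with .word so that no
         * assembler support for the instruction is required.
         */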
3336        /* stfle 0(%r1) */
3337        asm volatile(".word 0xb2b0,0x1000"
3338                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
3339    }
3340
3341    /*
3342     * Use of vector registers requires OS support beyond the facility bit.
3343     * If the kernel does not advertise support, disable the facility bits.
3344     * There is nothing else we currently care about in the 3rd word, so
3345     * disable VECTOR with one store.
3346     */
3347    if (!(hwcap & HWCAP_S390_VXRS)) {
3348        s390_facilities[2] = 0;
3349    }
3350
3351    /*
3352     * Minimum supported cpu revision is z196.
3353     * Check for all required facilities.
3354     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
3355     */
3356    if (!HAVE_FACILITY(LONG_DISP)) {
3357        which = "long-displacement";
3358        goto fail;
3359    }
3360    if (!HAVE_FACILITY(EXT_IMM)) {
3361        which = "extended-immediate";
3362        goto fail;
3363    }
3364    if (!HAVE_FACILITY(GEN_INST_EXT)) {
3365        which = "general-instructions-extension";
3366        goto fail;
3367    }
3368    /*
3369     * Facility 45 is a big bin that contains: distinct-operands,
3370     * fast-BCR-serialization, high-word, population-count,
3371     * interlocked-access-1, and load/store-on-condition-1
3372     */
3373    if (!HAVE_FACILITY(45)) {
3374        which = "45";
3375        goto fail;
3376    }
3377    return;
3378
3379 fail:
3380    error_report("%s: missing required facility %s", __func__, which);
3381    exit(EXIT_FAILURE);
3382}
3383
3384static void tcg_target_init(TCGContext *s)
3385{
3386    query_s390_facilities();
3387
3388    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
3389    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
3390    if (HAVE_FACILITY(VECTOR)) {
3391        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3392        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3393    }
3394
3395    tcg_target_call_clobber_regs = 0;
3396    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3397    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
3398    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3399    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3400    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3401    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3402    /* The r6 register is technically call-saved, but it's also a parameter
3403       register, so it can be clobbered by setup for the qemu_st helper.  */
3404    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3405    /* The return register can be considered call-clobbered.  */
3406    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
3407
3408    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3409    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3410    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3411    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3412    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3413    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3414    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3415    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3416    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3417    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3418    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3419    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3420    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
3421    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
3422    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
3423    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
3424    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
3425    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
3426    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
3427    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
3428    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
3429    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
3430    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
3431    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
3432
3433    s->reserved_regs = 0;
3434    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
3435    /* XXX Many insns can't be used with R0, so it is best avoided for now. */
3436    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
3437    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3438}
3439
3440#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
3441                           + TCG_STATIC_CALL_ARGS_SIZE           \
3442                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
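/*
 * A worked example, assuming the usual constants (TCG_TARGET_CALL_STACK_OFFSET
 * of 160 for the s390x register save area, TCG_STATIC_CALL_ARGS_SIZE of 128,
 * and CPU_TEMP_BUF_NLONGS of 128 with 8-byte longs):
 *
 *     FRAME_SIZE = 160 + 128 + 128 * 8 = 1312 bytes
 *
 * comfortably below the uleb128 limit checked further down.
 */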
3443
3444static void tcg_target_qemu_prologue(TCGContext *s)
3445{
3446    /* stmg %r6,%r15,48(%r15) (save registers) */
3447    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
3448
3449    /* aghi %r15,-frame_size */
3450    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
3451
3452    tcg_set_frame(s, TCG_REG_CALL_STACK,
3453                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
3454                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3455
3456    if (!tcg_use_softmmu && guest_base >= 0x80000) {
3457        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
3458        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
3459    }
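    /*
     * Illustrative: a guest_base below 0x80000 fits in the signed 20-bit
     * displacement of the long-displacement memory forms
     * (-0x80000 .. 0x7ffff), so it can be folded into each guest access
     * and no dedicated base register needs to be reserved.
     */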
3460
3461    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3462
3463    /* br %r3 (go to TB) */
3464    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
3465
3466    /*
3467     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3468     * and fall through to the rest of the epilogue.
3469     */
3470    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3471    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
3472
3473    /* TB epilogue */
3474    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
3475
3476    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
3477    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
3478                 FRAME_SIZE + 48);
3479
3480    /* br %r14 (return) */
3481    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
3482}
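/*
 * Sketch of the frame built above, with offsets relative to the new %r15
 * (the 160/128 constants are the usual values, shown for orientation only):
 *
 *     FRAME_SIZE+48 .. FRAME_SIZE+127   caller's %r6-%r15 save slots (stmg)
 *     288 ..                            TCG temp buffer (tcg_set_frame)
 *     160 .. 287                        static call args area
 *     0 .. 159                          register save area for callees
 */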
3483
3484static void tcg_out_tb_start(TCGContext *s)
3485{
3486    /* nothing to do */
3487}
3488
3489static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3490{
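    /* 0x07 bytes pair up into 0x0707 halfwords, i.e. "bcr 0,%r7"
       (nopr %r7), the canonical two-byte s390x no-op.  */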
3491    memset(p, 0x07, count * sizeof(tcg_insn_unit));
3492}
3493
3494typedef struct {
3495    DebugFrameHeader h;
3496    uint8_t fde_def_cfa[4];
3497    uint8_t fde_reg_ofs[18];
3498} DebugFrame;
3499
3500/* We're expecting a 2-byte uleb128 encoded value.  */
3501QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
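/*
 * Illustrative: fde_def_cfa below always emits FRAME_SIZE as exactly two
 * uleb128 bytes, (FRAME_SIZE & 0x7f) | 0x80 then FRAME_SIZE >> 7, which
 * covers 14 bits.  For a hypothetical FRAME_SIZE of 1312 (0x520) the two
 * bytes would be 0xa0, 0x0a.
 */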
3502
3503#define ELF_HOST_MACHINE  EM_S390
3504
3505static const DebugFrame debug_frame = {
3506    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3507    .h.cie.id = -1,
3508    .h.cie.version = 1,
3509    .h.cie.code_align = 1,
3510    .h.cie.data_align = 8,                /* sleb128 8 */
3511    .h.cie.return_column = TCG_REG_R14,
3512
3513    /* Total FDE size does not include the "len" member.  */
3514    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3515
3516    .fde_def_cfa = {
3517        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
3518        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3519        (FRAME_SIZE >> 7)
3520    },
3521    .fde_reg_ofs = {
3522        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
3523        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
3524        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
3525        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
3526        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
3527        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
3528        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
3529        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
3530        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
3531    }
3532};
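/*
 * Reading aid: each fde_reg_ofs pair is (0x80 | regno, factored offset),
 * with the offset scaled by .h.cie.data_align = 8.  So "0x86, 6" records
 * %r6 saved at CFA + 6 * 8 = CFA + 48, matching the stmg in the prologue;
 * the following pairs step through %r7-%r14 in successive 8-byte slots.
 */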
3533
3534void tcg_register_jit(const void *buf, size_t buf_size)
3535{
3536    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3537}
3538