/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "elf.h"

#define TCG_CT_CONST_S16        (1 << 8)
#define TCG_CT_CONST_S32        (1 << 9)
#define TCG_CT_CONST_U32        (1 << 10)
#define TCG_CT_CONST_ZERO       (1 << 11)
#define TCG_CT_CONST_P32        (1 << 12)
#define TCG_CT_CONST_INV        (1 << 13)
#define TCG_CT_CONST_INVRISBG   (1 << 14)
#define TCG_CT_CONST_CMP        (1 << 15)
#define TCG_CT_CONST_M1         (1 << 16)

#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS      MAKE_64BIT_MASK(32, 32)

/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R1
#define TCG_VEC_TMP0    TCG_REG_V31

#define TCG_GUEST_BASE_REG TCG_REG_R13

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
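/* For example (illustrative, consistent with the RSY emitter below):
   the two bytes of RSY_SLLG == 0xeb0d are emitted as the first byte
   (0xeb) and the very last byte (0x0d) of the 6-byte instruction.  */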
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,
    RI_TMLL     = 0xa701,
    RI_TMLH     = 0xa700,
    RI_TMHL     = 0xa703,
    RI_TMHH     = 0xa702,

    RIEb_CGRJ    = 0xec64,
    RIEb_CLGRJ   = 0xec65,
    RIEb_CLRJ    = 0xec77,
    RIEb_CRJ     = 0xec76,

    RIEc_CGIJ    = 0xec7c,
    RIEc_CIJ     = 0xec7e,
    RIEc_CLGIJ   = 0xec7d,
    RIEc_CLIJ    = 0xec7f,

    RIEf_RISBG   = 0xec55,

    RIEg_LOCGHI  = 0xec46,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_ALGFR   = 0xb91a,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRFa_MGRK   = 0xb9ec,
    RRFa_MSRKC  = 0xb9fd,
    RRFa_MSGRKC = 0xb9ed,
    RRFa_NCRK   = 0xb9f5,
    RRFa_NCGRK  = 0xb9e5,
    RRFa_NNRK   = 0xb974,
    RRFa_NNGRK  = 0xb964,
    RRFa_NORK   = 0xb976,
    RRFa_NOGRK  = 0xb966,
    RRFa_NRK    = 0xb9f4,
    RRFa_NGRK   = 0xb9e4,
    RRFa_NXRK   = 0xb977,
    RRFa_NXGRK  = 0xb967,
    RRFa_OCRK   = 0xb975,
    RRFa_OCGRK  = 0xb965,
    RRFa_ORK    = 0xb9f6,
    RRFa_OGRK   = 0xb9e6,
    RRFa_SRK    = 0xb9f9,
    RRFa_SGRK   = 0xb9e9,
    RRFa_SLRK   = 0xb9fb,
    RRFa_SLGRK  = 0xb9eb,
    RRFa_XRK    = 0xb9f7,
    RRFa_XGRK   = 0xb9e7,

    RRFam_SELGR = 0xb9e3,

    RRFc_LOCR   = 0xb9f2,
    RRFc_LOCGR  = 0xb9e2,
    RRFc_POPCNT = 0xb9e1,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LPQ     = 0xe38f,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STPQ    = 0xe38e,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    VRIa_VGBM   = 0xe744,
    VRIa_VREPI  = 0xe745,
    VRIb_VGM    = 0xe746,
    VRIc_VREP   = 0xe74d,

    VRRa_VLC    = 0xe7de,
    VRRa_VLP    = 0xe7df,
    VRRa_VLR    = 0xe756,
    VRRc_VA     = 0xe7f3,
    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
    VRRc_VCH    = 0xe7fb,   /* " */
    VRRc_VCHL   = 0xe7f9,   /* " */
    VRRc_VERLLV = 0xe773,
    VRRc_VESLV  = 0xe770,
    VRRc_VESRAV = 0xe77a,
    VRRc_VESRLV = 0xe778,
    VRRc_VML    = 0xe7a2,
    VRRc_VMN    = 0xe7fe,
    VRRc_VMNL   = 0xe7fc,
    VRRc_VMX    = 0xe7ff,
    VRRc_VMXL   = 0xe7fd,
    VRRc_VN     = 0xe768,
    VRRc_VNC    = 0xe769,
    VRRc_VNN    = 0xe76e,
    VRRc_VNO    = 0xe76b,
    VRRc_VNX    = 0xe76c,
    VRRc_VO     = 0xe76a,
    VRRc_VOC    = 0xe76f,
    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
    VRRc_VS     = 0xe7f7,
    VRRa_VUPH   = 0xe7d7,
    VRRa_VUPL   = 0xe7d6,
    VRRc_VX     = 0xe76d,
    VRRe_VSEL   = 0xe78d,
    VRRf_VLVGP  = 0xe762,

    VRSa_VERLL  = 0xe733,
    VRSa_VESL   = 0xe730,
    VRSa_VESRA  = 0xe73a,
    VRSa_VESRL  = 0xe738,
    VRSb_VLVG   = 0xe722,
    VRSc_VLGV   = 0xe721,

    VRX_VL      = 0xe706,
    VRX_VLLEZ   = 0xe704,
    VRX_VLREP   = 0xe705,
    VRX_VST     = 0xe70e,
    VRX_VSTEF   = 0xe70b,
    VRX_VSTEG   = 0xe70a,

    NOP         = 0x0707,
} S390Opcode;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
    "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,

    /* V8-V15 are call saved, and omitted. */
    TCG_REG_V0,
    TCG_REG_V1,
    TCG_REG_V2,
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
    TCG_REG_V20,
    TCG_REG_V21,
    TCG_REG_V22,
    TCG_REG_V23,
    TCG_REG_V24,
    TCG_REG_V25,
    TCG_REG_V26,
    TCG_REG_V27,
    TCG_REG_V28,
    TCG_REG_V29,
    TCG_REG_V30,
    TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot == 0);
    return TCG_REG_R2;
}

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

#define S390_TM_EQ      8  /* CC == 0 */
#define S390_TM_NE      7  /* CC in {1,2,3} */

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[16] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_TSTEQ] = S390_CC_EQ,
    [TCG_COND_TSTNE] = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation; however, since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[16] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_TSTEQ] = S390_CC_ALWAYS,
    [TCG_COND_TSTNE] = S390_CC_NEVER,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[3];

static inline bool is_general_reg(TCGReg r)
{
    return r <= TCG_REG_R15;
}

static inline bool is_vector_reg(TCGReg r)
{
    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
}

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - src_rx;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_20:
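        /*
         * Explanatory note: the RXY-format 20-bit displacement is split,
         * with its low 12 bits (DL) at bits 16-27 of this 32-bit word
         * and its high 8 bits (DH) at bits 8-15; the mask below keeps
         * the base register (bits 28-31) and opcode byte (bits 0-7).
         */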
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)src_rw & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(src_rw, old);
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static int is_const_p16(uint64_t val)
{
    for (int i = 0; i < 4; ++i) {
        uint64_t mask = 0xffffull << (i * 16);
        if ((val & ~mask) == 0) {
            return i;
        }
    }
    return -1;
}

static int is_const_p32(uint64_t val)
{
    if ((val & 0xffffffff00000000ull) == 0) {
        return 0;
    }
    if ((val & 0x00000000ffffffffull) == 0) {
        return 1;
    }
    return -1;
}
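
/* For example (illustrative): is_const_p16(0xffff0000) returns 1, the
   index of the only non-zero halfword, while is_const_p16(0x10001)
   returns -1.  Likewise is_const_p32(0x1234) returns 0 and
   is_const_p32(1ull << 40) returns 1.  */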

/*
 * Accept bit patterns like these:
 *  0....01....1
 *  1....10....0
 *  1..10..01..1
 *  0..01..10..0
 * Copied from gcc sources.
 */
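/* For example (illustrative): 0x00ffff00 is accepted (a single run of
   ones), as is 0xff000000000000ff (a run wrapping around bit 0), while
   0x0f0f is rejected for having two separate runs.  */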
static bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    uint64_t uval = val;

    if (ct & TCG_CT_CONST) {
        return true;
    }
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)val;
        val = (int32_t)val;
    }

    if (ct & TCG_CT_CONST_CMP) {
        if (is_tst_cond(cond)) {
            if (is_const_p16(uval) >= 0) {
                return true;  /* TMxx */
            }
            if (risbg_mask(uval)) {
                return true;  /* RISBG */
            }
            return false;
        }

        if (type == TCG_TYPE_I32) {
            return true;
        }

        switch (cond) {
        case TCG_COND_EQ:
        case TCG_COND_NE:
            ct |= TCG_CT_CONST_S32 | TCG_CT_CONST_U32;  /* CGFI or CLGFI */
            break;
        case TCG_COND_LT:
        case TCG_COND_GE:
        case TCG_COND_LE:
        case TCG_COND_GT:
            ct |= TCG_CT_CONST_S32;  /* CGFI */
            break;
        case TCG_COND_LTU:
        case TCG_COND_GEU:
        case TCG_COND_LEU:
        case TCG_COND_GTU:
            ct |= TCG_CT_CONST_U32;  /* CLGFI */
            break;
        case TCG_COND_TSTNE:
        case TCG_COND_TSTEQ:
            /* checked above, fallthru */
        default:
            g_assert_not_reached();
        }
    }

    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_M1) && val == -1) {
        return true;
    }

    if (ct & TCG_CT_CONST_INV) {
        val = ~val;
    }
    if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
        return true;
    }
    return false;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF-a without the m4 field */
static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
}

/* RRF-a with the m4 field */
static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
                               TCGReg r1, TCGReg r2, TCGReg r3, int m4)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
                             int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    /*
     * Shift bit 4 of each regno to its corresponding bit of RXB.
     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
     * is the left-shift of the 4th operand.
     */
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}
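
/* For example (illustrative): TCG_REG_V20 has register number 52, i.e.
   0x34, so RXB(TCG_REG_V20, 0, 0, 0) == 0x10 << 7 == 0x800, the RXB bit
   of the first operand within the final halfword of the insn.  */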

static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_debug_assert(is_vector_reg(v4));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
}

static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg r2, TCGReg r3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_general_reg(r2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
    tcg_out16(s, r3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}

static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_general_reg(r1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(0, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(x2));
    tcg_debug_assert(is_general_reg(b2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
    tcg_out16(s, (b2 << 12) | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
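
/* For example (illustrative): tcg_out_insn(s, RRE, LGR, dst, src)
   expands to tcg_out_insn_RRE(s, RRE_LGR, dst, src), so a mismatched
   format/opcode pair fails to compile.  */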


/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src == dst) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(dst) && is_general_reg(src))) {
            tcg_out_insn(s, RR, LR, dst, src);
            break;
        }
        /* fallthru */

    case TCG_TYPE_I64:
        if (likely(is_general_reg(dst))) {
            if (likely(is_general_reg(src))) {
                tcg_out_insn(s, RRE, LGR, dst, src);
            } else {
                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
            }
            break;
        } else if (is_general_reg(src)) {
            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static const S390Opcode li_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};
static const S390Opcode oi_insns[4] = {
    RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
};
static const S390Opcode lif_insns[2] = {
    RIL_LLILF, RIL_LLIHF,
};
static const S390Opcode tm_insns[4] = {
    RI_TMLL, RI_TMLH, RI_TMHL, RI_TMHH
};

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    ptrdiff_t pc_off;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
        return;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (sval == (int32_t)sval) {
        tcg_out_insn(s, RIL, LGFI, ret, sval);
        return;
    }

    i = is_const_p32(uval);
    if (i >= 0) {
        tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
        return;
    }

    /* Try for PC-relative address load.  For odd addresses, add one. */
    pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
    if (pc_off == (int32_t)pc_off) {
        tcg_out_insn(s, RIL, LARL, ret, pc_off);
        if (sval & 1) {
            tcg_out_insn(s, RI, AGHI, ret, 1);
        }
        return;
    }

    /* Otherwise, load it by parts. */
    i = is_const_p16((uint32_t)uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, LLILF, ret, uval);
    }
    uval >>= 32;
    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, OIHF, ret, uval);
    }
}
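
/* Worked example (illustrative): loading 0x123456789abcdef0 matches
   none of the single-insn cases, so it is built by parts:
   LLILF ret,0x9abcdef0 for the low half, then OIHF ret,0x12345678.  */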

/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}

static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
                            TCGReg data, TCGReg base, TCGReg index,
                            tcg_target_long ofs, int m3)
{
    if (ofs < 0 || ofs >= 0x1000) {
        if (ofs >= -0x80000 && ofs < 0x80000) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
            base = TCG_TMP0;
            index = TCG_REG_NONE;
            ofs = 0;
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
            if (index != TCG_REG_NONE) {
                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
            }
            index = TCG_TMP0;
            ofs = 0;
        }
    }
    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
}

/* load data without address translation or endianness conversion */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
            break;
        }
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
        }
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
}
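
/* Note: msb/lsb use the IBM bit numbering of RISBG, in which bit 0 is
   the most significant; ofs is the amount by which SRC is rotated left
   before insertion, and z=1 zeroes the bits outside msb..lsb instead
   of preserving DEST.  */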

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGBR, dest, src);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGCR, dest, src);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGHR, dest, src);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGHR, dest, src);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32s(s, dest, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_mov(s, TCG_TYPE_I32, dest, src);
}

static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
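
/* Worked example (illustrative): val = 0xff000000000000ff sets both
   bit 63 and bit 0, so the wraparound case applies: ~val gives
   msb = 64 - 8 = 56 and lsb = 8 - 1 = 7, and RISBG selects IBM bits
   56..63 wrapping around to 0..7, i.e. the low and high bytes.  */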

static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tcg_out_ext32u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xff) {
        tcg_out_ext8u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xffff) {
        tcg_out_ext16u(s, dest, dest);
        return;
    }

    i = is_const_p16(~val & valid);
    if (i >= 0) {
        tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(~val & valid);
    tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
    if (i >= 0) {
        tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
        return;
    }

    if (risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    g_assert_not_reached();
}
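
/* For example (illustrative): for val = 0xffffffffffff0fff we have
   is_const_p16(~val) == 0, emitting the single insn NILL dest,0x0fff;
   a mask such as 0x0000ffffffff0000 fails the p16 and p32 tests but
   satisfies risbg_mask and is handled by RISBG.  */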

static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
{
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    i = is_const_p16(val);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(val);
    if (i >= 0) {
        tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
        return;
    }

    g_assert_not_reached();
}

static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
{
    switch (is_const_p32(val)) {
    case 0:
        tcg_out_insn(s, RIL, XILF, dest, val);
        break;
    case 1:
        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        break;
    default:
        g_assert_not_reached();
    }
}

static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                     TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
{
    bool is_unsigned = is_unsigned_cond(c);
    TCGCond inv_c = tcg_invert_cond(c);
    S390Opcode op;

    if (is_tst_cond(c)) {
        tcg_debug_assert(!need_carry);

        if (!c2const) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RRFa, NRK, TCG_REG_R0, r1, c2);
            } else {
                tcg_out_insn(s, RRFa, NGRK, TCG_REG_R0, r1, c2);
            }
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            c2 = (uint32_t)c2;
        }

        int i = is_const_p16(c2);
        if (i >= 0) {
            tcg_out_insn_RI(s, tm_insns[i], r1, c2 >> (i * 16));
            *inv_cc = c == TCG_COND_TSTEQ ? S390_TM_NE : S390_TM_EQ;
            return *inv_cc ^ 15;
        }

        if (risbg_mask(c2)) {
            tgen_andi_risbg(s, TCG_REG_R0, r1, c2);
            goto exit;
        }
        g_assert_not_reached();
    }

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                *inv_cc = tcg_cond_to_ltr_cond[inv_c];
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            op = (is_unsigned ? RIL_CLFI : RIL_CFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /* Should match TCG_CT_CONST_CMP. */
        switch (c) {
        case TCG_COND_LT:
        case TCG_COND_GE:
        case TCG_COND_LE:
        case TCG_COND_GT:
            tcg_debug_assert(c2 == (int32_t)c2);
            op = RIL_CGFI;
            break;
        case TCG_COND_EQ:
        case TCG_COND_NE:
            if (c2 == (int32_t)c2) {
                op = RIL_CGFI;
                break;
            }
            /* fall through */
        case TCG_COND_LTU:
        case TCG_COND_GEU:
        case TCG_COND_LEU:
        case TCG_COND_GTU:
            tcg_debug_assert(c2 == (uint32_t)c2);
            op = RIL_CLGFI;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_out_insn_RIL(s, op, r1, c2);
    } else if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    *inv_cc = tcg_cond_to_s390_cond[inv_c];
    return tcg_cond_to_s390_cond[c];
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    int inv_cc;
    return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2,
                         bool c2const, bool neg)
{
    int cc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (HAVE_FACILITY(LOAD_ON_COND2)) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIEg, LOCGHI, dest, neg ? -1 : 1, cc);
        return;
    }

    switch (cond) {
    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (!c2const) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            cond = tcg_swap_cond(cond);
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /*
         * The result of a compare has CC=2 for GT and CC=3 unused.
         * ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.
         */
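        /*
         * Worked example (illustrative): for x >u y, the compare sets
         * CC=2 exactly when the condition holds, so the carry is 1 and
         * ALCGR computes 0 + 0 + 1 = 1; otherwise CC is 0 or 1 and the
         * result is 0.
         */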
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        if (neg) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LCR, dest, dest);
            } else {
                tcg_out_insn(s, RRE, LCGR, dest, dest);
            }
        }
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /*
         * As above, but we're looking for borrow, or !carry.
         * The second insn computes d - d - borrow, or -1 for true
         * and 0 for false.  So we must mask to 1 bit afterward.
         */
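        /*
         * Worked example (illustrative): for x <=u y the compare leaves
         * the carry clear, so SLBGR computes 0 - 0 - 1 = -1 (all ones);
         * the mask below reduces that to the boolean 1, and the -1 is
         * kept as-is for the negated form.
         */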
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        if (!neg) {
            tgen_andi(s, type, dest, 1);
        }
        return;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
    tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
    tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, neg ? -1 : 1);
    tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
}

static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
                             TCGArg v3, int v3const, TCGReg v4,
                             int cc, int inv_cc)
{
    TCGReg src;

    if (v3const) {
        if (dest == v4) {
            if (HAVE_FACILITY(LOAD_ON_COND2)) {
                /* Emit: if (cc) dest = v3. */
                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
                return;
            }
            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
            src = TCG_TMP0;
        } else {
            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
            tcg_out_insn(s, RI, LGHI, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    } else {
        if (HAVE_FACILITY(MISC_INSN_EXT3)) {
            /* Emit: dest = cc ? v3 : v4. */
            tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
            return;
        }
        if (dest == v4) {
            src = v3;
        } else {
            tcg_out_mov(s, type, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    }

    /* Emit: if (cc) dest = src. */
    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const, TCGReg v4)
{
    int cc, inv_cc;

    cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}

static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
        return;
    }

    /*
     * Conditions from FLOGR are:
     *   2 -> one bit found
     *   8 -> no one bit found
     */
    tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
}
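
/* For example (illustrative): FLOGR on 0x0000ffff00000000 sets
   R0 = 16 with CC 2; FLOGR on zero sets R0 = 64 with CC 0, which the
   mask of 8 above uses to substitute A2 for the "no bit found" case.  */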

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    /* With MIE3, and bit 0 of m4 set, we get the complete result. */
    if (HAVE_FACILITY(MISC_INSN_EXT3)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, dest, src);
            src = dest;
        }
        tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
        return;
    }

    /* Without MIE3, each byte gets the count of bits for the byte. */
    tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);

    /* Multiply to sum each byte at the top of the word. */
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
        tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
        tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
        tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
    }
}
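
/* Worked example (illustrative, 32-bit path): src = 0xf00f00ff yields
   per-byte counts 0x04040008; multiplying by 0x01010101 accumulates
   4+4+0+8 = 16 into the top byte (no carries, since each byte sum is
   at most 32), and the shift by 24 extracts it.  */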

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}
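
/* For example (illustrative): ofs = 8, len = 16 gives lsb = 55 and
   msb = 40; RISBG rotates SRC left by 8 and inserts IBM bits 40..55,
   i.e. bits 8..23 of DEST.  */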

static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}
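
/* For example (illustrative): ofs = 8, len = 16 rotates SRC left by 56
   (i.e. right by 8) and keeps IBM bits 48..63, the low 16 bits, zeroing
   the rest: dest = (src >> 8) & 0xffff.  */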
1571
1572static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
1573{
1574    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1575    if (off == (int16_t)off) {
1576        tcg_out_insn(s, RI, BRC, cc, off);
1577    } else if (off == (int32_t)off) {
1578        tcg_out_insn(s, RIL, BRCL, cc, off);
1579    } else {
1580        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1581        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1582    }
1583}
1584
1585static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
1586{
1587    if (l->has_value) {
1588        tgen_gotoi(s, cc, l->u.value_ptr);
1589    } else {
1590        tcg_out16(s, RI_BRC | (cc << 4));
1591        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
1592        s->code_ptr += 1;
1593    }
1594}
1595
1596static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1597                                TCGReg r1, TCGReg r2, TCGLabel *l)
1598{
1599    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1600    /* Format RIE-b */
1601    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1602    tcg_out16(s, 0);
1603    tcg_out16(s, cc << 12 | (opc & 0xff));
1604}
1605
1606static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1607                                    TCGReg r1, int i2, TCGLabel *l)
1608{
1609    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1610    /* Format RIE-c */
1611    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1612    tcg_out16(s, 0);
1613    tcg_out16(s, (i2 << 8) | (opc & 0xff));
1614}
1615
1616static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1617                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1618{
1619    int cc;
1620
1621    if (!is_tst_cond(c)) {
1622        bool is_unsigned = is_unsigned_cond(c);
1623        bool in_range;
1624        S390Opcode opc;
1625
1626        cc = tcg_cond_to_s390_cond[c];
1627
1628        if (!c2const) {
1629            opc = (type == TCG_TYPE_I32
1630                   ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
1631                   : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
1632            tgen_compare_branch(s, opc, cc, r1, c2, l);
1633            return;
1634        }
1635
1636        /*
1637         * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1638         * If the immediate we've been given does not fit that range, we'll
1639         * fall back to separate compare and branch instructions using the
1640         * larger comparison range afforded by COMPARE IMMEDIATE.
1641         */
1642        if (type == TCG_TYPE_I32) {
1643            if (is_unsigned) {
1644                opc = RIEc_CLIJ;
1645                in_range = (uint32_t)c2 == (uint8_t)c2;
1646            } else {
1647                opc = RIEc_CIJ;
1648                in_range = (int32_t)c2 == (int8_t)c2;
1649            }
1650        } else {
1651            if (is_unsigned) {
1652                opc = RIEc_CLGIJ;
1653                in_range = (uint64_t)c2 == (uint8_t)c2;
1654            } else {
1655                opc = RIEc_CGIJ;
1656                in_range = (int64_t)c2 == (int8_t)c2;
1657            }
1658        }
1659        if (in_range) {
1660            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1661            return;
1662        }
1663    }
1664
1665    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
1666    tgen_branch(s, cc, l);
1667}
1668
1669static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
1670{
1671    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1672    if (off == (int32_t)off) {
1673        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1674    } else {
1675        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1676        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1677    }
1678}
1679
1680static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
1681                         const TCGHelperInfo *info)
1682{
1683    tcg_out_call_int(s, dest);
1684}
1685
1686typedef struct {
1687    TCGReg base;
1688    TCGReg index;
1689    int disp;
1690    TCGAtomAlign aa;
1691} HostAddress;
1692
1693bool tcg_target_has_memory_bswap(MemOp memop)
1694{
1695    TCGAtomAlign aa;
1696
1697    if ((memop & MO_SIZE) <= MO_64) {
1698        return true;
1699    }
1700
1701    /*
1702     * Reject 16-byte memop with 16-byte atomicity,
1703     * but do allow a pair of 64-bit operations.
1704     */
1705    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
1706    return aa.atom <= MO_64;
1707}
1708
1709static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
1710                                   HostAddress h)
1711{
1712    switch (opc & (MO_SSIZE | MO_BSWAP)) {
1713    case MO_UB:
1714        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
1715        break;
1716    case MO_SB:
1717        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
1718        break;
1719
1720    case MO_UW | MO_BSWAP:
1721        /* swapped unsigned halfword load with upper bits zeroed */
1722        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1723        tcg_out_ext16u(s, data, data);
1724        break;
1725    case MO_UW:
1726        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
1727        break;
1728
1729    case MO_SW | MO_BSWAP:
1730        /* swapped sign-extended halfword load */
1731        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1732        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
1733        break;
1734    case MO_SW:
1735        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
1736        break;
1737
1738    case MO_UL | MO_BSWAP:
1739        /* swapped unsigned int load with upper bits zeroed */
1740        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1741        tcg_out_ext32u(s, data, data);
1742        break;
1743    case MO_UL:
1744        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
1745        break;
1746
1747    case MO_SL | MO_BSWAP:
1748        /* swapped sign-extended int load */
1749        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1750        tcg_out_ext32s(s, data, data);
1751        break;
1752    case MO_SL:
1753        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
1754        break;
1755
1756    case MO_UQ | MO_BSWAP:
1757        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
1758        break;
1759    case MO_UQ:
1760        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
1761        break;
1762
1763    default:
1764        g_assert_not_reached();
1765    }
1766}
1767
1768static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
1769                                   HostAddress h)
1770{
1771    switch (opc & (MO_SIZE | MO_BSWAP)) {
1772    case MO_UB:
1773        if (h.disp >= 0 && h.disp < 0x1000) {
1774            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
1775        } else {
1776            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
1777        }
1778        break;
1779
1780    case MO_UW | MO_BSWAP:
1781        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
1782        break;
1783    case MO_UW:
1784        if (h.disp >= 0 && h.disp < 0x1000) {
1785            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
1786        } else {
1787            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
1788        }
1789        break;
1790
1791    case MO_UL | MO_BSWAP:
1792        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
1793        break;
1794    case MO_UL:
1795        if (h.disp >= 0 && h.disp < 0x1000) {
1796            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
1797        } else {
1798            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
1799        }
1800        break;
1801
1802    case MO_UQ | MO_BSWAP:
1803        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
1804        break;
1805    case MO_UQ:
1806        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
1807        break;
1808
1809    default:
1810        g_assert_not_reached();
1811    }
1812}
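
/*
 * The RX store forms carry an unsigned 12-bit displacement, hence the
 * disp 0..0xfff tests above; anything outside that range, e.g. a
 * negative displacement, needs the long-displacement RXY forms with
 * their signed 20-bit field.
 */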
1813
1814static const TCGLdstHelperParam ldst_helper_param = {
1815    .ntmp = 1, .tmp = { TCG_TMP0 }
1816};
1817
1818static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1819{
1820    MemOp opc = get_memop(lb->oi);
1821
1822    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1823                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1824        return false;
1825    }
1826
1827    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1828    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
1829    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1830
1831    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1832    return true;
1833}
1834
1835static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1836{
1837    MemOp opc = get_memop(lb->oi);
1838
1839    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1840                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1841        return false;
1842    }
1843
1844    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
1845    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);
1846
1847    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1848    return true;
1849}
1850
1851/* We expect to use a 20-bit negative offset on the TLB memory ops.  */
1852#define MIN_TLB_MASK_TABLE_OFS  -(1 << 19)
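/* -(1 << 19) is the most negative value representable in the signed
   20-bit displacement of the RXY-format NG/AG used below. */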
1853
1854/*
1855 * For system-mode, perform the TLB load and compare.
1856 * For user-mode, perform any required alignment tests.
1857 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1858 * is required and fill in @h with the host address for the fast path.
1859 */
1860static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
1861                                           TCGReg addr_reg, MemOpIdx oi,
1862                                           bool is_ld)
1863{
1864    TCGType addr_type = s->addr_type;
1865    TCGLabelQemuLdst *ldst = NULL;
1866    MemOp opc = get_memop(oi);
1867    MemOp s_bits = opc & MO_SIZE;
1868    unsigned a_mask;
1869
1870    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
1871    a_mask = (1 << h->aa.align) - 1;
1872
1873    if (tcg_use_softmmu) {
1874        unsigned s_mask = (1 << s_bits) - 1;
1875        int mem_index = get_mmuidx(oi);
1876        int fast_off = tlb_mask_table_ofs(s, mem_index);
1877        int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1878        int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1879        int ofs, a_off;
1880        uint64_t tlb_mask;
1881
1882        ldst = new_ldst_label(s);
1883        ldst->is_ld = is_ld;
1884        ldst->oi = oi;
1885        ldst->addrlo_reg = addr_reg;
1886
1887        tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
1888                     s->page_bits - CPU_TLB_ENTRY_BITS);
1889
1890        tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
1891        tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
1892
1893        /*
1894         * For aligned accesses, we check the first byte and include the
1895         * alignment bits within the address.  For unaligned access, we
1896         * check that we don't cross pages using the address of the last
1897         * byte of the access.
1898         */
1899        a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
1900        tlb_mask = (uint64_t)s->page_mask | a_mask;
1901        if (a_off == 0) {
1902            tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
1903        } else {
1904            tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
1905            tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
1906        }
1907
1908        if (is_ld) {
1909            ofs = offsetof(CPUTLBEntry, addr_read);
1910        } else {
1911            ofs = offsetof(CPUTLBEntry, addr_write);
1912        }
1913        if (addr_type == TCG_TYPE_I32) {
1914            ofs += HOST_BIG_ENDIAN * 4;
1915            tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1916        } else {
1917            tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1918        }
1919
1920        tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1921        ldst->label_ptr[0] = s->code_ptr++;
1922
1923        h->index = TCG_TMP0;
1924        tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
1925                     offsetof(CPUTLBEntry, addend));
1926
1927        if (addr_type == TCG_TYPE_I32) {
1928            tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
1929            h->base = TCG_REG_NONE;
1930        } else {
1931            h->base = addr_reg;
1932        }
1933        h->disp = 0;
1934    } else {
1935        if (a_mask) {
1936            ldst = new_ldst_label(s);
1937            ldst->is_ld = is_ld;
1938            ldst->oi = oi;
1939            ldst->addrlo_reg = addr_reg;
1940
1941            tcg_debug_assert(a_mask <= 0xffff);
1942            tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
1943
1944            tcg_out16(s, RI_BRC | (S390_TM_NE << 4));
1945            ldst->label_ptr[0] = s->code_ptr++;
1946        }
1947
1948        h->base = addr_reg;
1949        if (addr_type == TCG_TYPE_I32) {
1950            tcg_out_ext32u(s, TCG_TMP0, addr_reg);
1951            h->base = TCG_TMP0;
1952        }
1953        if (guest_base < 0x80000) {
1954            h->index = TCG_REG_NONE;
1955            h->disp = guest_base;
1956        } else {
1957            h->index = TCG_GUEST_BASE_REG;
1958            h->disp = 0;
1959        }
1960    }
1961
1962    return ldst;
1963}
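
/*
 * Worked example of the softmmu path above, assuming 4KiB guest pages
 * (page_bits = 12) and 32-byte TLB entries (CPU_TLB_ENTRY_BITS = 5):
 * SRLG by 12 - 5 = 7 leaves the page index pre-scaled by the entry
 * size; NG with the cached mask bounds it within the table and AG adds
 * the table base, so TCG_TMP0 points at the CPUTLBEntry.  For a 4-byte
 * access with only 1-byte guaranteed alignment, a_off = 3: the compare
 * value is built from addr + 3, so an access crossing a page boundary
 * mismatches the TLB comparator and takes the slow path.
 */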
1964
1965static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1966                            MemOpIdx oi, TCGType data_type)
1967{
1968    TCGLabelQemuLdst *ldst;
1969    HostAddress h;
1970
1971    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1972    tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
1973
1974    if (ldst) {
1975        ldst->type = data_type;
1976        ldst->datalo_reg = data_reg;
1977        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1978    }
1979}
1980
1981static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1982                            MemOpIdx oi, TCGType data_type)
1983{
1984    TCGLabelQemuLdst *ldst;
1985    HostAddress h;
1986
1987    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1988    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1989
1990    if (ldst) {
1991        ldst->type = data_type;
1992        ldst->datalo_reg = data_reg;
1993        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1994    }
1995}
1996
1997static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
1998                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1999{
2000    TCGLabel *l1 = NULL, *l2 = NULL;
2001    TCGLabelQemuLdst *ldst;
2002    HostAddress h;
2003    bool need_bswap;
2004    bool use_pair;
2005    S390Opcode insn;
2006
2007    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
2008
2009    use_pair = h.aa.atom < MO_128;
2010    need_bswap = get_memop(oi) & MO_BSWAP;
2011
2012    if (!use_pair) {
2013        /*
2014         * Atomicity requires we use LPQ.  If we've already checked for
2015         * 16-byte alignment, that's all we need.  If we arrive with
2016         * lesser alignment, then we have already determined that an
2017         * unaligned access can be satisfied with a pair of 8-byte loads.
2018         */
2019        if (h.aa.align < MO_128) {
2020            use_pair = true;
2021            l1 = gen_new_label();
2022            l2 = gen_new_label();
2023
2024            tcg_out_insn(s, RI, TMLL, addr_reg, 15);
2025            tgen_branch(s, S390_TM_NE, l1);
2026        }
2027
2028        tcg_debug_assert(!need_bswap);
2029        tcg_debug_assert(datalo & 1);
2030        tcg_debug_assert(datahi == datalo - 1);
2031        insn = is_ld ? RXY_LPQ : RXY_STPQ;
2032        tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp);
2033
2034        if (use_pair) {
2035            tgen_branch(s, S390_CC_ALWAYS, l2);
2036            tcg_out_label(s, l1);
2037        }
2038    }
2039    if (use_pair) {
2040        TCGReg d1, d2;
2041
2042        if (need_bswap) {
2043            d1 = datalo, d2 = datahi;
2044            insn = is_ld ? RXY_LRVG : RXY_STRVG;
2045        } else {
2046            d1 = datahi, d2 = datalo;
2047            insn = is_ld ? RXY_LG : RXY_STG;
2048        }
2049
2050        if (h.base == d1 || h.index == d1) {
2051            tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp);
2052            h.base = TCG_TMP0;
2053            h.index = TCG_REG_NONE;
2054            h.disp = 0;
2055        }
2056        tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp);
2057        tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8);
2058    }
2059    if (l2) {
2060        tcg_out_label(s, l2);
2061    }
2062
2063    if (ldst) {
2064        ldst->type = TCG_TYPE_I128;
2065        ldst->datalo_reg = datalo;
2066        ldst->datahi_reg = datahi;
2067        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2068    }
2069}
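
/*
 * LPQ and STPQ require an even/odd register pair and a 16-byte aligned
 * address, which is what the asserts above encode (datahi even, datalo
 * = datahi + 1) and what the TMLL of the low four address bits tests
 * when 16-byte alignment has not already been established.
 */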
2070
2071static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
2072{
2073    /* Reuse the zeroing that exists for goto_ptr.  */
2074    if (a0 == 0) {
2075        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
2076    } else {
2077        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
2078        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
2079    }
2080}
2081
2082static void tcg_out_goto_tb(TCGContext *s, int which)
2083{
2084    /*
2085     * Branch displacement must be aligned for atomic patching;
2086     * see if we need to add an extra nop before the branch.
2087     */
2088    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
2089        tcg_out16(s, NOP);
2090    }
2091    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
2092    set_jmp_insn_offset(s, which);
2093    s->code_ptr += 2;
2094    set_jmp_reset_offset(s, which);
2095}
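
/*
 * The BRCL opcode occupies one halfword, so aligning code_ptr + 1 (one
 * halfword past the opcode) to 4 bytes aligns the 32-bit displacement
 * that follows, allowing tb_target_set_jmp_target below to patch it
 * with a single aligned store.
 */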
2096
2097void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2098                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2099{
2100    if (!HAVE_FACILITY(GEN_INST_EXT)) {
2101        return;
2102    }
2103    /* patch the branch destination */
2104    uintptr_t addr = tb->jmp_target_addr[n];
2105    intptr_t disp = addr - (jmp_rx - 2);
2106    qatomic_set((int32_t *)jmp_rw, disp / 2);
2107    /* no need to flush icache explicitly */
2108}
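
/*
 * Example: jmp_rx points at the displacement field, two bytes past the
 * start of the BRCL, so a target 0x1000 bytes beyond the branch insn is
 * stored as (addr - (jmp_rx - 2)) / 2 = 0x800 halfwords.
 */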
2109
2110# define OP_32_64(x) \
2111        case glue(glue(INDEX_op_,x),_i32): \
2112        case glue(glue(INDEX_op_,x),_i64)
2113
2114static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2115                              const TCGArg args[TCG_MAX_OP_ARGS],
2116                              const int const_args[TCG_MAX_OP_ARGS])
2117{
2118    S390Opcode op, op2;
2119    TCGArg a0, a1, a2;
2120
2121    switch (opc) {
2122    case INDEX_op_goto_ptr:
2123        a0 = args[0];
2124        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
2125        break;
2126
2127    OP_32_64(ld8u):
2128        /* ??? LLC (RXY format) is only present with the extended-immediate
2129           facility, whereas LLGC is always present.  */
2130        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
2131        break;
2132
2133    OP_32_64(ld8s):
2134        /* ??? LB is no smaller than LGB, so no point in using it.  */
2135        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
2136        break;
2137
2138    OP_32_64(ld16u):
2139        /* ??? LLH (RXY format) is only present with the extended-immediate
2140           facility, whereas LLGH is always present.  */
2141        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
2142        break;
2143
2144    case INDEX_op_ld16s_i32:
2145        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
2146        break;
2147
2148    case INDEX_op_ld_i32:
2149        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2150        break;
2151
2152    OP_32_64(st8):
2153        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
2154                    TCG_REG_NONE, args[2]);
2155        break;
2156
2157    OP_32_64(st16):
2158        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
2159                    TCG_REG_NONE, args[2]);
2160        break;
2161
2162    case INDEX_op_st_i32:
2163        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2164        break;
2165
2166    case INDEX_op_add_i32:
2167        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2168        if (const_args[2]) {
2169        do_addi_32:
2170            if (a0 == a1) {
2171                if (a2 == (int16_t)a2) {
2172                    tcg_out_insn(s, RI, AHI, a0, a2);
2173                    break;
2174                }
2175                tcg_out_insn(s, RIL, AFI, a0, a2);
2176                break;
2177            }
2178            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2179        } else if (a0 == a1) {
2180            tcg_out_insn(s, RR, AR, a0, a2);
2181        } else {
2182            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2183        }
2184        break;
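
    /*
     * The immediate add above picks the shortest form: AHI when the
     * constant fits in 16 signed bits (e.g. 100), AFI for the full
     * 32-bit range, and LA (load address) as a three-operand add when
     * the output register differs from the input.
     */
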
2185    case INDEX_op_sub_i32:
2186        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2187        if (const_args[2]) {
2188            a2 = -a2;
2189            goto do_addi_32;
2190        } else if (a0 == a1) {
2191            tcg_out_insn(s, RR, SR, a0, a2);
2192        } else {
2193            tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
2194        }
2195        break;
2196
2197    case INDEX_op_and_i32:
2198        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2199        if (const_args[2]) {
2200            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2201            tgen_andi(s, TCG_TYPE_I32, a0, a2);
2202        } else if (a0 == a1) {
2203            tcg_out_insn(s, RR, NR, a0, a2);
2204        } else {
2205            tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
2206        }
2207        break;
2208    case INDEX_op_or_i32:
2209        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2210        if (const_args[2]) {
2211            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2212            tgen_ori(s, a0, a2);
2213        } else if (a0 == a1) {
2214            tcg_out_insn(s, RR, OR, a0, a2);
2215        } else {
2216            tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
2217        }
2218        break;
2219    case INDEX_op_xor_i32:
2220        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2221        if (const_args[2]) {
2222            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2223            tcg_out_insn(s, RIL, XILF, a0, a2);
2224        } else if (a0 == a1) {
2225            tcg_out_insn(s, RR, XR, args[0], args[2]);
2226        } else {
2227            tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
2228        }
2229        break;
2230
2231    case INDEX_op_andc_i32:
2232        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2233        if (const_args[2]) {
2234            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2235            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
2236        } else {
2237            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
2238        }
2239        break;
2240    case INDEX_op_orc_i32:
2241        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2242        if (const_args[2]) {
2243            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2244            tgen_ori(s, a0, (uint32_t)~a2);
2245        } else {
2246            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
2247        }
2248        break;
2249    case INDEX_op_eqv_i32:
2250        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2251        if (const_args[2]) {
2252            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2253            tcg_out_insn(s, RIL, XILF, a0, ~a2);
2254        } else {
2255            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
2256        }
2257        break;
2258    case INDEX_op_nand_i32:
2259        tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
2260        break;
2261    case INDEX_op_nor_i32:
2262        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
2263        break;
2264
2265    case INDEX_op_neg_i32:
2266        tcg_out_insn(s, RR, LCR, args[0], args[1]);
2267        break;
2268    case INDEX_op_not_i32:
2269        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
2270        break;
2271
2272    case INDEX_op_mul_i32:
2273        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2274        if (const_args[2]) {
2275            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2276            if (a2 == (int16_t)a2) {
2277                tcg_out_insn(s, RI, MHI, a0, a2);
2278            } else {
2279                tcg_out_insn(s, RIL, MSFI, a0, a2);
2280            }
2281        } else if (a0 == a1) {
2282            tcg_out_insn(s, RRE, MSR, a0, a2);
2283        } else {
2284            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
2285        }
2286        break;
2287
2288    case INDEX_op_div2_i32:
2289        tcg_debug_assert(args[0] == args[2]);
2290        tcg_debug_assert(args[1] == args[3]);
2291        tcg_debug_assert((args[1] & 1) == 0);
2292        tcg_debug_assert(args[0] == args[1] + 1);
2293        tcg_out_insn(s, RR, DR, args[1], args[4]);
2294        break;
2295    case INDEX_op_divu2_i32:
2296        tcg_debug_assert(args[0] == args[2]);
2297        tcg_debug_assert(args[1] == args[3]);
2298        tcg_debug_assert((args[1] & 1) == 0);
2299        tcg_debug_assert(args[0] == args[1] + 1);
2300        tcg_out_insn(s, RRE, DLR, args[1], args[4]);
2301        break;
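
    /*
     * DR and DLR above take the 64-bit dividend in the even/odd pair
     * r(n):r(n+1) and return the remainder in the even register and
     * the quotient in the odd one, which is exactly what the asserts
     * pin down: args[1] even, args[0] == args[1] + 1.
     */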
2302
2303    case INDEX_op_shl_i32:
2304        op = RS_SLL;
2305        op2 = RSY_SLLK;
2306    do_shift32:
2307        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2308        if (a0 == a1) {
2309            if (const_args[2]) {
2310                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
2311            } else {
2312                tcg_out_sh32(s, op, a0, a2, 0);
2313            }
2314        } else {
2315            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift.  */
2316            if (const_args[2]) {
2317                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
2318            } else {
2319                tcg_out_sh64(s, op2, a0, a1, a2, 0);
2320            }
2321        }
2322        break;
2323    case INDEX_op_shr_i32:
2324        op = RS_SRL;
2325        op2 = RSY_SRLK;
2326        goto do_shift32;
2327    case INDEX_op_sar_i32:
2328        op = RS_SRA;
2329        op2 = RSY_SRAK;
2330        goto do_shift32;
2331
2332    case INDEX_op_rotl_i32:
2333        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
2334        if (const_args[2]) {
2335            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
2336        } else {
2337            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
2338        }
2339        break;
2340    case INDEX_op_rotr_i32:
2341        if (const_args[2]) {
2342            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
2343                         TCG_REG_NONE, (32 - args[2]) & 31);
2344        } else {
2345            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2346            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
2347        }
2348        break;
2349
2350    case INDEX_op_bswap16_i32:
2351        a0 = args[0], a1 = args[1], a2 = args[2];
2352        tcg_out_insn(s, RRE, LRVR, a0, a1);
2353        if (a2 & TCG_BSWAP_OS) {
2354            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
2355        } else {
2356            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
2357        }
2358        break;
2359    case INDEX_op_bswap16_i64:
2360        a0 = args[0], a1 = args[1], a2 = args[2];
2361        tcg_out_insn(s, RRE, LRVGR, a0, a1);
2362        if (a2 & TCG_BSWAP_OS) {
2363            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
2364        } else {
2365            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
2366        }
2367        break;
2368
2369    case INDEX_op_bswap32_i32:
2370        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2371        break;
2372    case INDEX_op_bswap32_i64:
2373        a0 = args[0], a1 = args[1], a2 = args[2];
2374        tcg_out_insn(s, RRE, LRVR, a0, a1);
2375        if (a2 & TCG_BSWAP_OS) {
2376            tcg_out_ext32s(s, a0, a0);
2377        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2378            tcg_out_ext32u(s, a0, a0);
2379        }
2380        break;
2381
2382    case INDEX_op_add2_i32:
2383        if (const_args[4]) {
2384            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2385        } else {
2386            tcg_out_insn(s, RR, ALR, args[0], args[4]);
2387        }
2388        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2389        break;
2390    case INDEX_op_sub2_i32:
2391        if (const_args[4]) {
2392            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2393        } else {
2394            tcg_out_insn(s, RR, SLR, args[0], args[4]);
2395        }
2396        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2397        break;
2398
2399    case INDEX_op_br:
2400        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2401        break;
2402
2403    case INDEX_op_brcond_i32:
2404        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2405                    args[1], const_args[1], arg_label(args[3]));
2406        break;
2407    case INDEX_op_setcond_i32:
2408        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2409                     args[2], const_args[2], false);
2410        break;
2411    case INDEX_op_negsetcond_i32:
2412        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2413                     args[2], const_args[2], true);
2414        break;
2415    case INDEX_op_movcond_i32:
2416        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2417                     args[2], const_args[2], args[3], const_args[3], args[4]);
2418        break;
2419
2420    case INDEX_op_qemu_ld_a32_i32:
2421    case INDEX_op_qemu_ld_a64_i32:
2422        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
2423        break;
2424    case INDEX_op_qemu_ld_a32_i64:
2425    case INDEX_op_qemu_ld_a64_i64:
2426        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
2427        break;
2428    case INDEX_op_qemu_st_a32_i32:
2429    case INDEX_op_qemu_st_a64_i32:
2430        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
2431        break;
2432    case INDEX_op_qemu_st_a32_i64:
2433    case INDEX_op_qemu_st_a64_i64:
2434        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
2435        break;
2436    case INDEX_op_qemu_ld_a32_i128:
2437    case INDEX_op_qemu_ld_a64_i128:
2438        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
2439        break;
2440    case INDEX_op_qemu_st_a32_i128:
2441    case INDEX_op_qemu_st_a64_i128:
2442        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
2443        break;
2444
2445    case INDEX_op_ld16s_i64:
2446        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2447        break;
2448    case INDEX_op_ld32u_i64:
2449        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2450        break;
2451    case INDEX_op_ld32s_i64:
2452        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2453        break;
2454    case INDEX_op_ld_i64:
2455        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2456        break;
2457
2458    case INDEX_op_st32_i64:
2459        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2460        break;
2461    case INDEX_op_st_i64:
2462        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2463        break;
2464
2465    case INDEX_op_add_i64:
2466        a0 = args[0], a1 = args[1], a2 = args[2];
2467        if (const_args[2]) {
2468        do_addi_64:
2469            if (a0 == a1) {
2470                if (a2 == (int16_t)a2) {
2471                    tcg_out_insn(s, RI, AGHI, a0, a2);
2472                    break;
2473                }
2474                if (a2 == (int32_t)a2) {
2475                    tcg_out_insn(s, RIL, AGFI, a0, a2);
2476                    break;
2477                }
2478                if (a2 == (uint32_t)a2) {
2479                    tcg_out_insn(s, RIL, ALGFI, a0, a2);
2480                    break;
2481                }
2482                if (-a2 == (uint32_t)-a2) {
2483                    tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2484                    break;
2485                }
2486            }
2487            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2488        } else if (a0 == a1) {
2489            tcg_out_insn(s, RRE, AGR, a0, a2);
2490        } else {
2491            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2492        }
2493        break;
2494    case INDEX_op_sub_i64:
2495        a0 = args[0], a1 = args[1], a2 = args[2];
2496        if (const_args[2]) {
2497            a2 = -a2;
2498            goto do_addi_64;
2499        } else {
2500            tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
2501        }
2502        break;
2503
2504    case INDEX_op_and_i64:
2505        a0 = args[0], a1 = args[1], a2 = args[2];
2506        if (const_args[2]) {
2507            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2508            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2509        } else {
2510            tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
2511        }
2512        break;
2513    case INDEX_op_or_i64:
2514        a0 = args[0], a1 = args[1], a2 = args[2];
2515        if (const_args[2]) {
2516            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2517            tgen_ori(s, a0, a2);
2518        } else {
2519            tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
2520        }
2521        break;
2522    case INDEX_op_xor_i64:
2523        a0 = args[0], a1 = args[1], a2 = args[2];
2524        if (const_args[2]) {
2525            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2526            tgen_xori(s, a0, a2);
2527        } else {
2528            tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
2529        }
2530        break;
2531
2532    case INDEX_op_andc_i64:
2533        a0 = args[0], a1 = args[1], a2 = args[2];
2534        if (const_args[2]) {
2535            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2536            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
2537        } else {
2538            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
2539        }
2540        break;
2541    case INDEX_op_orc_i64:
2542        a0 = args[0], a1 = args[1], a2 = args[2];
2543        if (const_args[2]) {
2544            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2545            tgen_ori(s, a0, ~a2);
2546        } else {
2547            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
2548        }
2549        break;
2550    case INDEX_op_eqv_i64:
2551        a0 = args[0], a1 = args[1], a2 = args[2];
2552        if (const_args[2]) {
2553            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2554            tgen_xori(s, a0, ~a2);
2555        } else {
2556            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
2557        }
2558        break;
2559    case INDEX_op_nand_i64:
2560        tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
2561        break;
2562    case INDEX_op_nor_i64:
2563        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
2564        break;
2565
2566    case INDEX_op_neg_i64:
2567        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2568        break;
2569    case INDEX_op_not_i64:
2570        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
2571        break;
2572    case INDEX_op_bswap64_i64:
2573        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2574        break;
2575
2576    case INDEX_op_mul_i64:
2577        a0 = args[0], a1 = args[1], a2 = args[2];
2578        if (const_args[2]) {
2579            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2580            if (a2 == (int16_t)a2) {
2581                tcg_out_insn(s, RI, MGHI, a0, a2);
2582            } else {
2583                tcg_out_insn(s, RIL, MSGFI, a0, a2);
2584            }
2585        } else if (a0 == a1) {
2586            tcg_out_insn(s, RRE, MSGR, a0, a2);
2587        } else {
2588            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
2589        }
2590        break;
2591
2592    case INDEX_op_div2_i64:
2593        /*
2594         * ??? We get an unnecessary sign-extension of the dividend
2595         * into op0 with this definition, but since we do in fact always
2596         * produce both quotient and remainder, using INDEX_op_div_i64
2597         * instead would require jumping through even more hoops.
2598         */
2599        tcg_debug_assert(args[0] == args[2]);
2600        tcg_debug_assert(args[1] == args[3]);
2601        tcg_debug_assert((args[1] & 1) == 0);
2602        tcg_debug_assert(args[0] == args[1] + 1);
2603        tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
2604        break;
2605    case INDEX_op_divu2_i64:
2606        tcg_debug_assert(args[0] == args[2]);
2607        tcg_debug_assert(args[1] == args[3]);
2608        tcg_debug_assert((args[1] & 1) == 0);
2609        tcg_debug_assert(args[0] == args[1] + 1);
2610        tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
2611        break;
2612    case INDEX_op_mulu2_i64:
2613        tcg_debug_assert(args[0] == args[2]);
2614        tcg_debug_assert((args[1] & 1) == 0);
2615        tcg_debug_assert(args[0] == args[1] + 1);
2616        tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
2617        break;
2618    case INDEX_op_muls2_i64:
2619        tcg_debug_assert((args[1] & 1) == 0);
2620        tcg_debug_assert(args[0] == args[1] + 1);
2621        tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
2622        break;
2623
2624    case INDEX_op_shl_i64:
2625        op = RSY_SLLG;
2626    do_shift64:
2627        if (const_args[2]) {
2628            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2629        } else {
2630            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2631        }
2632        break;
2633    case INDEX_op_shr_i64:
2634        op = RSY_SRLG;
2635        goto do_shift64;
2636    case INDEX_op_sar_i64:
2637        op = RSY_SRAG;
2638        goto do_shift64;
2639
2640    case INDEX_op_rotl_i64:
2641        if (const_args[2]) {
2642            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2643                         TCG_REG_NONE, args[2]);
2644        } else {
2645            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2646        }
2647        break;
2648    case INDEX_op_rotr_i64:
2649        if (const_args[2]) {
2650            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2651                         TCG_REG_NONE, (64 - args[2]) & 63);
2652        } else {
2653            /* We can use the smaller 32-bit negate because only the
2654               low 6 bits are examined for the rotate.  */
2655            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2656            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2657        }
2658        break;
2659
2660    case INDEX_op_add2_i64:
2661        if (const_args[4]) {
2662            if ((int64_t)args[4] >= 0) {
2663                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2664            } else {
2665                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2666            }
2667        } else {
2668            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2669        }
2670        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2671        break;
2672    case INDEX_op_sub2_i64:
2673        if (const_args[4]) {
2674            if ((int64_t)args[4] >= 0) {
2675                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2676            } else {
2677                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2678            }
2679        } else {
2680            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2681        }
2682        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2683        break;
2684
2685    case INDEX_op_brcond_i64:
2686        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2687                    args[1], const_args[1], arg_label(args[3]));
2688        break;
2689    case INDEX_op_setcond_i64:
2690        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2691                     args[2], const_args[2], false);
2692        break;
2693    case INDEX_op_negsetcond_i64:
2694        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2695                     args[2], const_args[2], true);
2696        break;
2697    case INDEX_op_movcond_i64:
2698        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2699                     args[2], const_args[2], args[3], const_args[3], args[4]);
2700        break;
2701
2702    OP_32_64(deposit):
2703        a0 = args[0], a1 = args[1], a2 = args[2];
2704        if (const_args[1]) {
2705            tgen_deposit(s, a0, a2, args[3], args[4], 1);
2706        } else {
2707            /* Since we can't support "0Z" as a constraint, we allow a1 in
2708               any register.  Fix things up as if we had a matching constraint.  */
2709            if (a0 != a1) {
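                /* Relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1. */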
2710                TCGType type = (opc == INDEX_op_deposit_i64);
2711                if (a0 == a2) {
2712                    tcg_out_mov(s, type, TCG_TMP0, a2);
2713                    a2 = TCG_TMP0;
2714                }
2715                tcg_out_mov(s, type, a0, a1);
2716            }
2717            tgen_deposit(s, a0, a2, args[3], args[4], 0);
2718        }
2719        break;
2720
2721    OP_32_64(extract):
2722        tgen_extract(s, args[0], args[1], args[2], args[3]);
2723        break;
2724
2725    case INDEX_op_clz_i64:
2726        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2727        break;
2728
2729    case INDEX_op_ctpop_i32:
2730        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
2731        break;
2732    case INDEX_op_ctpop_i64:
2733        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
2734        break;
2735
2736    case INDEX_op_mb:
2737        /* The host memory model is quite strong; we simply need to
2738           serialize the instruction stream.  */
2739        if (args[0] & TCG_MO_ST_LD) {
2740            /* fast-bcr-serialization facility (45) is present */
2741            tcg_out_insn(s, RR, BCR, 14, 0);
2742        }
2743        break;
2744
2745    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2746    case INDEX_op_mov_i64:
2747    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2748    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2749    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2750    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2751    case INDEX_op_ext8s_i64:
2752    case INDEX_op_ext8u_i32:
2753    case INDEX_op_ext8u_i64:
2754    case INDEX_op_ext16s_i32:
2755    case INDEX_op_ext16s_i64:
2756    case INDEX_op_ext16u_i32:
2757    case INDEX_op_ext16u_i64:
2758    case INDEX_op_ext32s_i64:
2759    case INDEX_op_ext32u_i64:
2760    case INDEX_op_ext_i32_i64:
2761    case INDEX_op_extu_i32_i64:
2762    case INDEX_op_extrl_i64_i32:
2763    default:
2764        g_assert_not_reached();
2765    }
2766}
2767
2768static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2769                            TCGReg dst, TCGReg src)
2770{
2771    if (is_general_reg(src)) {
2772        /* Replicate general register into two MO_64. */
2773        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
2774        if (vece == MO_64) {
2775            return true;
2776        }
2777        src = dst;
2778    }
2779
2780    /*
2781     * Recall that the "standard" integer, within a vector, is the
2782     * rightmost element of the leftmost doubleword, a-la VLLEZ.
2783     * rightmost element of the leftmost doubleword, a la VLLEZ.
2784    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
2785    return true;
2786}
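
/*
 * Example: for vece = MO_8, VREP replicates element (8 >> MO_8) - 1 = 7,
 * the rightmost byte of the leftmost doubleword, i.e. the low byte of
 * the value VLVGP deposited from the general register.
 */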
2787
2788static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2789                             TCGReg dst, TCGReg base, intptr_t offset)
2790{
2791    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
2792    return true;
2793}
2794
2795static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2796                             TCGReg dst, int64_t val)
2797{
2798    int i, mask, msb, lsb;
2799
2800    /* Look for int16_t elements.  */
2801    if (vece <= MO_16 ||
2802        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
2803        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
2804        return;
2805    }
2806
2807    /* Look for bit masks.  */
2808    if (vece == MO_32) {
2809        if (risbg_mask((int32_t)val)) {
2810            /* Handle wraparound by swapping msb and lsb.  */
2811            if ((val & 0x80000001u) == 0x80000001u) {
2812                msb = 32 - ctz32(~val);
2813                lsb = clz32(~val) - 1;
2814            } else {
2815                msb = clz32(val);
2816                lsb = 31 - ctz32(val);
2817            }
2818            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
2819            return;
2820        }
2821    } else {
2822        if (risbg_mask(val)) {
2823            /* Handle wraparound by swapping msb and lsb.  */
2824            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
2826                msb = 64 - ctz64(~val);
2827                lsb = clz64(~val) - 1;
2828            } else {
2829                msb = clz64(val);
2830                lsb = 63 - ctz64(val);
2831            }
2832            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
2833            return;
2834        }
2835    }
2836
2837    /* Look for all bytes 0x00 or 0xff.  */
2838    for (i = mask = 0; i < 8; i++) {
2839        uint8_t byte = val >> (i * 8);
2840        if (byte == 0xff) {
2841            mask |= 1 << i;
2842        } else if (byte != 0) {
2843            break;
2844        }
2845    }
2846    if (i == 8) {
2847        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
2848        return;
2849    }
2850
2851    /* Otherwise, stuff it in the constant pool.  */
2852    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
2853    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
2854    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
2855}
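
/*
 * Worked example of the wraparound case: val = 0xf000000f has
 * ~val = 0x0ffffff0, so msb = 32 - ctz32(~val) = 28 and
 * lsb = clz32(~val) - 1 = 3; VGM with msb > lsb generates ones from
 * bit 28 through bit 3, wrapping around the word, which reproduces val.
 */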
2856
2857static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0,
2858                                  TCGReg a1, TCGReg a2, TCGCond cond)
2859{
2860    bool need_swap = false, need_inv = false;
2861
2862    switch (cond) {
2863    case TCG_COND_EQ:
2864    case TCG_COND_GT:
2865    case TCG_COND_GTU:
2866        break;
2867    case TCG_COND_NE:
2868    case TCG_COND_LE:
2869    case TCG_COND_LEU:
2870        need_inv = true;
2871        break;
2872    case TCG_COND_LT:
2873    case TCG_COND_LTU:
2874        need_swap = true;
2875        break;
2876    case TCG_COND_GE:
2877    case TCG_COND_GEU:
2878        need_swap = need_inv = true;
2879        break;
2880    default:
2881        g_assert_not_reached();
2882    }
2883
2884    if (need_inv) {
2885        cond = tcg_invert_cond(cond);
2886    }
2887    if (need_swap) {
2888        TCGReg swap = a1;
2889        a1 = a2;
2890        a2 = swap;
2891        cond = tcg_swap_cond(cond);
2892    }
2893
2894    switch (cond) {
2895    case TCG_COND_EQ:
2896        tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
2897        break;
2898    case TCG_COND_GT:
2899        tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
2900        break;
2901    case TCG_COND_GTU:
2902        tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
2903        break;
2904    default:
2905        g_assert_not_reached();
2906    }
2907    return need_inv;
2908}
2909
2910static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0,
2911                            TCGReg a1, TCGReg a2, TCGCond cond)
2912{
2913    if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) {
2914        tcg_out_insn(s, VRRc, VNO, a0, a0, a0, 0);
2915    }
2916}
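
/*
 * Example: the hardware only provides EQ, GT and GTU vector compares,
 * so LT(a, b) is emitted as GT(b, a) by swapping operands, and
 * GE(a, b) as swap plus invert, i.e. the VNO above negating GT(b, a).
 */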
2917
2918static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0,
2919                               TCGReg c1, TCGReg c2, TCGArg v3,
2920                               int const_v3, TCGReg v4, TCGCond cond)
2921{
2922    bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP0, c1, c2, cond);
2923
2924    if (!const_v3) {
2925        if (inv) {
2926            tcg_out_insn(s, VRRe, VSEL, a0, v4, v3, TCG_VEC_TMP0);
2927        } else {
2928            tcg_out_insn(s, VRRe, VSEL, a0, v3, v4, TCG_VEC_TMP0);
2929        }
2930    } else if (v3) {
2931        if (inv) {
2932            tcg_out_insn(s, VRRc, VOC, a0, v4, TCG_VEC_TMP0, 0);
2933        } else {
2934            tcg_out_insn(s, VRRc, VO, a0, v4, TCG_VEC_TMP0, 0);
2935        }
2936    } else {
2937        if (inv) {
2938            tcg_out_insn(s, VRRc, VN, a0, v4, TCG_VEC_TMP0, 0);
2939        } else {
2940            tcg_out_insn(s, VRRc, VNC, a0, v4, TCG_VEC_TMP0, 0);
2941        }
2942    }
2943}
2944
2945static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2946                           unsigned vecl, unsigned vece,
2947                           const TCGArg args[TCG_MAX_OP_ARGS],
2948                           const int const_args[TCG_MAX_OP_ARGS])
2949{
2950    TCGType type = vecl + TCG_TYPE_V64;
2951    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
2952
2953    switch (opc) {
2954    case INDEX_op_ld_vec:
2955        tcg_out_ld(s, type, a0, a1, a2);
2956        break;
2957    case INDEX_op_st_vec:
2958        tcg_out_st(s, type, a0, a1, a2);
2959        break;
2960    case INDEX_op_dupm_vec:
2961        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2962        break;
2963
2964    case INDEX_op_abs_vec:
2965        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
2966        break;
2967    case INDEX_op_neg_vec:
2968        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
2969        break;
2970    case INDEX_op_not_vec:
2971        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
2972        break;
2973
2974    case INDEX_op_add_vec:
2975        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
2976        break;
2977    case INDEX_op_sub_vec:
2978        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
2979        break;
2980    case INDEX_op_and_vec:
2981        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
2982        break;
2983    case INDEX_op_andc_vec:
2984        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
2985        break;
2986    case INDEX_op_mul_vec:
2987        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
2988        break;
2989    case INDEX_op_or_vec:
2990        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
2991        break;
2992    case INDEX_op_orc_vec:
2993        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
2994        break;
2995    case INDEX_op_xor_vec:
2996        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
2997        break;
2998    case INDEX_op_nand_vec:
2999        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
3000        break;
3001    case INDEX_op_nor_vec:
3002        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
3003        break;
3004    case INDEX_op_eqv_vec:
3005        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
3006        break;
3007
3008    case INDEX_op_shli_vec:
3009        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
3010        break;
3011    case INDEX_op_shri_vec:
3012        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
3013        break;
3014    case INDEX_op_sari_vec:
3015        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
3016        break;
3017    case INDEX_op_rotli_vec:
3018        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
3019        break;
3020    case INDEX_op_shls_vec:
3021        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
3022        break;
3023    case INDEX_op_shrs_vec:
3024        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
3025        break;
3026    case INDEX_op_sars_vec:
3027        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
3028        break;
3029    case INDEX_op_rotls_vec:
3030        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
3031        break;
3032    case INDEX_op_shlv_vec:
3033        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
3034        break;
3035    case INDEX_op_shrv_vec:
3036        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
3037        break;
3038    case INDEX_op_sarv_vec:
3039        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
3040        break;
3041    case INDEX_op_rotlv_vec:
3042        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
3043        break;
3044
3045    case INDEX_op_smin_vec:
3046        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
3047        break;
3048    case INDEX_op_smax_vec:
3049        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
3050        break;
3051    case INDEX_op_umin_vec:
3052        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
3053        break;
3054    case INDEX_op_umax_vec:
3055        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
3056        break;
3057
3058    case INDEX_op_bitsel_vec:
3059        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
3060        break;
3061
3062    case INDEX_op_cmp_vec:
3063        tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]);
3064        break;
3065    case INDEX_op_cmpsel_vec:
3066        tcg_out_cmpsel_vec(s, vece, a0, a1, a2, args[3], const_args[3],
3067                           args[4], args[5]);
3068        break;
3069
3070    case INDEX_op_s390_vuph_vec:
3071        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
3072        break;
3073    case INDEX_op_s390_vupl_vec:
3074        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
3075        break;
3076    case INDEX_op_s390_vpks_vec:
3077        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
3078        break;
3079
3080    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov.  */
3081    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec.  */
3082    default:
3083        g_assert_not_reached();
3084    }
3085}
3086
3087int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3088{
3089    switch (opc) {
3090    case INDEX_op_abs_vec:
3091    case INDEX_op_add_vec:
3092    case INDEX_op_and_vec:
3093    case INDEX_op_andc_vec:
3094    case INDEX_op_bitsel_vec:
3095    case INDEX_op_eqv_vec:
3096    case INDEX_op_nand_vec:
3097    case INDEX_op_neg_vec:
3098    case INDEX_op_nor_vec:
3099    case INDEX_op_not_vec:
3100    case INDEX_op_or_vec:
3101    case INDEX_op_orc_vec:
3102    case INDEX_op_rotli_vec:
3103    case INDEX_op_rotls_vec:
3104    case INDEX_op_rotlv_vec:
3105    case INDEX_op_sari_vec:
3106    case INDEX_op_sars_vec:
3107    case INDEX_op_sarv_vec:
3108    case INDEX_op_shli_vec:
3109    case INDEX_op_shls_vec:
3110    case INDEX_op_shlv_vec:
3111    case INDEX_op_shri_vec:
3112    case INDEX_op_shrs_vec:
3113    case INDEX_op_shrv_vec:
3114    case INDEX_op_smax_vec:
3115    case INDEX_op_smin_vec:
3116    case INDEX_op_sub_vec:
3117    case INDEX_op_umax_vec:
3118    case INDEX_op_umin_vec:
3119    case INDEX_op_xor_vec:
3120    case INDEX_op_cmp_vec:
3121    case INDEX_op_cmpsel_vec:
3122        return 1;
3123    case INDEX_op_rotrv_vec:
3124        return -1;
3125    case INDEX_op_mul_vec:
3126        return vece < MO_64;
3127    case INDEX_op_ssadd_vec:
3128    case INDEX_op_sssub_vec:
3129        return vece < MO_64 ? -1 : 0;
3130    default:
3131        return 0;
3132    }
3133}
3134
3135static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
3136                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
3137{
3138    TCGv_vec h1 = tcg_temp_new_vec(type);
3139    TCGv_vec h2 = tcg_temp_new_vec(type);
3140    TCGv_vec l1 = tcg_temp_new_vec(type);
3141    TCGv_vec l2 = tcg_temp_new_vec(type);
3142
3143    tcg_debug_assert(vece < MO_64);
3144
3145    /* Unpack with sign-extension. */
3146    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3147              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
3148    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3149              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
3150
3151    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3152              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
3153    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3154              tcgv_vec_arg(l2), tcgv_vec_arg(v2));
3155
3156    /* Arithmetic on a wider element size. */
3157    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
3158              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
3159    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
3160              tcgv_vec_arg(l1), tcgv_vec_arg(l2));
3161
3162    /* Pack with saturation. */
3163    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
3164              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));
3165
3166    tcg_temp_free_vec(h1);
3167    tcg_temp_free_vec(h2);
3168    tcg_temp_free_vec(l1);
3169    tcg_temp_free_vec(l2);
3170}
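
/*
 * Example: a MO_8 ssadd expands to VUPH/VUPL (sign-extending each half
 * of the input bytes to MO_16), a pair of MO_16 adds, and a VPKS that
 * packs back to MO_8 with signed saturation, clamping e.g. 100 + 100
 * to 127.
 */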
3171
3172void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3173                       TCGArg a0, ...)
3174{
3175    va_list va;
3176    TCGv_vec v0, v1, v2, t0;
3177
3178    va_start(va, a0);
3179    v0 = temp_tcgv_vec(arg_temp(a0));
3180    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3181    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3182
3183    switch (opc) {
3184    case INDEX_op_rotrv_vec:
3185        t0 = tcg_temp_new_vec(type);
3186        tcg_gen_neg_vec(vece, t0, v2);
3187        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3188        tcg_temp_free_vec(t0);
3189        break;
3190
3191    case INDEX_op_ssadd_vec:
3192        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
3193        break;
3194    case INDEX_op_sssub_vec:
3195        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
3196        break;
3197
3198    default:
3199        g_assert_not_reached();
3200    }
3201    va_end(va);
3202}
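
/*
 * The rotrv expansion above works because vector rotate counts are
 * taken modulo the element width: rotating right by n equals rotating
 * left by -n, so negating the count vector and using rotlv suffices.
 */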
3203
3204static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3205{
3206    switch (op) {
3207    case INDEX_op_goto_ptr:
3208        return C_O0_I1(r);
3209
3210    case INDEX_op_ld8u_i32:
3211    case INDEX_op_ld8u_i64:
3212    case INDEX_op_ld8s_i32:
3213    case INDEX_op_ld8s_i64:
3214    case INDEX_op_ld16u_i32:
3215    case INDEX_op_ld16u_i64:
3216    case INDEX_op_ld16s_i32:
3217    case INDEX_op_ld16s_i64:
3218    case INDEX_op_ld_i32:
3219    case INDEX_op_ld32u_i64:
3220    case INDEX_op_ld32s_i64:
3221    case INDEX_op_ld_i64:
3222        return C_O1_I1(r, r);
3223
3224    case INDEX_op_st8_i32:
3225    case INDEX_op_st8_i64:
3226    case INDEX_op_st16_i32:
3227    case INDEX_op_st16_i64:
3228    case INDEX_op_st_i32:
3229    case INDEX_op_st32_i64:
3230    case INDEX_op_st_i64:
3231        return C_O0_I2(r, r);
3232
3233    case INDEX_op_add_i32:
3234    case INDEX_op_add_i64:
3235    case INDEX_op_shl_i64:
3236    case INDEX_op_shr_i64:
3237    case INDEX_op_sar_i64:
3238    case INDEX_op_rotl_i32:
3239    case INDEX_op_rotl_i64:
3240    case INDEX_op_rotr_i32:
3241    case INDEX_op_rotr_i64:
3242        return C_O1_I2(r, r, ri);
3243    case INDEX_op_setcond_i32:
3244    case INDEX_op_negsetcond_i32:
3245    case INDEX_op_setcond_i64:
3246    case INDEX_op_negsetcond_i64:
3247        return C_O1_I2(r, r, rC);
3248
3249    case INDEX_op_clz_i64:
3250        return C_O1_I2(r, r, rI);
3251
3252    case INDEX_op_sub_i32:
3253    case INDEX_op_sub_i64:
3254    case INDEX_op_and_i32:
3255    case INDEX_op_or_i32:
3256    case INDEX_op_xor_i32:
3257        return C_O1_I2(r, r, ri);
3258    case INDEX_op_and_i64:
3259        return C_O1_I2(r, r, rNKR);
3260    case INDEX_op_or_i64:
3261    case INDEX_op_xor_i64:
3262        return C_O1_I2(r, r, rK);
3263
3264    case INDEX_op_andc_i32:
3265    case INDEX_op_orc_i32:
3266    case INDEX_op_eqv_i32:
3267        return C_O1_I2(r, r, ri);
3268    case INDEX_op_andc_i64:
3269        return C_O1_I2(r, r, rKR);
3270    case INDEX_op_orc_i64:
3271    case INDEX_op_eqv_i64:
3272        return C_O1_I2(r, r, rNK);
3273
3274    case INDEX_op_nand_i32:
3275    case INDEX_op_nand_i64:
3276    case INDEX_op_nor_i32:
3277    case INDEX_op_nor_i64:
3278        return C_O1_I2(r, r, r);
3279
3280    case INDEX_op_mul_i32:
3281        return (HAVE_FACILITY(MISC_INSN_EXT2)
3282                ? C_O1_I2(r, r, ri)
3283                : C_O1_I2(r, 0, ri));
3284    case INDEX_op_mul_i64:
3285        return (HAVE_FACILITY(MISC_INSN_EXT2)
3286                ? C_O1_I2(r, r, rJ)
3287                : C_O1_I2(r, 0, rJ));
3288
3289    case INDEX_op_shl_i32:
3290    case INDEX_op_shr_i32:
3291    case INDEX_op_sar_i32:
3292        return C_O1_I2(r, r, ri);
3293
3294    case INDEX_op_brcond_i32:
3295        return C_O0_I2(r, ri);
3296    case INDEX_op_brcond_i64:
3297        return C_O0_I2(r, rC);
3298
3299    case INDEX_op_bswap16_i32:
3300    case INDEX_op_bswap16_i64:
3301    case INDEX_op_bswap32_i32:
3302    case INDEX_op_bswap32_i64:
3303    case INDEX_op_bswap64_i64:
3304    case INDEX_op_neg_i32:
3305    case INDEX_op_neg_i64:
3306    case INDEX_op_not_i32:
3307    case INDEX_op_not_i64:
3308    case INDEX_op_ext8s_i32:
3309    case INDEX_op_ext8s_i64:
3310    case INDEX_op_ext8u_i32:
3311    case INDEX_op_ext8u_i64:
3312    case INDEX_op_ext16s_i32:
3313    case INDEX_op_ext16s_i64:
3314    case INDEX_op_ext16u_i32:
3315    case INDEX_op_ext16u_i64:
3316    case INDEX_op_ext32s_i64:
3317    case INDEX_op_ext32u_i64:
3318    case INDEX_op_ext_i32_i64:
3319    case INDEX_op_extu_i32_i64:
3320    case INDEX_op_extract_i32:
3321    case INDEX_op_extract_i64:
3322    case INDEX_op_ctpop_i32:
3323    case INDEX_op_ctpop_i64:
3324        return C_O1_I1(r, r);
3325
3326    case INDEX_op_qemu_ld_a32_i32:
3327    case INDEX_op_qemu_ld_a64_i32:
3328    case INDEX_op_qemu_ld_a32_i64:
3329    case INDEX_op_qemu_ld_a64_i64:
3330        return C_O1_I1(r, r);
3331    case INDEX_op_qemu_st_a32_i64:
3332    case INDEX_op_qemu_st_a64_i64:
3333    case INDEX_op_qemu_st_a32_i32:
3334    case INDEX_op_qemu_st_a64_i32:
3335        return C_O0_I2(r, r);
3336    case INDEX_op_qemu_ld_a32_i128:
3337    case INDEX_op_qemu_ld_a64_i128:
3338        return C_O2_I1(o, m, r);
3339    case INDEX_op_qemu_st_a32_i128:
3340    case INDEX_op_qemu_st_a64_i128:
3341        return C_O0_I3(o, m, r);
3342
3343    case INDEX_op_deposit_i32:
3344    case INDEX_op_deposit_i64:
3345        return C_O1_I2(r, rZ, r);
3346
3347    case INDEX_op_movcond_i32:
3348        return C_O1_I4(r, r, ri, rI, r);
3349    case INDEX_op_movcond_i64:
3350        return C_O1_I4(r, r, rC, rI, r);
3351
3352    case INDEX_op_div2_i32:
3353    case INDEX_op_div2_i64:
3354    case INDEX_op_divu2_i32:
3355    case INDEX_op_divu2_i64:
3356        return C_O2_I3(o, m, 0, 1, r);
3357
3358    case INDEX_op_mulu2_i64:
3359        return C_O2_I2(o, m, 0, r);
3360    case INDEX_op_muls2_i64:
3361        return C_O2_I2(o, m, r, r);
3362
3363    case INDEX_op_add2_i32:
3364    case INDEX_op_sub2_i32:
3365        return C_N1_O1_I4(r, r, 0, 1, ri, r);
3366
3367    case INDEX_op_add2_i64:
3368    case INDEX_op_sub2_i64:
3369        return C_N1_O1_I4(r, r, 0, 1, rJU, r);
3370
3371    case INDEX_op_st_vec:
3372        return C_O0_I2(v, r);
3373    case INDEX_op_ld_vec:
3374    case INDEX_op_dupm_vec:
3375        return C_O1_I1(v, r);
3376    case INDEX_op_dup_vec:
3377        return C_O1_I1(v, vr);
3378    case INDEX_op_abs_vec:
3379    case INDEX_op_neg_vec:
3380    case INDEX_op_not_vec:
3381    case INDEX_op_rotli_vec:
3382    case INDEX_op_sari_vec:
3383    case INDEX_op_shli_vec:
3384    case INDEX_op_shri_vec:
3385    case INDEX_op_s390_vuph_vec:
3386    case INDEX_op_s390_vupl_vec:
3387        return C_O1_I1(v, v);
3388    case INDEX_op_add_vec:
3389    case INDEX_op_sub_vec:
3390    case INDEX_op_and_vec:
3391    case INDEX_op_andc_vec:
3392    case INDEX_op_or_vec:
3393    case INDEX_op_orc_vec:
3394    case INDEX_op_xor_vec:
3395    case INDEX_op_nand_vec:
3396    case INDEX_op_nor_vec:
3397    case INDEX_op_eqv_vec:
3398    case INDEX_op_cmp_vec:
3399    case INDEX_op_mul_vec:
3400    case INDEX_op_rotlv_vec:
3401    case INDEX_op_rotrv_vec:
3402    case INDEX_op_shlv_vec:
3403    case INDEX_op_shrv_vec:
3404    case INDEX_op_sarv_vec:
3405    case INDEX_op_smax_vec:
3406    case INDEX_op_smin_vec:
3407    case INDEX_op_umax_vec:
3408    case INDEX_op_umin_vec:
3409    case INDEX_op_s390_vpks_vec:
3410        return C_O1_I2(v, v, v);
3411    case INDEX_op_rotls_vec:
3412    case INDEX_op_shls_vec:
3413    case INDEX_op_shrs_vec:
3414    case INDEX_op_sars_vec:
3415        return C_O1_I2(v, v, r);
3416    case INDEX_op_bitsel_vec:
3417        return C_O1_I3(v, v, v, v);
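    /*
     * Vector compares directly implement only some conditions; when the
     * condition must be inverted, a constant -1 true-operand can
     * presumably fold into an or-complement of the inverted mask, so the
     * "M" (minus-one) constant is only accepted when orc is available.
     */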
3418    case INDEX_op_cmpsel_vec:
3419        return (TCG_TARGET_HAS_orc_vec
3420                ? C_O1_I4(v, v, v, vZM, v)
3421                : C_O1_I4(v, v, v, vZ, v));
3422
3423    default:
3424        g_assert_not_reached();
3425    }
3426}
3427
3428/*
3429 * Mainline glibc added HWCAP_S390_VX before it was part of the kernel ABI.
3430 * Some distros have fixed this up locally, others have not.
3431 */
3432#ifndef HWCAP_S390_VXRS
3433#define HWCAP_S390_VXRS 2048
3434#endif
3435
3436static void query_s390_facilities(void)
3437{
3438    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3439    const char *which;
3440
3441    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
3442       is present on all 64-bit systems, but let's check for it anyway.  */
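    /*
     * STFLE takes the number of doublewords to store, minus 1, in the low
     * byte of r0, and updates r0 with the count the machine actually
     * implements.  The raw .word encoding below avoids relying on
     * assembler support for the mnemonic.
     */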
3443    if (hwcap & HWCAP_S390_STFLE) {
3444        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
3445        register void *r1 __asm__("1") = s390_facilities;
3446
3447        /* stfle 0(%r1) */
3448        asm volatile(".word 0xb2b0,0x1000"
3449                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
3450    }
3451
3452    /*
3453     * Use of vector registers requires OS support beyond the facility bit.
3454     * If the kernel does not advertise support, disable the facility bits.
3455     * There is nothing else we currently care about in the third word, so
3456     * disable VECTOR with one store.
3457     */
3458    if (!(hwcap & HWCAP_S390_VXRS)) {
3459        s390_facilities[2] = 0;
3460    }
3461
3462    /*
3463     * Minimum supported cpu revision is z196.
3464     * Check for all required facilities.
3465     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
3466     */
3467    if (!HAVE_FACILITY(LONG_DISP)) {
3468        which = "long-displacement";
3469        goto fail;
3470    }
3471    if (!HAVE_FACILITY(EXT_IMM)) {
3472        which = "extended-immediate";
3473        goto fail;
3474    }
3475    if (!HAVE_FACILITY(GEN_INST_EXT)) {
3476        which = "general-instructions-extension";
3477        goto fail;
3478    }
3479    /*
3480     * Facility 45 is a big bin that contains: distinct-operands,
3481     * fast-BCR-serialization, high-word, population-count,
3482     * interlocked-access-1, and load/store-on-condition-1
3483     */
3484    if (!HAVE_FACILITY(45)) {
3485        which = "45";
3486        goto fail;
3487    }
3488    return;
3489
3490 fail:
3491    error_report("%s: missing required facility %s", __func__, which);
3492    exit(EXIT_FAILURE);
3493}
3494
3495static void tcg_target_init(TCGContext *s)
3496{
3497    query_s390_facilities();
3498
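    /* Regset bits 0..15 are the GPRs; bits 32..63 are vector regs V0-V31. */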
3499    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
3500    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
3501    if (HAVE_FACILITY(VECTOR)) {
3502        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3503        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3504    }
3505
3506    tcg_target_call_clobber_regs = 0;
3507    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3508    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
3509    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3510    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3511    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3512    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3513    /* The r6 register is technically call-saved, but it's also a parameter
3514       register, so it can get killed by setup for the qemu_st helper.  */
3515    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3516    /* The return register can be considered call-clobbered.  */
3517    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
3518
3519    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3520    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3521    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3522    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3523    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3524    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3525    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3526    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3527    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3528    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3529    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3530    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3531    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
3532    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
3533    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
3534    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
3535    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
3536    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
3537    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
3538    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
3539    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
3540    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
3541    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
3542    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
3543
3544    s->reserved_regs = 0;
3545    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
3546    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
3547    /* XXX many insns can't be used with R0, so we'd better avoid it for now */
3548    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
3549    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3550}
3551
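/*
 * Frame layout: the ABI-mandated register save area / stack bias
 * (TCG_TARGET_CALL_STACK_OFFSET, 160 bytes on s390x), room for outgoing
 * helper arguments, and the TCG temporary buffer.
 */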
3552#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
3553                           + TCG_STATIC_CALL_ARGS_SIZE           \
3554                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
3555
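/*
 * The prologue is entered as an ordinary C function call with the CPU env
 * pointer in the first argument register and the TB address in the second:
 * save the call-saved registers, carve out the frame, install env in
 * AREG0, and branch to the translated block.
 */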
3556static void tcg_target_qemu_prologue(TCGContext *s)
3557{
3558    /* stmg %r6,%r15,48(%r15) (save registers) */
3559    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
3560
3561    /* aghi %r15,-frame_size */
3562    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
3563
3564    tcg_set_frame(s, TCG_REG_CALL_STACK,
3565                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
3566                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3567
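    /*
     * A guest_base below 0x80000 fits in the signed 20-bit displacement of
     * the long-displacement memory instructions and can be folded into
     * every guest access; anything larger needs a dedicated base register.
     */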
3568    if (!tcg_use_softmmu && guest_base >= 0x80000) {
3569        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
3570        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
3571    }
3572
3573    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3574
3575    /* br %r3 (go to TB) */
3576    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
3577
3578    /*
3579     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3580     * and fall through to the rest of the epilogue.
3581     */
3582    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3583    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
3584
3585    /* TB epilogue */
3586    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
3587
3588    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
3589    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
3590                 FRAME_SIZE + 48);
3591
3592    /* br %r14 (return) */
3593    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
3594}
3595
3596static void tcg_out_tb_start(TCGContext *s)
3597{
3598    /* nothing to do */
3599}
3600
3601static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3602{
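    /*
     * Filling with 0x07 bytes produces halfwords of 0x0707, i.e.
     * "bcr 0,%r7": a branch with an all-zero condition mask, which never
     * branches and thus serves as a two-byte no-op.
     */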
3603    memset(p, 0x07, count * sizeof(tcg_insn_unit));
3604}
3605
3606typedef struct {
3607    DebugFrameHeader h;
3608    uint8_t fde_def_cfa[4];
3609    uint8_t fde_reg_ofs[18];
3610} DebugFrame;
3611
3612/* We're expecting a 2-byte uleb128 encoded value.  */
3613QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3614
3615#define ELF_HOST_MACHINE  EM_S390
3616
3617static const DebugFrame debug_frame = {
3618    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3619    .h.cie.id = -1,
3620    .h.cie.version = 1,
3621    .h.cie.code_align = 1,
3622    .h.cie.data_align = 8,                /* sleb128 8 */
3623    .h.cie.return_column = TCG_REG_R14,
3624
3625    /* Total FDE size does not include the "len" member.  */
3626    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3627
3628    .fde_def_cfa = {
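    /* FRAME_SIZE as a two-byte uleb128: seven low bits with the 0x80
       continuation flag set, then the remaining high bits.  */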
3629        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
3630        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3631        (FRAME_SIZE >> 7)
3632    },
3633    .fde_reg_ofs = {
3634        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
3635        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
3636        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
3637        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
3638        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
3639        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
3640        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
3641        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
3642        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
3643    }
3644};
3645
3646void tcg_register_jit(const void *buf, size_t buf_size)
3647{
3648    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3649}
3650