xref: /openbmc/qemu/tcg/s390x/tcg-target.c.inc (revision 238f43809a85a47cfbbc2e1d6aff4640fec30328)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27/* We only support generating code for 64-bit mode.  */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
32#include "../tcg-ldst.c.inc"
33#include "../tcg-pool.c.inc"
34#include "elf.h"
35
36#define TCG_CT_CONST_S16        (1 << 8)
37#define TCG_CT_CONST_S32        (1 << 9)
38#define TCG_CT_CONST_S33        (1 << 10)
39#define TCG_CT_CONST_ZERO       (1 << 11)
40#define TCG_CT_CONST_P32        (1 << 12)
41#define TCG_CT_CONST_INV        (1 << 13)
42#define TCG_CT_CONST_INVRISBG   (1 << 14)
43
44#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
45#define ALL_VECTOR_REGS      MAKE_64BIT_MASK(32, 32)
46
47/* In several places within the instruction set, 0 means "no register"
48   rather than TCG_REG_R0.  */
49#define TCG_REG_NONE    0
50
51/* A scratch register that may be used throughout the backend.  */
52#define TCG_TMP0        TCG_REG_R1
53
54#ifndef CONFIG_SOFTMMU
55#define TCG_GUEST_BASE_REG TCG_REG_R13
56#endif
57
58/* All of the following instructions are prefixed with their instruction
59   format, and are defined as 8- or 16-bit quantities, even when the two
60   halves of the 16-bit quantity may appear 32 bits apart in the insn.
61   This makes it easy to copy the values from the tables in Appendix B
   of the Principles of Operation.  */
62typedef enum S390Opcode {
63    RIL_AFI     = 0xc209,
64    RIL_AGFI    = 0xc208,
65    RIL_ALFI    = 0xc20b,
66    RIL_ALGFI   = 0xc20a,
67    RIL_BRASL   = 0xc005,
68    RIL_BRCL    = 0xc004,
69    RIL_CFI     = 0xc20d,
70    RIL_CGFI    = 0xc20c,
71    RIL_CLFI    = 0xc20f,
72    RIL_CLGFI   = 0xc20e,
73    RIL_CLRL    = 0xc60f,
74    RIL_CLGRL   = 0xc60a,
75    RIL_CRL     = 0xc60d,
76    RIL_CGRL    = 0xc608,
77    RIL_IIHF    = 0xc008,
78    RIL_IILF    = 0xc009,
79    RIL_LARL    = 0xc000,
80    RIL_LGFI    = 0xc001,
81    RIL_LGRL    = 0xc408,
82    RIL_LLIHF   = 0xc00e,
83    RIL_LLILF   = 0xc00f,
84    RIL_LRL     = 0xc40d,
85    RIL_MSFI    = 0xc201,
86    RIL_MSGFI   = 0xc200,
87    RIL_NIHF    = 0xc00a,
88    RIL_NILF    = 0xc00b,
89    RIL_OIHF    = 0xc00c,
90    RIL_OILF    = 0xc00d,
91    RIL_SLFI    = 0xc205,
92    RIL_SLGFI   = 0xc204,
93    RIL_XIHF    = 0xc006,
94    RIL_XILF    = 0xc007,
95
96    RI_AGHI     = 0xa70b,
97    RI_AHI      = 0xa70a,
98    RI_BRC      = 0xa704,
99    RI_CHI      = 0xa70e,
100    RI_CGHI     = 0xa70f,
101    RI_IIHH     = 0xa500,
102    RI_IIHL     = 0xa501,
103    RI_IILH     = 0xa502,
104    RI_IILL     = 0xa503,
105    RI_LGHI     = 0xa709,
106    RI_LLIHH    = 0xa50c,
107    RI_LLIHL    = 0xa50d,
108    RI_LLILH    = 0xa50e,
109    RI_LLILL    = 0xa50f,
110    RI_MGHI     = 0xa70d,
111    RI_MHI      = 0xa70c,
112    RI_NIHH     = 0xa504,
113    RI_NIHL     = 0xa505,
114    RI_NILH     = 0xa506,
115    RI_NILL     = 0xa507,
116    RI_OIHH     = 0xa508,
117    RI_OIHL     = 0xa509,
118    RI_OILH     = 0xa50a,
119    RI_OILL     = 0xa50b,
120    RI_TMLL     = 0xa701,
121
122    RIEb_CGRJ    = 0xec64,
123    RIEb_CLGRJ   = 0xec65,
124    RIEb_CLRJ    = 0xec77,
125    RIEb_CRJ     = 0xec76,
126
127    RIEc_CGIJ    = 0xec7c,
128    RIEc_CIJ     = 0xec7e,
129    RIEc_CLGIJ   = 0xec7d,
130    RIEc_CLIJ    = 0xec7f,
131
132    RIEf_RISBG   = 0xec55,
133
134    RIEg_LOCGHI  = 0xec46,
135
136    RRE_AGR     = 0xb908,
137    RRE_ALGR    = 0xb90a,
138    RRE_ALCR    = 0xb998,
139    RRE_ALCGR   = 0xb988,
140    RRE_ALGFR   = 0xb91a,
141    RRE_CGR     = 0xb920,
142    RRE_CLGR    = 0xb921,
143    RRE_DLGR    = 0xb987,
144    RRE_DLR     = 0xb997,
145    RRE_DSGFR   = 0xb91d,
146    RRE_DSGR    = 0xb90d,
147    RRE_FLOGR   = 0xb983,
148    RRE_LGBR    = 0xb906,
149    RRE_LCGR    = 0xb903,
150    RRE_LGFR    = 0xb914,
151    RRE_LGHR    = 0xb907,
152    RRE_LGR     = 0xb904,
153    RRE_LLGCR   = 0xb984,
154    RRE_LLGFR   = 0xb916,
155    RRE_LLGHR   = 0xb985,
156    RRE_LRVR    = 0xb91f,
157    RRE_LRVGR   = 0xb90f,
158    RRE_LTGR    = 0xb902,
159    RRE_MLGR    = 0xb986,
160    RRE_MSGR    = 0xb90c,
161    RRE_MSR     = 0xb252,
162    RRE_NGR     = 0xb980,
163    RRE_OGR     = 0xb981,
164    RRE_SGR     = 0xb909,
165    RRE_SLGR    = 0xb90b,
166    RRE_SLBR    = 0xb999,
167    RRE_SLBGR   = 0xb989,
168    RRE_XGR     = 0xb982,
169
170    RRFa_MGRK   = 0xb9ec,
171    RRFa_MSRKC  = 0xb9fd,
172    RRFa_MSGRKC = 0xb9ed,
173    RRFa_NCRK   = 0xb9f5,
174    RRFa_NCGRK  = 0xb9e5,
175    RRFa_NNRK   = 0xb974,
176    RRFa_NNGRK  = 0xb964,
177    RRFa_NORK   = 0xb976,
178    RRFa_NOGRK  = 0xb966,
179    RRFa_NRK    = 0xb9f4,
180    RRFa_NGRK   = 0xb9e4,
181    RRFa_NXRK   = 0xb977,
182    RRFa_NXGRK  = 0xb967,
183    RRFa_OCRK   = 0xb975,
184    RRFa_OCGRK  = 0xb965,
185    RRFa_ORK    = 0xb9f6,
186    RRFa_OGRK   = 0xb9e6,
187    RRFa_SRK    = 0xb9f9,
188    RRFa_SGRK   = 0xb9e9,
189    RRFa_SLRK   = 0xb9fb,
190    RRFa_SLGRK  = 0xb9eb,
191    RRFa_XRK    = 0xb9f7,
192    RRFa_XGRK   = 0xb9e7,
193
194    RRFam_SELGR = 0xb9e3,
195
196    RRFc_LOCR   = 0xb9f2,
197    RRFc_LOCGR  = 0xb9e2,
198    RRFc_POPCNT = 0xb9e1,
199
200    RR_AR       = 0x1a,
201    RR_ALR      = 0x1e,
202    RR_BASR     = 0x0d,
203    RR_BCR      = 0x07,
204    RR_CLR      = 0x15,
205    RR_CR       = 0x19,
206    RR_DR       = 0x1d,
207    RR_LCR      = 0x13,
208    RR_LR       = 0x18,
209    RR_LTR      = 0x12,
210    RR_NR       = 0x14,
211    RR_OR       = 0x16,
212    RR_SR       = 0x1b,
213    RR_SLR      = 0x1f,
214    RR_XR       = 0x17,
215
216    RSY_RLL     = 0xeb1d,
217    RSY_RLLG    = 0xeb1c,
218    RSY_SLLG    = 0xeb0d,
219    RSY_SLLK    = 0xebdf,
220    RSY_SRAG    = 0xeb0a,
221    RSY_SRAK    = 0xebdc,
222    RSY_SRLG    = 0xeb0c,
223    RSY_SRLK    = 0xebde,
224
225    RS_SLL      = 0x89,
226    RS_SRA      = 0x8a,
227    RS_SRL      = 0x88,
228
229    RXY_AG      = 0xe308,
230    RXY_AY      = 0xe35a,
231    RXY_CG      = 0xe320,
232    RXY_CLG     = 0xe321,
233    RXY_CLY     = 0xe355,
234    RXY_CY      = 0xe359,
235    RXY_LAY     = 0xe371,
236    RXY_LB      = 0xe376,
237    RXY_LG      = 0xe304,
238    RXY_LGB     = 0xe377,
239    RXY_LGF     = 0xe314,
240    RXY_LGH     = 0xe315,
241    RXY_LHY     = 0xe378,
242    RXY_LLGC    = 0xe390,
243    RXY_LLGF    = 0xe316,
244    RXY_LLGH    = 0xe391,
245    RXY_LMG     = 0xeb04,
246    RXY_LPQ     = 0xe38f,
247    RXY_LRV     = 0xe31e,
248    RXY_LRVG    = 0xe30f,
249    RXY_LRVH    = 0xe31f,
250    RXY_LY      = 0xe358,
251    RXY_NG      = 0xe380,
252    RXY_OG      = 0xe381,
253    RXY_STCY    = 0xe372,
254    RXY_STG     = 0xe324,
255    RXY_STHY    = 0xe370,
256    RXY_STMG    = 0xeb24,
257    RXY_STPQ    = 0xe38e,
258    RXY_STRV    = 0xe33e,
259    RXY_STRVG   = 0xe32f,
260    RXY_STRVH   = 0xe33f,
261    RXY_STY     = 0xe350,
262    RXY_XG      = 0xe382,
263
264    RX_A        = 0x5a,
265    RX_C        = 0x59,
266    RX_L        = 0x58,
267    RX_LA       = 0x41,
268    RX_LH       = 0x48,
269    RX_ST       = 0x50,
270    RX_STC      = 0x42,
271    RX_STH      = 0x40,
272
273    VRIa_VGBM   = 0xe744,
274    VRIa_VREPI  = 0xe745,
275    VRIb_VGM    = 0xe746,
276    VRIc_VREP   = 0xe74d,
277
278    VRRa_VLC    = 0xe7de,
279    VRRa_VLP    = 0xe7df,
280    VRRa_VLR    = 0xe756,
281    VRRc_VA     = 0xe7f3,
282    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
283    VRRc_VCH    = 0xe7fb,   /* " */
284    VRRc_VCHL   = 0xe7f9,   /* " */
285    VRRc_VERLLV = 0xe773,
286    VRRc_VESLV  = 0xe770,
287    VRRc_VESRAV = 0xe77a,
288    VRRc_VESRLV = 0xe778,
289    VRRc_VML    = 0xe7a2,
290    VRRc_VMN    = 0xe7fe,
291    VRRc_VMNL   = 0xe7fc,
292    VRRc_VMX    = 0xe7ff,
293    VRRc_VMXL   = 0xe7fd,
294    VRRc_VN     = 0xe768,
295    VRRc_VNC    = 0xe769,
296    VRRc_VNN    = 0xe76e,
297    VRRc_VNO    = 0xe76b,
298    VRRc_VNX    = 0xe76c,
299    VRRc_VO     = 0xe76a,
300    VRRc_VOC    = 0xe76f,
301    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
302    VRRc_VS     = 0xe7f7,
303    VRRa_VUPH   = 0xe7d7,
304    VRRa_VUPL   = 0xe7d6,
305    VRRc_VX     = 0xe76d,
306    VRRe_VSEL   = 0xe78d,
307    VRRf_VLVGP  = 0xe762,
308
309    VRSa_VERLL  = 0xe733,
310    VRSa_VESL   = 0xe730,
311    VRSa_VESRA  = 0xe73a,
312    VRSa_VESRL  = 0xe738,
313    VRSb_VLVG   = 0xe722,
314    VRSc_VLGV   = 0xe721,
315
316    VRX_VL      = 0xe706,
317    VRX_VLLEZ   = 0xe704,
318    VRX_VLREP   = 0xe705,
319    VRX_VST     = 0xe70e,
320    VRX_VSTEF   = 0xe70b,
321    VRX_VSTEG   = 0xe70a,
322
323    NOP         = 0x0707,
324} S390Opcode;
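/*
 * Illustration of the convention above: RIL_BRASL is 0xc005, so
 * tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off) emits the halfword 0xc0e5
 * (opcode byte 0xc0, r1 = 14, opcode extension 5) followed by the 32-bit
 * halfword-relative offset -- exactly the split performed by
 * tcg_out_insn_RIL() below.
 */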
325
326#ifdef CONFIG_DEBUG_TCG
327static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
328    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
329    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
330    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
331    "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
332    "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
333    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
334    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
335};
336#endif
337
338/* Since R6 is a potential argument register, choose it last of the
339   call-saved registers.  Likewise prefer the call-clobbered registers
340   in reverse order to maximize the chance of avoiding the arguments.  */
341static const int tcg_target_reg_alloc_order[] = {
342    /* Call saved registers.  */
343    TCG_REG_R13,
344    TCG_REG_R12,
345    TCG_REG_R11,
346    TCG_REG_R10,
347    TCG_REG_R9,
348    TCG_REG_R8,
349    TCG_REG_R7,
350    TCG_REG_R6,
351    /* Call clobbered registers.  */
352    TCG_REG_R14,
353    TCG_REG_R0,
354    TCG_REG_R1,
355    /* Argument registers, in reverse order of allocation.  */
356    TCG_REG_R5,
357    TCG_REG_R4,
358    TCG_REG_R3,
359    TCG_REG_R2,
360
361    /* V8-V15 are call saved, and omitted. */
362    TCG_REG_V0,
363    TCG_REG_V1,
364    TCG_REG_V2,
365    TCG_REG_V3,
366    TCG_REG_V4,
367    TCG_REG_V5,
368    TCG_REG_V6,
369    TCG_REG_V7,
370    TCG_REG_V16,
371    TCG_REG_V17,
372    TCG_REG_V18,
373    TCG_REG_V19,
374    TCG_REG_V20,
375    TCG_REG_V21,
376    TCG_REG_V22,
377    TCG_REG_V23,
378    TCG_REG_V24,
379    TCG_REG_V25,
380    TCG_REG_V26,
381    TCG_REG_V27,
382    TCG_REG_V28,
383    TCG_REG_V29,
384    TCG_REG_V30,
385    TCG_REG_V31,
386};
387
388static const int tcg_target_call_iarg_regs[] = {
389    TCG_REG_R2,
390    TCG_REG_R3,
391    TCG_REG_R4,
392    TCG_REG_R5,
393    TCG_REG_R6,
394};
395
396static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
397{
398    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
399    tcg_debug_assert(slot == 0);
400    return TCG_REG_R2;
401}
402
403#define S390_CC_EQ      8
404#define S390_CC_LT      4
405#define S390_CC_GT      2
406#define S390_CC_OV      1
407#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
408#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
409#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
410#define S390_CC_NEVER   0
411#define S390_CC_ALWAYS  15
412
413/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
414static const uint8_t tcg_cond_to_s390_cond[] = {
415    [TCG_COND_EQ]  = S390_CC_EQ,
416    [TCG_COND_NE]  = S390_CC_NE,
417    [TCG_COND_LT]  = S390_CC_LT,
418    [TCG_COND_LE]  = S390_CC_LE,
419    [TCG_COND_GT]  = S390_CC_GT,
420    [TCG_COND_GE]  = S390_CC_GE,
421    [TCG_COND_LTU] = S390_CC_LT,
422    [TCG_COND_LEU] = S390_CC_LE,
423    [TCG_COND_GTU] = S390_CC_GT,
424    [TCG_COND_GEU] = S390_CC_GE,
425};
426
427/* Condition codes that result from a LOAD AND TEST.  Here, we have no
428   unsigned instruction variation; however, since the test is against zero,
429   we can re-map the outcomes appropriately.  */
430static const uint8_t tcg_cond_to_ltr_cond[] = {
431    [TCG_COND_EQ]  = S390_CC_EQ,
432    [TCG_COND_NE]  = S390_CC_NE,
433    [TCG_COND_LT]  = S390_CC_LT,
434    [TCG_COND_LE]  = S390_CC_LE,
435    [TCG_COND_GT]  = S390_CC_GT,
436    [TCG_COND_GE]  = S390_CC_GE,
437    [TCG_COND_LTU] = S390_CC_NEVER,
438    [TCG_COND_LEU] = S390_CC_EQ,
439    [TCG_COND_GTU] = S390_CC_NE,
440    [TCG_COND_GEU] = S390_CC_ALWAYS,
441};
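/*
 * As the table shows, the unsigned comparisons degenerate against zero:
 * LTU can never be true and GEU is always true, while LEU and GTU reduce
 * to the EQ/NE outcomes of LOAD AND TEST.
 */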
442
443static const tcg_insn_unit *tb_ret_addr;
444uint64_t s390_facilities[3];
445
446static inline bool is_general_reg(TCGReg r)
447{
448    return r <= TCG_REG_R15;
449}
450
451static inline bool is_vector_reg(TCGReg r)
452{
453    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
454}
455
456static bool patch_reloc(tcg_insn_unit *src_rw, int type,
457                        intptr_t value, intptr_t addend)
458{
459    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
460    intptr_t pcrel2;
461    uint32_t old;
462
463    value += addend;
464    pcrel2 = (tcg_insn_unit *)value - src_rx;
465
466    switch (type) {
467    case R_390_PC16DBL:
468        if (pcrel2 == (int16_t)pcrel2) {
469            tcg_patch16(src_rw, pcrel2);
470            return true;
471        }
472        break;
473    case R_390_PC32DBL:
474        if (pcrel2 == (int32_t)pcrel2) {
475            tcg_patch32(src_rw, pcrel2);
476            return true;
477        }
478        break;
479    case R_390_20:
480        if (value == sextract64(value, 0, 20)) {
481            old = *(uint32_t *)src_rw & 0xf00000ff;
482            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
483            tcg_patch32(src_rw, old);
484            return true;
485        }
486        break;
487    default:
488        g_assert_not_reached();
489    }
490    return false;
491}
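/*
 * The R_390_20 case above targets the 20-bit signed displacement of an
 * RXY-format insn, which is stored split into a 12-bit low part (DL) and
 * an 8-bit high part (DH); the two shifts reassemble the value into those
 * fields.
 */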
492
493static int is_const_p16(uint64_t val)
494{
495    for (int i = 0; i < 4; ++i) {
496        uint64_t mask = 0xffffull << (i * 16);
497        if ((val & ~mask) == 0) {
498            return i;
499        }
500    }
501    return -1;
502}
503
504static int is_const_p32(uint64_t val)
505{
506    if ((val & 0xffffffff00000000ull) == 0) {
507        return 0;
508    }
509    if ((val & 0x00000000ffffffffull) == 0) {
510        return 1;
511    }
512    return -1;
513}
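/*
 * Examples (illustration only): is_const_p16(0x00000000ffff0000ull)
 * returns 1, as the value lives entirely in halfword 1;
 * is_const_p32(0xffffffff00000000ull) returns 1, while is_const_p16() of
 * the same value returns -1 because it spans two halfwords.
 */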
514
515/*
516 * Accept bit patterns like these:
517 *  0....01....1
518 *  1....10....0
519 *  1..10..01..1
520 *  0..01..10..0
521 * Copied from gcc sources.
522 */
523static bool risbg_mask(uint64_t c)
524{
525    uint64_t lsb;
526    /* We don't change the number of transitions by inverting,
527       so make sure we start with the LSB zero.  */
528    if (c & 1) {
529        c = ~c;
530    }
531    /* Reject all zeros or all ones.  */
532    if (c == 0) {
533        return false;
534    }
535    /* Find the first transition.  */
536    lsb = c & -c;
537    /* Invert to look for a second transition.  */
538    c = ~c;
539    /* Erase the first transition.  */
540    c &= -lsb;
541    /* Find the second transition, if any.  */
542    lsb = c & -c;
543    /* Match if all the bits are 1's, or if c is zero.  */
544    return c == -lsb;
545}
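/*
 * Worked example (illustration only): c = 0x00ffff00 starts with the LSB
 * clear, lsb becomes 0x100, and after inverting and erasing that first
 * transition c is 0xffffffffff000000, which equals -lsb for the new
 * lsb = 0x1000000, so the mask is accepted.  0x00ff00ff is rejected:
 * a second transition survives, so the final test fails.
 */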
546
547/* Test if a constant matches the constraint. */
548static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
549{
550    if (ct & TCG_CT_CONST) {
551        return 1;
552    }
553
554    if (type == TCG_TYPE_I32) {
555        val = (int32_t)val;
556    }
557
558    /* The following are mutually exclusive.  */
559    if (ct & TCG_CT_CONST_S16) {
560        return val == (int16_t)val;
561    } else if (ct & TCG_CT_CONST_S32) {
562        return val == (int32_t)val;
563    } else if (ct & TCG_CT_CONST_S33) {
564        return val >= -0xffffffffll && val <= 0xffffffffll;
565    } else if (ct & TCG_CT_CONST_ZERO) {
566        return val == 0;
567    }
568
569    if (ct & TCG_CT_CONST_INV) {
570        val = ~val;
571    }
572    /*
573     * Note that is_const_p16 is a subset of is_const_p32,
574     * so we don't need both constraints.
575     */
576    if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
577        return true;
578    }
579    if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
580        return true;
581    }
582
583    return 0;
584}
585
586/* Emit instructions according to the given instruction format.  */
587
588static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
589{
590    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
591}
592
593static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
594                             TCGReg r1, TCGReg r2)
595{
596    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
597}
598
599/* RRF-a without the m4 field */
600static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
601                              TCGReg r1, TCGReg r2, TCGReg r3)
602{
603    tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
604}
605
606/* RRF-a with the m4 field */
607static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
608                               TCGReg r1, TCGReg r2, TCGReg r3, int m4)
609{
610    tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
611}
612
613static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
614                              TCGReg r1, TCGReg r2, int m3)
615{
616    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
617}
618
619static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
620{
621    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
622}
623
624static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
625                             int i2, int m3)
626{
627    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
628    tcg_out32(s, (i2 << 16) | (op & 0xff));
629}
630
631static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
632{
633    tcg_out16(s, op | (r1 << 4));
634    tcg_out32(s, i2);
635}
636
637static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
638                            TCGReg b2, TCGReg r3, int disp)
639{
640    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
641              | (disp & 0xfff));
642}
643
644static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
645                             TCGReg b2, TCGReg r3, int disp)
646{
647    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
648    tcg_out32(s, (op & 0xff) | (b2 << 28)
649              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
650}
651
652#define tcg_out_insn_RX   tcg_out_insn_RS
653#define tcg_out_insn_RXY  tcg_out_insn_RSY
654
655static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
656{
657    /*
658     * Shift bit 4 of each regno to its corresponding bit of RXB.
659     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
660     * is the left-shift of the 4th operand.
661     */
662    return ((v1 & 0x10) << (4 + 3))
663         | ((v2 & 0x10) << (4 + 2))
664         | ((v3 & 0x10) << (4 + 1))
665         | ((v4 & 0x10) << (4 + 0));
666}
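/*
 * Example (illustration only): if v1 is in the %v16..%v31 range (bit 4 of
 * its register number is set) and the other operands are below %v16,
 * RXB() returns 0x0800.  The emitters below OR this into the last
 * halfword, where the RXB nibble occupies bits 8..11, alongside the low
 * opcode byte in bits 0..7 and the m-field in bits 12..15.
 */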
667
668static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
669                              TCGReg v1, uint16_t i2, int m3)
670{
671    tcg_debug_assert(is_vector_reg(v1));
672    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
673    tcg_out16(s, i2);
674    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
675}
676
677static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
678                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
679{
680    tcg_debug_assert(is_vector_reg(v1));
681    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
682    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
683    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
684}
685
686static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
687                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
688{
689    tcg_debug_assert(is_vector_reg(v1));
690    tcg_debug_assert(is_vector_reg(v3));
691    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
692    tcg_out16(s, i2);
693    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
694}
695
696static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
697                              TCGReg v1, TCGReg v2, int m3)
698{
699    tcg_debug_assert(is_vector_reg(v1));
700    tcg_debug_assert(is_vector_reg(v2));
701    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
702    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
703}
704
705static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
706                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
707{
708    tcg_debug_assert(is_vector_reg(v1));
709    tcg_debug_assert(is_vector_reg(v2));
710    tcg_debug_assert(is_vector_reg(v3));
711    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
712    tcg_out16(s, v3 << 12);
713    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
714}
715
716static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
717                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
718{
719    tcg_debug_assert(is_vector_reg(v1));
720    tcg_debug_assert(is_vector_reg(v2));
721    tcg_debug_assert(is_vector_reg(v3));
722    tcg_debug_assert(is_vector_reg(v4));
723    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
724    tcg_out16(s, v3 << 12);
725    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
726}
727
728static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
729                              TCGReg v1, TCGReg r2, TCGReg r3)
730{
731    tcg_debug_assert(is_vector_reg(v1));
732    tcg_debug_assert(is_general_reg(r2));
733    tcg_debug_assert(is_general_reg(r3));
734    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
735    tcg_out16(s, r3 << 12);
736    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
737}
738
739static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
740                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
741{
742    tcg_debug_assert(is_vector_reg(v1));
743    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
744    tcg_debug_assert(is_general_reg(b2));
745    tcg_debug_assert(is_vector_reg(v3));
746    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
747    tcg_out16(s, b2 << 12 | d2);
748    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
749}
750
751static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
752                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
753{
754    tcg_debug_assert(is_vector_reg(v1));
755    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
756    tcg_debug_assert(is_general_reg(b2));
757    tcg_debug_assert(is_general_reg(r3));
758    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
759    tcg_out16(s, b2 << 12 | d2);
760    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
761}
762
763static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
764                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
765{
766    tcg_debug_assert(is_general_reg(r1));
767    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
768    tcg_debug_assert(is_general_reg(b2));
769    tcg_debug_assert(is_vector_reg(v3));
770    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
771    tcg_out16(s, b2 << 12 | d2);
772    tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12));
773}
774
775static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
776                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
777{
778    tcg_debug_assert(is_vector_reg(v1));
779    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
780    tcg_debug_assert(is_general_reg(x2));
781    tcg_debug_assert(is_general_reg(b2));
782    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
783    tcg_out16(s, (b2 << 12) | d2);
784    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
785}
786
787/* Emit an opcode with "type-checking" of the format.  */
788#define tcg_out_insn(S, FMT, OP, ...) \
789    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
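/*
 * For example, tcg_out_insn(s, RRE, LGR, dest, src) expands to
 * tcg_out_insn_RRE(s, RRE_LGR, dest, src); pairing a format with an
 * opcode of a different format fails to compile because the glued
 * enumerator does not exist, which is the "type-checking" above.
 */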
790
791
792/* emit 64-bit shifts */
793static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
794                         TCGReg src, TCGReg sh_reg, int sh_imm)
795{
796    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
797}
798
799/* emit 32-bit shifts */
800static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
801                         TCGReg sh_reg, int sh_imm)
802{
803    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
804}
805
806static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
807{
808    if (src == dst) {
809        return true;
810    }
811    switch (type) {
812    case TCG_TYPE_I32:
813        if (likely(is_general_reg(dst) && is_general_reg(src))) {
814            tcg_out_insn(s, RR, LR, dst, src);
815            break;
816        }
817        /* fallthru */
818
819    case TCG_TYPE_I64:
820        if (likely(is_general_reg(dst))) {
821            if (likely(is_general_reg(src))) {
822                tcg_out_insn(s, RRE, LGR, dst, src);
823            } else {
824                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
825            }
826            break;
827        } else if (is_general_reg(src)) {
828            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
829            break;
830        }
831        /* fallthru */
832
833    case TCG_TYPE_V64:
834    case TCG_TYPE_V128:
835        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
836        break;
837
838    default:
839        g_assert_not_reached();
840    }
841    return true;
842}
843
844static const S390Opcode li_insns[4] = {
845    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
846};
847static const S390Opcode oi_insns[4] = {
848    RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
849};
850static const S390Opcode lif_insns[2] = {
851    RIL_LLILF, RIL_LLIHF,
852};
853
854/* load a register with an immediate value */
855static void tcg_out_movi(TCGContext *s, TCGType type,
856                         TCGReg ret, tcg_target_long sval)
857{
858    tcg_target_ulong uval = sval;
859    ptrdiff_t pc_off;
860    int i;
861
862    if (type == TCG_TYPE_I32) {
863        uval = (uint32_t)sval;
864        sval = (int32_t)sval;
865    }
866
867    /* Try all 32-bit insns that can load it in one go.  */
868    if (sval >= -0x8000 && sval < 0x8000) {
869        tcg_out_insn(s, RI, LGHI, ret, sval);
870        return;
871    }
872
873    i = is_const_p16(uval);
874    if (i >= 0) {
875        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
876        return;
877    }
878
879    /* Try all 48-bit insns that can load it in one go.  */
880    if (sval == (int32_t)sval) {
881        tcg_out_insn(s, RIL, LGFI, ret, sval);
882        return;
883    }
884
885    i = is_const_p32(uval);
886    if (i >= 0) {
887        tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
888        return;
889    }
890
891    /* Try for PC-relative address load.  For odd addresses, add one. */
892    pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
893    if (pc_off == (int32_t)pc_off) {
894        tcg_out_insn(s, RIL, LARL, ret, pc_off);
895        if (sval & 1) {
896            tcg_out_insn(s, RI, AGHI, ret, 1);
897        }
898        return;
899    }
900
901    /* Otherwise, load it by parts. */
902    i = is_const_p16((uint32_t)uval);
903    if (i >= 0) {
904        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
905    } else {
906        tcg_out_insn(s, RIL, LLILF, ret, uval);
907    }
908    uval >>= 32;
909    i = is_const_p16(uval);
910    if (i >= 0) {
911        tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
912    } else {
913        tcg_out_insn(s, RIL, OIHF, ret, uval);
914    }
915}
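/*
 * Worked example (illustration only): 0x00012345deadbeefull fits none of
 * the single-insn forms and, assuming it is not within +/- 4GiB of the
 * generated code, misses LARL as well, so it is built by parts:
 * LLILF ret,0xdeadbeef followed by OIHF ret,0x00012345.  By contrast,
 * 0xffffffff80000000ull is the sign-extension of INT32_MIN and is
 * handled by the single LGFI above.
 */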
916
917/* Emit a load/store type instruction.  Inputs are:
918   DATA:     The register to be loaded or stored.
919   BASE+OFS: The effective address.
920   OPC_RX:   The RX format opcode for the operation (e.g. STC), if one exists; otherwise 0.
921   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */
922
923static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
924                        TCGReg data, TCGReg base, TCGReg index,
925                        tcg_target_long ofs)
926{
927    if (ofs < -0x80000 || ofs >= 0x80000) {
928        /* Combine the low 20 bits of the offset with the actual load insn;
929           the high 44 bits must come from an immediate load.  */
930        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
931        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
932        ofs = low;
933
934        /* If we were already given an index register, add it in.  */
935        if (index != TCG_REG_NONE) {
936            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
937        }
938        index = TCG_TMP0;
939    }
940
941    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
942        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
943    } else {
944        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
945    }
946}
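/*
 * Example (illustration only): an offset of 0x90000 exceeds the signed
 * 20-bit RXY range, so it is split as above into TCG_TMP0 = 0x100000 used
 * as the index register plus a displacement of -0x70000 carried by the
 * RXY-format insn itself.
 */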
947
948static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
949                            TCGReg data, TCGReg base, TCGReg index,
950                            tcg_target_long ofs, int m3)
951{
952    if (ofs < 0 || ofs >= 0x1000) {
953        if (ofs >= -0x80000 && ofs < 0x80000) {
954            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
955            base = TCG_TMP0;
956            index = TCG_REG_NONE;
957            ofs = 0;
958        } else {
959            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
960            if (index != TCG_REG_NONE) {
961                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
962            }
963            index = TCG_TMP0;
964            ofs = 0;
965        }
966    }
967    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
968}
969
970/* load data without address translation or endianness conversion */
971static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
972                       TCGReg base, intptr_t ofs)
973{
974    switch (type) {
975    case TCG_TYPE_I32:
976        if (likely(is_general_reg(data))) {
977            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
978            break;
979        }
980        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
981        break;
982
983    case TCG_TYPE_I64:
984        if (likely(is_general_reg(data))) {
985            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
986            break;
987        }
988        /* fallthru */
989
990    case TCG_TYPE_V64:
991        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
992        break;
993
994    case TCG_TYPE_V128:
995        /* Hint quadword aligned.  */
996        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
997        break;
998
999    default:
1000        g_assert_not_reached();
1001    }
1002}
1003
1004static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
1005                       TCGReg base, intptr_t ofs)
1006{
1007    switch (type) {
1008    case TCG_TYPE_I32:
1009        if (likely(is_general_reg(data))) {
1010            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
1011        } else {
1012            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
1013        }
1014        break;
1015
1016    case TCG_TYPE_I64:
1017        if (likely(is_general_reg(data))) {
1018            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
1019            break;
1020        }
1021        /* fallthru */
1022
1023    case TCG_TYPE_V64:
1024        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
1025        break;
1026
1027    case TCG_TYPE_V128:
1028        /* Hint quadword aligned.  */
1029        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
1030        break;
1031
1032    default:
1033        g_assert_not_reached();
1034    }
1035}
1036
1037static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1038                               TCGReg base, intptr_t ofs)
1039{
1040    return false;
1041}
1042
1043static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1044{
1045    return false;
1046}
1047
1048static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1049                             tcg_target_long imm)
1050{
1051    /* This function is only used for passing structs by reference. */
1052    tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
1053}
1054
1055static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
1056                                 int msb, int lsb, int ofs, int z)
1057{
1058    /* Format RIE-f */
1059    tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
1060    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
1061    tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
1062}
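/*
 * RISBG rotates the source left by 'ofs' bits, then inserts bit positions
 * msb..lsb of the rotated value (IBM bit numbering, bit 0 most
 * significant, wrapping past bit 63 when msb > lsb) into the same
 * positions of dest; with z set, the remaining bits of dest are zeroed
 * instead of preserved.  The helpers below rely on both behaviours.
 */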
1063
1064static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1065{
1066    tcg_out_insn(s, RRE, LGBR, dest, src);
1067}
1068
1069static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
1070{
1071    tcg_out_insn(s, RRE, LLGCR, dest, src);
1072}
1073
1074static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1075{
1076    tcg_out_insn(s, RRE, LGHR, dest, src);
1077}
1078
1079static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
1080{
1081    tcg_out_insn(s, RRE, LLGHR, dest, src);
1082}
1083
1084static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
1085{
1086    tcg_out_insn(s, RRE, LGFR, dest, src);
1087}
1088
1089static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
1090{
1091    tcg_out_insn(s, RRE, LLGFR, dest, src);
1092}
1093
1094static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
1095{
1096    tcg_out_ext32s(s, dest, src);
1097}
1098
1099static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
1100{
1101    tcg_out_ext32u(s, dest, src);
1102}
1103
1104static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
1105{
1106    tcg_out_mov(s, TCG_TYPE_I32, dest, src);
1107}
1108
1109static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
1110{
1111    int msb, lsb;
1112    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
1113        /* Achieve wraparound by swapping msb and lsb.  */
1114        msb = 64 - ctz64(~val);
1115        lsb = clz64(~val) - 1;
1116    } else {
1117        msb = clz64(val);
1118        lsb = 63 - ctz64(val);
1119    }
1120    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
1121}
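/*
 * Worked example (illustration only): val = 0xffff00000000ffffull has both
 * the top and bottom bits set, so the wraparound path applies:
 * ~val = 0x0000ffffffff0000, giving msb = 64 - ctz64(~val) = 48 and
 * lsb = clz64(~val) - 1 = 15, a selection that wraps from bit 48 around
 * to bit 15 and therefore covers exactly the set bits of val.
 */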
1122
1123static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
1124{
1125    static const S390Opcode ni_insns[4] = {
1126        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
1127    };
1128    static const S390Opcode nif_insns[2] = {
1129        RIL_NILF, RIL_NIHF
1130    };
1131    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
1132    int i;
1133
1134    /* Look for the zero-extensions.  */
1135    if ((val & valid) == 0xffffffff) {
1136        tcg_out_ext32u(s, dest, dest);
1137        return;
1138    }
1139    if ((val & valid) == 0xff) {
1140        tcg_out_ext8u(s, dest, dest);
1141        return;
1142    }
1143    if ((val & valid) == 0xffff) {
1144        tcg_out_ext16u(s, dest, dest);
1145        return;
1146    }
1147
1148    i = is_const_p16(~val & valid);
1149    if (i >= 0) {
1150        tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
1151        return;
1152    }
1153
1154    i = is_const_p32(~val & valid);
1155    tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
1156    if (i >= 0) {
1157        tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
1158        return;
1159    }
1160
1161    if (risbg_mask(val)) {
1162        tgen_andi_risbg(s, dest, dest, val);
1163        return;
1164    }
1165
1166    g_assert_not_reached();
1167}
1168
1169static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
1170{
1171    static const S390Opcode oif_insns[2] = {
1172        RIL_OILF, RIL_OIHF
1173    };
1174
1175    int i;
1176
1177    i = is_const_p16(val);
1178    if (i >= 0) {
1179        tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
1180        return;
1181    }
1182
1183    i = is_const_p32(val);
1184    if (i >= 0) {
1185        tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
1186        return;
1187    }
1188
1189    g_assert_not_reached();
1190}
1191
1192static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
1193{
1194    switch (is_const_p32(val)) {
1195    case 0:
1196        tcg_out_insn(s, RIL, XILF, dest, val);
1197        break;
1198    case 1:
1199        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
1200        break;
1201    default:
1202        g_assert_not_reached();
1203    }
1204}
1205
1206static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1207                     TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
1208{
1209    bool is_unsigned = is_unsigned_cond(c);
1210    TCGCond inv_c = tcg_invert_cond(c);
1211    S390Opcode op;
1212
1213    if (c2const) {
1214        if (c2 == 0) {
1215            if (!(is_unsigned && need_carry)) {
1216                if (type == TCG_TYPE_I32) {
1217                    tcg_out_insn(s, RR, LTR, r1, r1);
1218                } else {
1219                    tcg_out_insn(s, RRE, LTGR, r1, r1);
1220                }
1221                *inv_cc = tcg_cond_to_ltr_cond[inv_c];
1222                return tcg_cond_to_ltr_cond[c];
1223            }
1224        }
1225
1226        if (!is_unsigned && c2 == (int16_t)c2) {
1227            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
1228            tcg_out_insn_RI(s, op, r1, c2);
1229            goto exit;
1230        }
1231
1232        if (type == TCG_TYPE_I32) {
1233            op = (is_unsigned ? RIL_CLFI : RIL_CFI);
1234            tcg_out_insn_RIL(s, op, r1, c2);
1235            goto exit;
1236        }
1237
1238        /*
1239         * Constraints are for a signed 33-bit operand, which is a
1240         * convenient superset of this signed/unsigned test.
1241         */
1242        if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
1243            op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
1244            tcg_out_insn_RIL(s, op, r1, c2);
1245            goto exit;
1246        }
1247
1248        /* Load everything else into a register. */
1249        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
1250        c2 = TCG_TMP0;
1251    }
1252
1253    if (type == TCG_TYPE_I32) {
1254        op = (is_unsigned ? RR_CLR : RR_CR);
1255        tcg_out_insn_RR(s, op, r1, c2);
1256    } else {
1257        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
1258        tcg_out_insn_RRE(s, op, r1, c2);
1259    }
1260
1261 exit:
1262    *inv_cc = tcg_cond_to_s390_cond[inv_c];
1263    return tcg_cond_to_s390_cond[c];
1264}
1265
1266static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1267                    TCGArg c2, bool c2const, bool need_carry)
1268{
1269    int inv_cc;
1270    return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
1271}
1272
1273static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
1274                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1275{
1276    int cc;
1277
1278    /* With LOC2, we can always emit the minimum 3 insns.  */
1279    if (HAVE_FACILITY(LOAD_ON_COND2)) {
1280        /* Emit: d = 0, d = (cc ? 1 : d).  */
1281        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1282        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1283        tcg_out_insn(s, RIEg, LOCGHI, dest, 1, cc);
1284        return;
1285    }
1286
1287 restart:
1288    switch (cond) {
1289    case TCG_COND_NE:
1290        /* X != 0 is X > 0.  */
1291        if (c2const && c2 == 0) {
1292            cond = TCG_COND_GTU;
1293        } else {
1294            break;
1295        }
1296        /* fallthru */
1297
1298    case TCG_COND_GTU:
1299    case TCG_COND_GT:
1300        /* The result of a compare has CC=2 for GT and CC=3 unused.
1301           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
1302        tgen_cmp(s, type, cond, c1, c2, c2const, true);
1303        tcg_out_movi(s, type, dest, 0);
1304        tcg_out_insn(s, RRE, ALCGR, dest, dest);
1305        return;
1306
1307    case TCG_COND_EQ:
1308        /* X == 0 is X <= 0.  */
1309        if (c2const && c2 == 0) {
1310            cond = TCG_COND_LEU;
1311        } else {
1312            break;
1313        }
1314        /* fallthru */
1315
1316    case TCG_COND_LEU:
1317    case TCG_COND_LE:
1318        /* As above, but we're looking for borrow, or !carry.
1319           The second insn computes d - d - borrow, or -1 for true
1320           and 0 for false.  So we must mask to 1 bit afterward.  */
1321        tgen_cmp(s, type, cond, c1, c2, c2const, true);
1322        tcg_out_insn(s, RRE, SLBGR, dest, dest);
1323        tgen_andi(s, type, dest, 1);
1324        return;
1325
1326    case TCG_COND_GEU:
1327    case TCG_COND_LTU:
1328    case TCG_COND_LT:
1329    case TCG_COND_GE:
1330        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
1331        if (!c2const) {
1332            TCGReg t = c1;
1333            c1 = c2;
1334            c2 = t;
1335            cond = tcg_swap_cond(cond);
1336            goto restart;
1337        }
1338        break;
1339
1340    default:
1341        g_assert_not_reached();
1342    }
1343
1344    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1345    /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
1346    tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1347    tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1348    tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
1349}
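/*
 * Example (illustration only): without LOAD_ON_COND2, setcond GTU on I64
 * registers becomes CLGR + LGHI dest,0 + ALCGR dest,dest; the compare
 * leaves CC=2 exactly for the unsigned greater-than case, ALCGR adds
 * 0 + 0 + carry, and dest ends up 1 or 0 as required.
 */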
1350
1351static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
1352                             TCGArg v3, int v3const, TCGReg v4,
1353                             int cc, int inv_cc)
1354{
1355    TCGReg src;
1356
1357    if (v3const) {
1358        if (dest == v4) {
1359            if (HAVE_FACILITY(LOAD_ON_COND2)) {
1360                /* Emit: if (cc) dest = v3. */
1361                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
1362                return;
1363            }
1364            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
1365            src = TCG_TMP0;
1366        } else {
1367            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
1368            tcg_out_insn(s, RI, LGHI, dest, v3);
1369            cc = inv_cc;
1370            src = v4;
1371        }
1372    } else {
1373        if (HAVE_FACILITY(MISC_INSN_EXT3)) {
1374            /* Emit: dest = cc ? v3 : v4. */
1375            tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
1376            return;
1377        }
1378        if (dest == v4) {
1379            src = v3;
1380        } else {
1381            tcg_out_mov(s, type, dest, v3);
1382            cc = inv_cc;
1383            src = v4;
1384        }
1385    }
1386
1387    /* Emit: if (cc) dest = src. */
1388    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
1389}
1390
1391static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1392                         TCGReg c1, TCGArg c2, int c2const,
1393                         TCGArg v3, int v3const, TCGReg v4)
1394{
1395    int cc, inv_cc;
1396
1397    cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
1398    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
1399}
1400
1401static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1402                     TCGArg a2, int a2const)
1403{
1404    /* Since this sets both R and R+1, we have no choice but to store the
1405       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
1406    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1407    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1408
1409    if (a2const && a2 == 64) {
1410        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1411        return;
1412    }
1413
1414    /*
1415     * Conditions from FLOGR are:
1416     *   2 -> one bit found
1417     *   8 -> no one bit found
1418     */
1419    tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
1420}
1421
1422static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1423{
1424    /* With MIE3, and bit 0 of m4 set, we get the complete result. */
1425    if (HAVE_FACILITY(MISC_INSN_EXT3)) {
1426        if (type == TCG_TYPE_I32) {
1427            tcg_out_ext32u(s, dest, src);
1428            src = dest;
1429        }
1430        tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
1431        return;
1432    }
1433
1434    /* Without MIE3, each byte gets the count of bits for the byte. */
1435    tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
1436
1437    /* Multiply to sum each byte at the top of the word. */
1438    if (type == TCG_TYPE_I32) {
1439        tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
1440        tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
1441    } else {
1442        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
1443        tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
1444        tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
1445    }
1446}
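/*
 * Worked example (illustration only): after the per-byte POPCNT, dest
 * might hold 0x0102030400000000 (1, 2, 3 and 4 bits set in the top four
 * input bytes).  Multiplying by 0x0101010101010101 sums all eight byte
 * counts into the top byte (0x0a here), which the final shift by 56
 * extracts; the 32-bit path does the same with 0x01010101 and a shift
 * of 24.
 */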
1447
1448static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1449                         int ofs, int len, int z)
1450{
1451    int lsb = (63 - ofs);
1452    int msb = lsb - (len - 1);
1453    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
1454}
1455
1456static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1457                         int ofs, int len)
1458{
1459    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1460}
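/*
 * Examples (illustration only, register names hypothetical):
 * tgen_deposit(s, d, r, 8, 16, 0) computes msb = 40, lsb = 55 with
 * rotation 8, depositing the low 16 bits of r at bit offset 8 of d while
 * preserving the rest; tgen_extract(s, d, r, 8, 16) emits RISBG d,r,48,63
 * with rotation 56 and zeroing, i.e. d = (r >> 8) & 0xffff.
 */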
1461
1462static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
1463{
1464    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1465    if (off == (int16_t)off) {
1466        tcg_out_insn(s, RI, BRC, cc, off);
1467    } else if (off == (int32_t)off) {
1468        tcg_out_insn(s, RIL, BRCL, cc, off);
1469    } else {
1470        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1471        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1472    }
1473}
1474
1475static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
1476{
1477    if (l->has_value) {
1478        tgen_gotoi(s, cc, l->u.value_ptr);
1479    } else {
1480        tcg_out16(s, RI_BRC | (cc << 4));
1481        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
1482        s->code_ptr += 1;
1483    }
1484}
1485
1486static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1487                                TCGReg r1, TCGReg r2, TCGLabel *l)
1488{
1489    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1490    /* Format RIE-b */
1491    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1492    tcg_out16(s, 0);
1493    tcg_out16(s, cc << 12 | (opc & 0xff));
1494}
1495
1496static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1497                                    TCGReg r1, int i2, TCGLabel *l)
1498{
1499    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
1500    /* Format RIE-c */
1501    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1502    tcg_out16(s, 0);
1503    tcg_out16(s, (i2 << 8) | (opc & 0xff));
1504}
1505
1506static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1507                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1508{
1509    int cc;
1510    bool is_unsigned = is_unsigned_cond(c);
1511    bool in_range;
1512    S390Opcode opc;
1513
1514    cc = tcg_cond_to_s390_cond[c];
1515
1516    if (!c2const) {
1517        opc = (type == TCG_TYPE_I32
1518               ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
1519               : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
1520        tgen_compare_branch(s, opc, cc, r1, c2, l);
1521        return;
1522    }
1523
1524    /*
1525     * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1526     * If the immediate we've been given does not fit that range, we'll
1527     * fall back to separate compare and branch instructions using the
1528     * larger comparison range afforded by COMPARE IMMEDIATE.
1529     */
1530    if (type == TCG_TYPE_I32) {
1531        if (is_unsigned) {
1532            opc = RIEc_CLIJ;
1533            in_range = (uint32_t)c2 == (uint8_t)c2;
1534        } else {
1535            opc = RIEc_CIJ;
1536            in_range = (int32_t)c2 == (int8_t)c2;
1537        }
1538    } else {
1539        if (is_unsigned) {
1540            opc = RIEc_CLGIJ;
1541            in_range = (uint64_t)c2 == (uint8_t)c2;
1542        } else {
1543            opc = RIEc_CGIJ;
1544            in_range = (int64_t)c2 == (int8_t)c2;
1545        }
1546    }
1547    if (in_range) {
1548        tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1549        return;
1550    }
1551
1552    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
1553    tgen_branch(s, cc, l);
1554}
1555
1556static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
1557{
1558    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
1559    if (off == (int32_t)off) {
1560        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1561    } else {
1562        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1563        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1564    }
1565}
1566
1567static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
1568                         const TCGHelperInfo *info)
1569{
1570    tcg_out_call_int(s, dest);
1571}
1572
1573typedef struct {
1574    TCGReg base;
1575    TCGReg index;
1576    int disp;
1577    TCGAtomAlign aa;
1578} HostAddress;
1579
1580bool tcg_target_has_memory_bswap(MemOp memop)
1581{
1582    TCGAtomAlign aa;
1583
1584    if ((memop & MO_SIZE) <= MO_64) {
1585        return true;
1586    }
1587
1588    /*
1589     * Reject 16-byte memop with 16-byte atomicity,
1590     * but do allow a pair of 64-bit operations.
1591     */
1592    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
1593    return aa.atom <= MO_64;
1594}
1595
1596static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
1597                                   HostAddress h)
1598{
1599    switch (opc & (MO_SSIZE | MO_BSWAP)) {
1600    case MO_UB:
1601        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
1602        break;
1603    case MO_SB:
1604        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
1605        break;
1606
1607    case MO_UW | MO_BSWAP:
1608        /* swapped unsigned halfword load with upper bits zeroed */
1609        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1610        tcg_out_ext16u(s, data, data);
1611        break;
1612    case MO_UW:
1613        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
1614        break;
1615
1616    case MO_SW | MO_BSWAP:
1617        /* swapped sign-extended halfword load */
1618        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1619        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
1620        break;
1621    case MO_SW:
1622        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
1623        break;
1624
1625    case MO_UL | MO_BSWAP:
1626        /* swapped unsigned int load with upper bits zeroed */
1627        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1628        tcg_out_ext32u(s, data, data);
1629        break;
1630    case MO_UL:
1631        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
1632        break;
1633
1634    case MO_SL | MO_BSWAP:
1635        /* swapped sign-extended int load */
1636        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1637        tcg_out_ext32s(s, data, data);
1638        break;
1639    case MO_SL:
1640        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
1641        break;
1642
1643    case MO_UQ | MO_BSWAP:
1644        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
1645        break;
1646    case MO_UQ:
1647        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
1648        break;
1649
1650    default:
1651        g_assert_not_reached();
1652    }
1653}
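/*
 * Note on the byte-swapped cases above: LRVH and LRV replace only bits
 * 48..63 and 32..63 of the destination register and leave the upper bits
 * unchanged, hence the explicit zero- or sign-extension that follows;
 * LRVG rewrites all 64 bits and needs no fix-up.
 */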
1654
1655static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
1656                                   HostAddress h)
1657{
1658    switch (opc & (MO_SIZE | MO_BSWAP)) {
1659    case MO_UB:
1660        if (h.disp >= 0 && h.disp < 0x1000) {
1661            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
1662        } else {
1663            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
1664        }
1665        break;
1666
1667    case MO_UW | MO_BSWAP:
1668        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
1669        break;
1670    case MO_UW:
1671        if (h.disp >= 0 && h.disp < 0x1000) {
1672            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
1673        } else {
1674            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
1675        }
1676        break;
1677
1678    case MO_UL | MO_BSWAP:
1679        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
1680        break;
1681    case MO_UL:
1682        if (h.disp >= 0 && h.disp < 0x1000) {
1683            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
1684        } else {
1685            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
1686        }
1687        break;
1688
1689    case MO_UQ | MO_BSWAP:
1690        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
1691        break;
1692    case MO_UQ:
1693        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
1694        break;
1695
1696    default:
1697        g_assert_not_reached();
1698    }
1699}
1700
1701static const TCGLdstHelperParam ldst_helper_param = {
1702    .ntmp = 1, .tmp = { TCG_TMP0 }
1703};
1704
1705static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1706{
1707    MemOp opc = get_memop(lb->oi);
1708
1709    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1710                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1711        return false;
1712    }
1713
1714    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1715    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
1716    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1717
1718    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1719    return true;
1720}
1721
1722static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1723{
1724    MemOp opc = get_memop(lb->oi);
1725
1726    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1727                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1728        return false;
1729    }
1730
1731    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
1732    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);
1733
1734    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1735    return true;
1736}
1737
1738/*
1739 * For softmmu, perform the TLB load and compare.
1740 * For useronly, perform any required alignment tests.
1741 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1742 * is required and fill in @h with the host address for the fast path.
1743 */
1744static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
1745                                           TCGReg addr_reg, MemOpIdx oi,
1746                                           bool is_ld)
1747{
1748    TCGType addr_type = s->addr_type;
1749    TCGLabelQemuLdst *ldst = NULL;
1750    MemOp opc = get_memop(oi);
1751    MemOp s_bits = opc & MO_SIZE;
1752    unsigned a_mask;
1753
1754    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
1755    a_mask = (1 << h->aa.align) - 1;
1756
1757#ifdef CONFIG_SOFTMMU
1758    unsigned s_mask = (1 << s_bits) - 1;
1759    int mem_index = get_mmuidx(oi);
1760    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1761    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1762    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1763    int ofs, a_off;
1764    uint64_t tlb_mask;
1765
1766    ldst = new_ldst_label(s);
1767    ldst->is_ld = is_ld;
1768    ldst->oi = oi;
1769    ldst->addrlo_reg = addr_reg;
1770
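    /*
     * Shift the page index into place as a byte offset into the TLB fast
     * table, mask it with the table mask, and add the table base; TCG_TMP0
     * then points at the CPUTLBEntry for this access.
     */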
1771    tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
1772                 s->page_bits - CPU_TLB_ENTRY_BITS);
1773
1774    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1775    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
1776    tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
1777    tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
1778
1779    /*
1780     * For aligned accesses, we check the first byte and include the alignment
1781     * bits within the address.  For unaligned accesses, we use the address of
1782     * the last byte of the access to check that we do not cross a page boundary.
1783     */
1784    a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
1785    tlb_mask = (uint64_t)s->page_mask | a_mask;
1786    if (a_off == 0) {
1787        tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
1788    } else {
1789        tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
1790        tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
1791    }
1792
1793    if (is_ld) {
1794        ofs = offsetof(CPUTLBEntry, addr_read);
1795    } else {
1796        ofs = offsetof(CPUTLBEntry, addr_write);
1797    }
1798    if (addr_type == TCG_TYPE_I32) {
1799        ofs += HOST_BIG_ENDIAN * 4;
1800        tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1801    } else {
1802        tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1803    }
1804
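    /*
     * Emit the first halfword of BRC (opcode and condition mask) and
     * reserve the second halfword for the displacement; the slow path
     * patches it via R_390_PC16DBL to branch out of line on a TLB miss.
     */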
1805    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1806    ldst->label_ptr[0] = s->code_ptr++;
1807
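    /*
     * Load the host-address addend from the matching TLB entry; combined
     * with the guest address (explicitly for 32-bit, via base+index
     * addressing for 64-bit) it forms the host address for the fast path.
     */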
1808    h->index = TCG_TMP0;
1809    tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
1810                 offsetof(CPUTLBEntry, addend));
1811
1812    if (addr_type == TCG_TYPE_I32) {
1813        tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
1814        h->base = TCG_REG_NONE;
1815    } else {
1816        h->base = addr_reg;
1817    }
1818    h->disp = 0;
1819#else
1820    if (a_mask) {
1821        ldst = new_ldst_label(s);
1822        ldst->is_ld = is_ld;
1823        ldst->oi = oi;
1824        ldst->addrlo_reg = addr_reg;
1825
1826        /* We expect a_bits to max out at 7, well within TMLL's 16-bit immediate. */
1827        tcg_debug_assert(a_mask <= 0xffff);
1828        tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
1829
1830        tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
1831        ldst->label_ptr[0] = s->code_ptr++;
1832    }
1833
1834    h->base = addr_reg;
1835    if (addr_type == TCG_TYPE_I32) {
1836        tcg_out_ext32u(s, TCG_TMP0, addr_reg);
1837        h->base = TCG_TMP0;
1838    }
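    /*
     * A guest_base below 0x80000 fits the 20-bit signed displacement of the
     * RXY memory insns and can be folded into h->disp; larger values were
     * loaded into TCG_GUEST_BASE_REG by the prologue.
     */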
1839    if (guest_base < 0x80000) {
1840        h->index = TCG_REG_NONE;
1841        h->disp = guest_base;
1842    } else {
1843        h->index = TCG_GUEST_BASE_REG;
1844        h->disp = 0;
1845    }
1846#endif
1847
1848    return ldst;
1849}
1850
1851static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1852                            MemOpIdx oi, TCGType data_type)
1853{
1854    TCGLabelQemuLdst *ldst;
1855    HostAddress h;
1856
1857    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1858    tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
1859
1860    if (ldst) {
1861        ldst->type = data_type;
1862        ldst->datalo_reg = data_reg;
1863        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1864    }
1865}
1866
1867static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1868                            MemOpIdx oi, TCGType data_type)
1869{
1870    TCGLabelQemuLdst *ldst;
1871    HostAddress h;
1872
1873    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1874    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1875
1876    if (ldst) {
1877        ldst->type = data_type;
1878        ldst->datalo_reg = data_reg;
1879        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1880    }
1881}
1882
1883static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
1884                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1885{
1886    TCGLabel *l1 = NULL, *l2 = NULL;
1887    TCGLabelQemuLdst *ldst;
1888    HostAddress h;
1889    bool need_bswap;
1890    bool use_pair;
1891    S390Opcode insn;
1892
1893    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1894
1895    use_pair = h.aa.atom < MO_128;
1896    need_bswap = get_memop(oi) & MO_BSWAP;
1897
1898    if (!use_pair) {
1899        /*
1900         * Atomicity requires that we use LPQ/STPQ.  If we've already
1901         * checked for 16-byte alignment, that's all we need.  If we
1902         * arrive with lesser alignment, then it has been determined that
1903         * an access with less than 16-byte alignment may be satisfied
1904         * with two 8-byte accesses, so test the alignment at runtime.
1905         */
1905        if (h.aa.align < MO_128) {
1906            use_pair = true;
1907            l1 = gen_new_label();
1908            l2 = gen_new_label();
1909
1910            tcg_out_insn(s, RI, TMLL, addr_reg, 15);
1911            tgen_branch(s, 7, l1); /* CC in {1,2,3} */
1912        }
1913
1914        tcg_debug_assert(!need_bswap);
1915        tcg_debug_assert(datalo & 1);
1916        tcg_debug_assert(datahi == datalo - 1);
1917        insn = is_ld ? RXY_LPQ : RXY_STPQ;
1918        tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp);
1919
1920        if (use_pair) {
1921            tgen_branch(s, S390_CC_ALWAYS, l2);
1922            tcg_out_label(s, l1);
1923        }
1924    }
1925    if (use_pair) {
1926        TCGReg d1, d2;
1927
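        /*
         * Without bswap, the high half lives at the lower address
         * (big-endian order); with bswap, the halves are swapped as well
         * as the bytes within each half, so the low half comes first.
         */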
1928        if (need_bswap) {
1929            d1 = datalo, d2 = datahi;
1930            insn = is_ld ? RXY_LRVG : RXY_STRVG;
1931        } else {
1932            d1 = datahi, d2 = datalo;
1933            insn = is_ld ? RXY_LG : RXY_STG;
1934        }
1935
1936        if (h.base == d1 || h.index == d1) {
1937            tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp);
1938            h.base = TCG_TMP0;
1939            h.index = TCG_REG_NONE;
1940            h.disp = 0;
1941        }
1942        tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp);
1943        tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8);
1944    }
1945    if (l2) {
1946        tcg_out_label(s, l2);
1947    }
1948
1949    if (ldst) {
1950        ldst->type = TCG_TYPE_I128;
1951        ldst->datalo_reg = datalo;
1952        ldst->datahi_reg = datahi;
1953        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1954    }
1955}
1956
1957static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1958{
1959    /* Reuse the zeroing that exists for goto_ptr.  */
1960    if (a0 == 0) {
1961        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
1962    } else {
1963        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1964        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1965    }
1966}
1967
1968static void tcg_out_goto_tb(TCGContext *s, int which)
1969{
1970    /*
1971     * The branch displacement must be aligned for atomic patching;
1972     * see whether we need to add an extra nop before the branch.
1973     */
1974    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1975        tcg_out16(s, NOP);
1976    }
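    /*
     * Emit the first halfword of BRCL, record the location of its 32-bit
     * displacement, and skip over it; tb_target_set_jmp_target patches the
     * displacement when the jump is linked or reset.
     */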
1977    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1978    set_jmp_insn_offset(s, which);
1979    s->code_ptr += 2;
1980    set_jmp_reset_offset(s, which);
1981}
1982
1983void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1984                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1985{
1986    if (!HAVE_FACILITY(GEN_INST_EXT)) {
1987        return;
1988    }
1989    /* patch the branch destination */
1990    uintptr_t addr = tb->jmp_target_addr[n];
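    /* The displacement is in halfwords, relative to the start of the BRCL,
       which begins 2 bytes before the displacement field.  */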
1991    intptr_t disp = addr - (jmp_rx - 2);
1992    qatomic_set((int32_t *)jmp_rw, disp / 2);
1993    /* no need to flush icache explicitly */
1994}
1995
1996# define OP_32_64(x) \
1997        case glue(glue(INDEX_op_,x),_i32): \
1998        case glue(glue(INDEX_op_,x),_i64)
1999
2000static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2001                              const TCGArg args[TCG_MAX_OP_ARGS],
2002                              const int const_args[TCG_MAX_OP_ARGS])
2003{
2004    S390Opcode op, op2;
2005    TCGArg a0, a1, a2;
2006
2007    switch (opc) {
2008    case INDEX_op_goto_ptr:
2009        a0 = args[0];
2010        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
2011        break;
2012
2013    OP_32_64(ld8u):
2014        /* ??? LLC (RXY format) is only present with the extended-immediate
2015           facility, whereas LLGC is always present.  */
2016        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
2017        break;
2018
2019    OP_32_64(ld8s):
2020        /* ??? LB is no smaller than LGB, so there is no point in using it.  */
2021        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
2022        break;
2023
2024    OP_32_64(ld16u):
2025        /* ??? LLH (RXY format) is only present with the extended-immediate
2026           facility, whereas LLGH is always present.  */
2027        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
2028        break;
2029
2030    case INDEX_op_ld16s_i32:
2031        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
2032        break;
2033
2034    case INDEX_op_ld_i32:
2035        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2036        break;
2037
2038    OP_32_64(st8):
2039        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
2040                    TCG_REG_NONE, args[2]);
2041        break;
2042
2043    OP_32_64(st16):
2044        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
2045                    TCG_REG_NONE, args[2]);
2046        break;
2047
2048    case INDEX_op_st_i32:
2049        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2050        break;
2051
2052    case INDEX_op_add_i32:
2053        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2054        if (const_args[2]) {
2055        do_addi_32:
2056            if (a0 == a1) {
2057                if (a2 == (int16_t)a2) {
2058                    tcg_out_insn(s, RI, AHI, a0, a2);
2059                    break;
2060                }
2061                tcg_out_insn(s, RIL, AFI, a0, a2);
2062                break;
2063            }
2064            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2065        } else if (a0 == a1) {
2066            tcg_out_insn(s, RR, AR, a0, a2);
2067        } else {
2068            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2069        }
2070        break;
2071    case INDEX_op_sub_i32:
2072        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2073        if (const_args[2]) {
2074            a2 = -a2;
2075            goto do_addi_32;
2076        } else if (a0 == a1) {
2077            tcg_out_insn(s, RR, SR, a0, a2);
2078        } else {
2079            tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
2080        }
2081        break;
2082
2083    case INDEX_op_and_i32:
2084        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2085        if (const_args[2]) {
2086            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2087            tgen_andi(s, TCG_TYPE_I32, a0, a2);
2088        } else if (a0 == a1) {
2089            tcg_out_insn(s, RR, NR, a0, a2);
2090        } else {
2091            tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
2092        }
2093        break;
2094    case INDEX_op_or_i32:
2095        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2096        if (const_args[2]) {
2097            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2098            tgen_ori(s, a0, a2);
2099        } else if (a0 == a1) {
2100            tcg_out_insn(s, RR, OR, a0, a2);
2101        } else {
2102            tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
2103        }
2104        break;
2105    case INDEX_op_xor_i32:
2106        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2107        if (const_args[2]) {
2108            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2109            tcg_out_insn(s, RIL, XILF, a0, a2);
2110        } else if (a0 == a1) {
2111            tcg_out_insn(s, RR, XR, args[0], args[2]);
2112        } else {
2113            tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
2114        }
2115        break;
2116
2117    case INDEX_op_andc_i32:
2118        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2119        if (const_args[2]) {
2120            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2121            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
2122        } else {
2123            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
2124        }
2125        break;
2126    case INDEX_op_orc_i32:
2127        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2128        if (const_args[2]) {
2129            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2130            tgen_ori(s, a0, (uint32_t)~a2);
2131        } else {
2132            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
2133        }
2134        break;
2135    case INDEX_op_eqv_i32:
2136        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2137        if (const_args[2]) {
2138            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2139            tcg_out_insn(s, RIL, XILF, a0, ~a2);
2140        } else {
2141            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
2142        }
2143        break;
2144    case INDEX_op_nand_i32:
2145        tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
2146        break;
2147    case INDEX_op_nor_i32:
2148        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
2149        break;
2150
2151    case INDEX_op_neg_i32:
2152        tcg_out_insn(s, RR, LCR, args[0], args[1]);
2153        break;
2154    case INDEX_op_not_i32:
2155        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
2156        break;
2157
2158    case INDEX_op_mul_i32:
2159        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2160        if (const_args[2]) {
2161            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2162            if (a2 == (int16_t)a2) {
2163                tcg_out_insn(s, RI, MHI, a0, a2);
2164            } else {
2165                tcg_out_insn(s, RIL, MSFI, a0, a2);
2166            }
2167        } else if (a0 == a1) {
2168            tcg_out_insn(s, RRE, MSR, a0, a2);
2169        } else {
2170            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
2171        }
2172        break;
2173
2174    case INDEX_op_div2_i32:
2175        tcg_debug_assert(args[0] == args[2]);
2176        tcg_debug_assert(args[1] == args[3]);
2177        tcg_debug_assert((args[1] & 1) == 0);
2178        tcg_debug_assert(args[0] == args[1] + 1);
2179        tcg_out_insn(s, RR, DR, args[1], args[4]);
2180        break;
2181    case INDEX_op_divu2_i32:
2182        tcg_debug_assert(args[0] == args[2]);
2183        tcg_debug_assert(args[1] == args[3]);
2184        tcg_debug_assert((args[1] & 1) == 0);
2185        tcg_debug_assert(args[0] == args[1] + 1);
2186        tcg_out_insn(s, RRE, DLR, args[1], args[4]);
2187        break;
2188
2189    case INDEX_op_shl_i32:
2190        op = RS_SLL;
2191        op2 = RSY_SLLK;
2192    do_shift32:
2193        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2194        if (a0 == a1) {
2195            if (const_args[2]) {
2196                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
2197            } else {
2198                tcg_out_sh32(s, op, a0, a2, 0);
2199            }
2200        } else {
2201            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift.  */
2202            if (const_args[2]) {
2203                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
2204            } else {
2205                tcg_out_sh64(s, op2, a0, a1, a2, 0);
2206            }
2207        }
2208        break;
2209    case INDEX_op_shr_i32:
2210        op = RS_SRL;
2211        op2 = RSY_SRLK;
2212        goto do_shift32;
2213    case INDEX_op_sar_i32:
2214        op = RS_SRA;
2215        op2 = RSY_SRAK;
2216        goto do_shift32;
2217
2218    case INDEX_op_rotl_i32:
2219        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
2220        if (const_args[2]) {
2221            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
2222        } else {
2223            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
2224        }
2225        break;
2226    case INDEX_op_rotr_i32:
2227        if (const_args[2]) {
2228            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
2229                         TCG_REG_NONE, (32 - args[2]) & 31);
2230        } else {
2231            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2232            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
2233        }
2234        break;
2235
2236    case INDEX_op_bswap16_i32:
2237        a0 = args[0], a1 = args[1], a2 = args[2];
2238        tcg_out_insn(s, RRE, LRVR, a0, a1);
2239        if (a2 & TCG_BSWAP_OS) {
2240            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
2241        } else {
2242            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
2243        }
2244        break;
2245    case INDEX_op_bswap16_i64:
2246        a0 = args[0], a1 = args[1], a2 = args[2];
2247        tcg_out_insn(s, RRE, LRVGR, a0, a1);
2248        if (a2 & TCG_BSWAP_OS) {
2249            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
2250        } else {
2251            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
2252        }
2253        break;
2254
2255    case INDEX_op_bswap32_i32:
2256        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2257        break;
2258    case INDEX_op_bswap32_i64:
2259        a0 = args[0], a1 = args[1], a2 = args[2];
2260        tcg_out_insn(s, RRE, LRVR, a0, a1);
2261        if (a2 & TCG_BSWAP_OS) {
2262            tcg_out_ext32s(s, a0, a0);
2263        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2264            tcg_out_ext32u(s, a0, a0);
2265        }
2266        break;
2267
2268    case INDEX_op_add2_i32:
2269        if (const_args[4]) {
2270            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2271        } else {
2272            tcg_out_insn(s, RR, ALR, args[0], args[4]);
2273        }
2274        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2275        break;
2276    case INDEX_op_sub2_i32:
2277        if (const_args[4]) {
2278            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2279        } else {
2280            tcg_out_insn(s, RR, SLR, args[0], args[4]);
2281        }
2282        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2283        break;
2284
2285    case INDEX_op_br:
2286        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2287        break;
2288
2289    case INDEX_op_brcond_i32:
2290        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2291                    args[1], const_args[1], arg_label(args[3]));
2292        break;
2293    case INDEX_op_setcond_i32:
2294        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2295                     args[2], const_args[2]);
2296        break;
2297    case INDEX_op_movcond_i32:
2298        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2299                     args[2], const_args[2], args[3], const_args[3], args[4]);
2300        break;
2301
2302    case INDEX_op_qemu_ld_a32_i32:
2303    case INDEX_op_qemu_ld_a64_i32:
2304        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
2305        break;
2306    case INDEX_op_qemu_ld_a32_i64:
2307    case INDEX_op_qemu_ld_a64_i64:
2308        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
2309        break;
2310    case INDEX_op_qemu_st_a32_i32:
2311    case INDEX_op_qemu_st_a64_i32:
2312        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
2313        break;
2314    case INDEX_op_qemu_st_a32_i64:
2315    case INDEX_op_qemu_st_a64_i64:
2316        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
2317        break;
2318    case INDEX_op_qemu_ld_a32_i128:
2319    case INDEX_op_qemu_ld_a64_i128:
2320        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
2321        break;
2322    case INDEX_op_qemu_st_a32_i128:
2323    case INDEX_op_qemu_st_a64_i128:
2324        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
2325        break;
2326
2327    case INDEX_op_ld16s_i64:
2328        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2329        break;
2330    case INDEX_op_ld32u_i64:
2331        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2332        break;
2333    case INDEX_op_ld32s_i64:
2334        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2335        break;
2336    case INDEX_op_ld_i64:
2337        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2338        break;
2339
2340    case INDEX_op_st32_i64:
2341        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2342        break;
2343    case INDEX_op_st_i64:
2344        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2345        break;
2346
2347    case INDEX_op_add_i64:
2348        a0 = args[0], a1 = args[1], a2 = args[2];
2349        if (const_args[2]) {
2350        do_addi_64:
2351            if (a0 == a1) {
2352                if (a2 == (int16_t)a2) {
2353                    tcg_out_insn(s, RI, AGHI, a0, a2);
2354                    break;
2355                }
2356                if (a2 == (int32_t)a2) {
2357                    tcg_out_insn(s, RIL, AGFI, a0, a2);
2358                    break;
2359                }
2360                if (a2 == (uint32_t)a2) {
2361                    tcg_out_insn(s, RIL, ALGFI, a0, a2);
2362                    break;
2363                }
2364                if (-a2 == (uint32_t)-a2) {
2365                    tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2366                    break;
2367                }
2368            }
2369            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2370        } else if (a0 == a1) {
2371            tcg_out_insn(s, RRE, AGR, a0, a2);
2372        } else {
2373            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2374        }
2375        break;
2376    case INDEX_op_sub_i64:
2377        a0 = args[0], a1 = args[1], a2 = args[2];
2378        if (const_args[2]) {
2379            a2 = -a2;
2380            goto do_addi_64;
2381        } else {
2382            tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
2383        }
2384        break;
2385
2386    case INDEX_op_and_i64:
2387        a0 = args[0], a1 = args[1], a2 = args[2];
2388        if (const_args[2]) {
2389            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2390            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2391        } else {
2392            tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
2393        }
2394        break;
2395    case INDEX_op_or_i64:
2396        a0 = args[0], a1 = args[1], a2 = args[2];
2397        if (const_args[2]) {
2398            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2399            tgen_ori(s, a0, a2);
2400        } else {
2401            tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
2402        }
2403        break;
2404    case INDEX_op_xor_i64:
2405        a0 = args[0], a1 = args[1], a2 = args[2];
2406        if (const_args[2]) {
2407            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2408            tgen_xori(s, a0, a2);
2409        } else {
2410            tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
2411        }
2412        break;
2413
2414    case INDEX_op_andc_i64:
2415        a0 = args[0], a1 = args[1], a2 = args[2];
2416        if (const_args[2]) {
2417            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2418            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
2419        } else {
2420            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
2421        }
2422        break;
2423    case INDEX_op_orc_i64:
2424        a0 = args[0], a1 = args[1], a2 = args[2];
2425        if (const_args[2]) {
2426            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2427            tgen_ori(s, a0, ~a2);
2428        } else {
2429            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
2430        }
2431        break;
2432    case INDEX_op_eqv_i64:
2433        a0 = args[0], a1 = args[1], a2 = args[2];
2434        if (const_args[2]) {
2435            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2436            tgen_xori(s, a0, ~a2);
2437        } else {
2438            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
2439        }
2440        break;
2441    case INDEX_op_nand_i64:
2442        tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
2443        break;
2444    case INDEX_op_nor_i64:
2445        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
2446        break;
2447
2448    case INDEX_op_neg_i64:
2449        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2450        break;
2451    case INDEX_op_not_i64:
2452        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
2453        break;
2454    case INDEX_op_bswap64_i64:
2455        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2456        break;
2457
2458    case INDEX_op_mul_i64:
2459        a0 = args[0], a1 = args[1], a2 = args[2];
2460        if (const_args[2]) {
2461            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2462            if (a2 == (int16_t)a2) {
2463                tcg_out_insn(s, RI, MGHI, a0, a2);
2464            } else {
2465                tcg_out_insn(s, RIL, MSGFI, a0, a2);
2466            }
2467        } else if (a0 == a1) {
2468            tcg_out_insn(s, RRE, MSGR, a0, a2);
2469        } else {
2470            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
2471        }
2472        break;
2473
2474    case INDEX_op_div2_i64:
2475        /*
2476         * ??? We get an unnecessary sign-extension of the dividend
2477         * into op0 with this definition, but since we do in fact always
2478         * produce both quotient and remainder, using INDEX_op_div_i64
2479         * instead would require jumping through even more hoops.
2480         */
2481        tcg_debug_assert(args[0] == args[2]);
2482        tcg_debug_assert(args[1] == args[3]);
2483        tcg_debug_assert((args[1] & 1) == 0);
2484        tcg_debug_assert(args[0] == args[1] + 1);
2485        tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
2486        break;
2487    case INDEX_op_divu2_i64:
2488        tcg_debug_assert(args[0] == args[2]);
2489        tcg_debug_assert(args[1] == args[3]);
2490        tcg_debug_assert((args[1] & 1) == 0);
2491        tcg_debug_assert(args[0] == args[1] + 1);
2492        tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
2493        break;
2494    case INDEX_op_mulu2_i64:
2495        tcg_debug_assert(args[0] == args[2]);
2496        tcg_debug_assert((args[1] & 1) == 0);
2497        tcg_debug_assert(args[0] == args[1] + 1);
2498        tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
2499        break;
2500    case INDEX_op_muls2_i64:
2501        tcg_debug_assert((args[1] & 1) == 0);
2502        tcg_debug_assert(args[0] == args[1] + 1);
2503        tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
2504        break;
2505
2506    case INDEX_op_shl_i64:
2507        op = RSY_SLLG;
2508    do_shift64:
2509        if (const_args[2]) {
2510            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2511        } else {
2512            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2513        }
2514        break;
2515    case INDEX_op_shr_i64:
2516        op = RSY_SRLG;
2517        goto do_shift64;
2518    case INDEX_op_sar_i64:
2519        op = RSY_SRAG;
2520        goto do_shift64;
2521
2522    case INDEX_op_rotl_i64:
2523        if (const_args[2]) {
2524            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2525                         TCG_REG_NONE, args[2]);
2526        } else {
2527            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2528        }
2529        break;
2530    case INDEX_op_rotr_i64:
2531        if (const_args[2]) {
2532            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2533                         TCG_REG_NONE, (64 - args[2]) & 63);
2534        } else {
2535            /* We can use the smaller 32-bit negate because only the
2536               low 6 bits are examined for the rotate.  */
2537            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2538            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2539        }
2540        break;
2541
2542    case INDEX_op_add2_i64:
2543        if (const_args[4]) {
2544            if ((int64_t)args[4] >= 0) {
2545                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2546            } else {
2547                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2548            }
2549        } else {
2550            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2551        }
2552        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2553        break;
2554    case INDEX_op_sub2_i64:
2555        if (const_args[4]) {
2556            if ((int64_t)args[4] >= 0) {
2557                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2558            } else {
2559                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2560            }
2561        } else {
2562            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2563        }
2564        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2565        break;
2566
2567    case INDEX_op_brcond_i64:
2568        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2569                    args[1], const_args[1], arg_label(args[3]));
2570        break;
2571    case INDEX_op_setcond_i64:
2572        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2573                     args[2], const_args[2]);
2574        break;
2575    case INDEX_op_movcond_i64:
2576        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2577                     args[2], const_args[2], args[3], const_args[3], args[4]);
2578        break;
2579
2580    OP_32_64(deposit):
2581        a0 = args[0], a1 = args[1], a2 = args[2];
2582        if (const_args[1]) {
2583            tgen_deposit(s, a0, a2, args[3], args[4], 1);
2584        } else {
2585            /* Since we can't support "0Z" as a constraint, we allow a1 in
2586               any register.  Fix things up as if it were a matching constraint.  */
2587            if (a0 != a1) {
2588                TCGType type = (opc == INDEX_op_deposit_i64);
2589                if (a0 == a2) {
2590                    tcg_out_mov(s, type, TCG_TMP0, a2);
2591                    a2 = TCG_TMP0;
2592                }
2593                tcg_out_mov(s, type, a0, a1);
2594            }
2595            tgen_deposit(s, a0, a2, args[3], args[4], 0);
2596        }
2597        break;
2598
2599    OP_32_64(extract):
2600        tgen_extract(s, args[0], args[1], args[2], args[3]);
2601        break;
2602
2603    case INDEX_op_clz_i64:
2604        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2605        break;
2606
2607    case INDEX_op_ctpop_i32:
2608        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
2609        break;
2610    case INDEX_op_ctpop_i64:
2611        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
2612        break;
2613
2614    case INDEX_op_mb:
2615        /* The host memory model is quite strong; we need only
2616           serialize the instruction stream.  */
2617        if (args[0] & TCG_MO_ST_LD) {
2618            /* fast-bcr-serialization facility (45) is present; bcr 14,0 serializes */
2619            tcg_out_insn(s, RR, BCR, 14, 0);
2620        }
2621        break;
2622
2623    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2624    case INDEX_op_mov_i64:
2625    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2626    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2627    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2628    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2629    case INDEX_op_ext8s_i64:
2630    case INDEX_op_ext8u_i32:
2631    case INDEX_op_ext8u_i64:
2632    case INDEX_op_ext16s_i32:
2633    case INDEX_op_ext16s_i64:
2634    case INDEX_op_ext16u_i32:
2635    case INDEX_op_ext16u_i64:
2636    case INDEX_op_ext32s_i64:
2637    case INDEX_op_ext32u_i64:
2638    case INDEX_op_ext_i32_i64:
2639    case INDEX_op_extu_i32_i64:
2640    case INDEX_op_extrl_i64_i32:
2641    default:
2642        g_assert_not_reached();
2643    }
2644}
2645
2646static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2647                            TCGReg dst, TCGReg src)
2648{
2649    if (is_general_reg(src)) {
2650        /* Replicate general register into two MO_64. */
2651        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
2652        if (vece == MO_64) {
2653            return true;
2654        }
2655        src = dst;
2656    }
2657
2658    /*
2659     * Recall that the "standard" integer, within a vector, is the
2660     * rightmost element of the leftmost doubleword, à la VLLEZ.
2661     */
2662    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
2663    return true;
2664}
2665
2666static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2667                             TCGReg dst, TCGReg base, intptr_t offset)
2668{
2669    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
2670    return true;
2671}
2672
2673static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2674                             TCGReg dst, int64_t val)
2675{
2676    int i, mask, msb, lsb;
2677
2678    /* Look for int16_t elements.  */
2679    if (vece <= MO_16 ||
2680        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
2681        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
2682        return;
2683    }
2684
2685    /* Look for bit masks.  */
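    /*
     * VGM fills each element with ones from bit msb through bit lsb,
     * wrapping around when msb > lsb; the swap below handles masks whose
     * run of ones wraps past the element boundary.
     */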
2686    if (vece == MO_32) {
2687        if (risbg_mask((int32_t)val)) {
2688            /* Handle wraparound by swapping msb and lsb.  */
2689            if ((val & 0x80000001u) == 0x80000001u) {
2690                msb = 32 - ctz32(~val);
2691                lsb = clz32(~val) - 1;
2692            } else {
2693                msb = clz32(val);
2694                lsb = 31 - ctz32(val);
2695            }
2696            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
2697            return;
2698        }
2699    } else {
2700        if (risbg_mask(val)) {
2701            /* Handle wraparound by swapping msb and lsb.  */
2702            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
2704                msb = 64 - ctz64(~val);
2705                lsb = clz64(~val) - 1;
2706            } else {
2707                msb = clz64(val);
2708                lsb = 63 - ctz64(val);
2709            }
2710            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
2711            return;
2712        }
2713    }
2714
2715    /* Look for all bytes 0x00 or 0xff.  */
2716    for (i = mask = 0; i < 8; i++) {
2717        uint8_t byte = val >> (i * 8);
2718        if (byte == 0xff) {
2719            mask |= 1 << i;
2720        } else if (byte != 0) {
2721            break;
2722        }
2723    }
2724    if (i == 8) {
2725        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
2726        return;
2727    }
2728
2729    /* Otherwise, stuff it in the constant pool.  */
2730    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
2731    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
2732    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
2733}
2734
2735static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2736                           unsigned vecl, unsigned vece,
2737                           const TCGArg args[TCG_MAX_OP_ARGS],
2738                           const int const_args[TCG_MAX_OP_ARGS])
2739{
2740    TCGType type = vecl + TCG_TYPE_V64;
2741    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
2742
2743    switch (opc) {
2744    case INDEX_op_ld_vec:
2745        tcg_out_ld(s, type, a0, a1, a2);
2746        break;
2747    case INDEX_op_st_vec:
2748        tcg_out_st(s, type, a0, a1, a2);
2749        break;
2750    case INDEX_op_dupm_vec:
2751        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2752        break;
2753
2754    case INDEX_op_abs_vec:
2755        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
2756        break;
2757    case INDEX_op_neg_vec:
2758        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
2759        break;
2760    case INDEX_op_not_vec:
2761        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
2762        break;
2763
2764    case INDEX_op_add_vec:
2765        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
2766        break;
2767    case INDEX_op_sub_vec:
2768        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
2769        break;
2770    case INDEX_op_and_vec:
2771        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
2772        break;
2773    case INDEX_op_andc_vec:
2774        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
2775        break;
2776    case INDEX_op_mul_vec:
2777        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
2778        break;
2779    case INDEX_op_or_vec:
2780        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
2781        break;
2782    case INDEX_op_orc_vec:
2783        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
2784        break;
2785    case INDEX_op_xor_vec:
2786        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
2787        break;
2788    case INDEX_op_nand_vec:
2789        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
2790        break;
2791    case INDEX_op_nor_vec:
2792        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
2793        break;
2794    case INDEX_op_eqv_vec:
2795        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
2796        break;
2797
2798    case INDEX_op_shli_vec:
2799        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
2800        break;
2801    case INDEX_op_shri_vec:
2802        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
2803        break;
2804    case INDEX_op_sari_vec:
2805        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
2806        break;
2807    case INDEX_op_rotli_vec:
2808        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
2809        break;
2810    case INDEX_op_shls_vec:
2811        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
2812        break;
2813    case INDEX_op_shrs_vec:
2814        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
2815        break;
2816    case INDEX_op_sars_vec:
2817        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
2818        break;
2819    case INDEX_op_rotls_vec:
2820        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
2821        break;
2822    case INDEX_op_shlv_vec:
2823        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
2824        break;
2825    case INDEX_op_shrv_vec:
2826        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
2827        break;
2828    case INDEX_op_sarv_vec:
2829        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
2830        break;
2831    case INDEX_op_rotlv_vec:
2832        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
2833        break;
2834
2835    case INDEX_op_smin_vec:
2836        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
2837        break;
2838    case INDEX_op_smax_vec:
2839        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
2840        break;
2841    case INDEX_op_umin_vec:
2842        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
2843        break;
2844    case INDEX_op_umax_vec:
2845        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
2846        break;
2847
2848    case INDEX_op_bitsel_vec:
2849        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
2850        break;
2851
2852    case INDEX_op_cmp_vec:
2853        switch ((TCGCond)args[3]) {
2854        case TCG_COND_EQ:
2855            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
2856            break;
2857        case TCG_COND_GT:
2858            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
2859            break;
2860        case TCG_COND_GTU:
2861            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
2862            break;
2863        default:
2864            g_assert_not_reached();
2865        }
2866        break;
2867
2868    case INDEX_op_s390_vuph_vec:
2869        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
2870        break;
2871    case INDEX_op_s390_vupl_vec:
2872        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
2873        break;
2874    case INDEX_op_s390_vpks_vec:
2875        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
2876        break;
2877
2878    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov.  */
2879    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec.  */
2880    default:
2881        g_assert_not_reached();
2882    }
2883}
2884
2885int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2886{
2887    switch (opc) {
2888    case INDEX_op_abs_vec:
2889    case INDEX_op_add_vec:
2890    case INDEX_op_and_vec:
2891    case INDEX_op_andc_vec:
2892    case INDEX_op_bitsel_vec:
2893    case INDEX_op_eqv_vec:
2894    case INDEX_op_nand_vec:
2895    case INDEX_op_neg_vec:
2896    case INDEX_op_nor_vec:
2897    case INDEX_op_not_vec:
2898    case INDEX_op_or_vec:
2899    case INDEX_op_orc_vec:
2900    case INDEX_op_rotli_vec:
2901    case INDEX_op_rotls_vec:
2902    case INDEX_op_rotlv_vec:
2903    case INDEX_op_sari_vec:
2904    case INDEX_op_sars_vec:
2905    case INDEX_op_sarv_vec:
2906    case INDEX_op_shli_vec:
2907    case INDEX_op_shls_vec:
2908    case INDEX_op_shlv_vec:
2909    case INDEX_op_shri_vec:
2910    case INDEX_op_shrs_vec:
2911    case INDEX_op_shrv_vec:
2912    case INDEX_op_smax_vec:
2913    case INDEX_op_smin_vec:
2914    case INDEX_op_sub_vec:
2915    case INDEX_op_umax_vec:
2916    case INDEX_op_umin_vec:
2917    case INDEX_op_xor_vec:
2918        return 1;
2919    case INDEX_op_cmp_vec:
2920    case INDEX_op_cmpsel_vec:
2921    case INDEX_op_rotrv_vec:
2922        return -1;
2923    case INDEX_op_mul_vec:
2924        return vece < MO_64;
2925    case INDEX_op_ssadd_vec:
2926    case INDEX_op_sssub_vec:
2927        return vece < MO_64 ? -1 : 0;
2928    default:
2929        return 0;
2930    }
2931}
2932
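/*
 * Emit a vector comparison using only the EQ/GT/GTU forms provided by the
 * instruction set, swapping the operands or inverting the condition as
 * needed.  Returns true if the caller must invert (NOT) the result.
 */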
2933static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
2934                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2935{
2936    bool need_swap = false, need_inv = false;
2937
2938    switch (cond) {
2939    case TCG_COND_EQ:
2940    case TCG_COND_GT:
2941    case TCG_COND_GTU:
2942        break;
2943    case TCG_COND_NE:
2944    case TCG_COND_LE:
2945    case TCG_COND_LEU:
2946        need_inv = true;
2947        break;
2948    case TCG_COND_LT:
2949    case TCG_COND_LTU:
2950        need_swap = true;
2951        break;
2952    case TCG_COND_GE:
2953    case TCG_COND_GEU:
2954        need_swap = need_inv = true;
2955        break;
2956    default:
2957        g_assert_not_reached();
2958    }
2959
2960    if (need_inv) {
2961        cond = tcg_invert_cond(cond);
2962    }
2963    if (need_swap) {
2964        TCGv_vec t1;
2965        t1 = v1, v1 = v2, v2 = t1;
2966        cond = tcg_swap_cond(cond);
2967    }
2968
2969    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
2970              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
2971
2972    return need_inv;
2973}
2974
2975static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
2976                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2977{
2978    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
2979        tcg_gen_not_vec(vece, v0, v0);
2980    }
2981}
2982
2983static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
2984                              TCGv_vec c1, TCGv_vec c2,
2985                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
2986{
2987    TCGv_vec t = tcg_temp_new_vec(type);
2988
2989    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
2990        /* Invert the sense of the compare by swapping arguments.  */
2991        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
2992    } else {
2993        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
2994    }
2995    tcg_temp_free_vec(t);
2996}
2997
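/*
 * Expand saturating signed add/sub: unpack both operands to the next wider
 * element size, perform the arithmetic there, and pack the results back
 * down with the signed-saturating VPKS.
 */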
2998static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
2999                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
3000{
3001    TCGv_vec h1 = tcg_temp_new_vec(type);
3002    TCGv_vec h2 = tcg_temp_new_vec(type);
3003    TCGv_vec l1 = tcg_temp_new_vec(type);
3004    TCGv_vec l2 = tcg_temp_new_vec(type);
3005
3006    tcg_debug_assert(vece < MO_64);
3007
3008    /* Unpack with sign-extension. */
3009    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3010              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
3011    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
3012              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
3013
3014    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3015              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
3016    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
3017              tcgv_vec_arg(l2), tcgv_vec_arg(v2));
3018
3019    /* Arithmetic on a wider element size. */
3020    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
3021              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
3022    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
3023              tcgv_vec_arg(l1), tcgv_vec_arg(l2));
3024
3025    /* Pack with saturation. */
3026    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
3027              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));
3028
3029    tcg_temp_free_vec(h1);
3030    tcg_temp_free_vec(h2);
3031    tcg_temp_free_vec(l1);
3032    tcg_temp_free_vec(l2);
3033}
3034
3035void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3036                       TCGArg a0, ...)
3037{
3038    va_list va;
3039    TCGv_vec v0, v1, v2, v3, v4, t0;
3040
3041    va_start(va, a0);
3042    v0 = temp_tcgv_vec(arg_temp(a0));
3043    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3044    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3045
3046    switch (opc) {
3047    case INDEX_op_cmp_vec:
3048        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3049        break;
3050
3051    case INDEX_op_cmpsel_vec:
3052        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3053        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3054        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
3055        break;
3056
3057    case INDEX_op_rotrv_vec:
3058        t0 = tcg_temp_new_vec(type);
3059        tcg_gen_neg_vec(vece, t0, v2);
3060        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3061        tcg_temp_free_vec(t0);
3062        break;
3063
3064    case INDEX_op_ssadd_vec:
3065        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
3066        break;
3067    case INDEX_op_sssub_vec:
3068        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
3069        break;
3070
3071    default:
3072        g_assert_not_reached();
3073    }
3074    va_end(va);
3075}
3076
3077static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3078{
3079    switch (op) {
3080    case INDEX_op_goto_ptr:
3081        return C_O0_I1(r);
3082
3083    case INDEX_op_ld8u_i32:
3084    case INDEX_op_ld8u_i64:
3085    case INDEX_op_ld8s_i32:
3086    case INDEX_op_ld8s_i64:
3087    case INDEX_op_ld16u_i32:
3088    case INDEX_op_ld16u_i64:
3089    case INDEX_op_ld16s_i32:
3090    case INDEX_op_ld16s_i64:
3091    case INDEX_op_ld_i32:
3092    case INDEX_op_ld32u_i64:
3093    case INDEX_op_ld32s_i64:
3094    case INDEX_op_ld_i64:
3095        return C_O1_I1(r, r);
3096
3097    case INDEX_op_st8_i32:
3098    case INDEX_op_st8_i64:
3099    case INDEX_op_st16_i32:
3100    case INDEX_op_st16_i64:
3101    case INDEX_op_st_i32:
3102    case INDEX_op_st32_i64:
3103    case INDEX_op_st_i64:
3104        return C_O0_I2(r, r);
3105
3106    case INDEX_op_add_i32:
3107    case INDEX_op_add_i64:
3108    case INDEX_op_shl_i64:
3109    case INDEX_op_shr_i64:
3110    case INDEX_op_sar_i64:
3111    case INDEX_op_rotl_i32:
3112    case INDEX_op_rotl_i64:
3113    case INDEX_op_rotr_i32:
3114    case INDEX_op_rotr_i64:
3115    case INDEX_op_setcond_i32:
3116        return C_O1_I2(r, r, ri);
3117    case INDEX_op_setcond_i64:
3118        return C_O1_I2(r, r, rA);
3119
3120    case INDEX_op_clz_i64:
3121        return C_O1_I2(r, r, rI);
3122
3123    case INDEX_op_sub_i32:
3124    case INDEX_op_sub_i64:
3125    case INDEX_op_and_i32:
3126    case INDEX_op_or_i32:
3127    case INDEX_op_xor_i32:
3128        return C_O1_I2(r, r, ri);
3129    case INDEX_op_and_i64:
3130        return C_O1_I2(r, r, rNKR);
3131    case INDEX_op_or_i64:
3132    case INDEX_op_xor_i64:
3133        return C_O1_I2(r, r, rK);
3134
3135    case INDEX_op_andc_i32:
3136    case INDEX_op_orc_i32:
3137    case INDEX_op_eqv_i32:
3138        return C_O1_I2(r, r, ri);
3139    case INDEX_op_andc_i64:
3140        return C_O1_I2(r, r, rKR);
3141    case INDEX_op_orc_i64:
3142    case INDEX_op_eqv_i64:
3143        return C_O1_I2(r, r, rNK);
3144
3145    case INDEX_op_nand_i32:
3146    case INDEX_op_nand_i64:
3147    case INDEX_op_nor_i32:
3148    case INDEX_op_nor_i64:
3149        return C_O1_I2(r, r, r);
3150
3151    case INDEX_op_mul_i32:
3152        return (HAVE_FACILITY(MISC_INSN_EXT2)
3153                ? C_O1_I2(r, r, ri)
3154                : C_O1_I2(r, 0, ri));
3155    case INDEX_op_mul_i64:
3156        return (HAVE_FACILITY(MISC_INSN_EXT2)
3157                ? C_O1_I2(r, r, rJ)
3158                : C_O1_I2(r, 0, rJ));
3159
3160    case INDEX_op_shl_i32:
3161    case INDEX_op_shr_i32:
3162    case INDEX_op_sar_i32:
3163        return C_O1_I2(r, r, ri);
3164
3165    case INDEX_op_brcond_i32:
3166        return C_O0_I2(r, ri);
3167    case INDEX_op_brcond_i64:
3168        return C_O0_I2(r, rA);
3169
3170    case INDEX_op_bswap16_i32:
3171    case INDEX_op_bswap16_i64:
3172    case INDEX_op_bswap32_i32:
3173    case INDEX_op_bswap32_i64:
3174    case INDEX_op_bswap64_i64:
3175    case INDEX_op_neg_i32:
3176    case INDEX_op_neg_i64:
3177    case INDEX_op_not_i32:
3178    case INDEX_op_not_i64:
3179    case INDEX_op_ext8s_i32:
3180    case INDEX_op_ext8s_i64:
3181    case INDEX_op_ext8u_i32:
3182    case INDEX_op_ext8u_i64:
3183    case INDEX_op_ext16s_i32:
3184    case INDEX_op_ext16s_i64:
3185    case INDEX_op_ext16u_i32:
3186    case INDEX_op_ext16u_i64:
3187    case INDEX_op_ext32s_i64:
3188    case INDEX_op_ext32u_i64:
3189    case INDEX_op_ext_i32_i64:
3190    case INDEX_op_extu_i32_i64:
3191    case INDEX_op_extract_i32:
3192    case INDEX_op_extract_i64:
3193    case INDEX_op_ctpop_i32:
3194    case INDEX_op_ctpop_i64:
3195        return C_O1_I1(r, r);
3196
3197    case INDEX_op_qemu_ld_a32_i32:
3198    case INDEX_op_qemu_ld_a64_i32:
3199    case INDEX_op_qemu_ld_a32_i64:
3200    case INDEX_op_qemu_ld_a64_i64:
3201        return C_O1_I1(r, r);
3202    case INDEX_op_qemu_st_a32_i64:
3203    case INDEX_op_qemu_st_a64_i64:
3204    case INDEX_op_qemu_st_a32_i32:
3205    case INDEX_op_qemu_st_a64_i32:
3206        return C_O0_I2(r, r);
3207    case INDEX_op_qemu_ld_a32_i128:
3208    case INDEX_op_qemu_ld_a64_i128:
3209        return C_O2_I1(o, m, r);
3210    case INDEX_op_qemu_st_a32_i128:
3211    case INDEX_op_qemu_st_a64_i128:
3212        return C_O0_I3(o, m, r);
3213
3214    case INDEX_op_deposit_i32:
3215    case INDEX_op_deposit_i64:
3216        return C_O1_I2(r, rZ, r);
3217
3218    case INDEX_op_movcond_i32:
3219        return C_O1_I4(r, r, ri, rI, r);
3220    case INDEX_op_movcond_i64:
3221        return C_O1_I4(r, r, rA, rI, r);
3222
3223    case INDEX_op_div2_i32:
3224    case INDEX_op_div2_i64:
3225    case INDEX_op_divu2_i32:
3226    case INDEX_op_divu2_i64:
3227        return C_O2_I3(o, m, 0, 1, r);
3228
3229    case INDEX_op_mulu2_i64:
3230        return C_O2_I2(o, m, 0, r);
3231    case INDEX_op_muls2_i64:
3232        return C_O2_I2(o, m, r, r);
3233
3234    case INDEX_op_add2_i32:
3235    case INDEX_op_sub2_i32:
3236        return C_O2_I4(r, r, 0, 1, ri, r);
3237
3238    case INDEX_op_add2_i64:
3239    case INDEX_op_sub2_i64:
3240        return C_O2_I4(r, r, 0, 1, rA, r);
3241
3242    case INDEX_op_st_vec:
3243        return C_O0_I2(v, r);
3244    case INDEX_op_ld_vec:
3245    case INDEX_op_dupm_vec:
3246        return C_O1_I1(v, r);
3247    case INDEX_op_dup_vec:
3248        return C_O1_I1(v, vr);
3249    case INDEX_op_abs_vec:
3250    case INDEX_op_neg_vec:
3251    case INDEX_op_not_vec:
3252    case INDEX_op_rotli_vec:
3253    case INDEX_op_sari_vec:
3254    case INDEX_op_shli_vec:
3255    case INDEX_op_shri_vec:
3256    case INDEX_op_s390_vuph_vec:
3257    case INDEX_op_s390_vupl_vec:
3258        return C_O1_I1(v, v);
3259    case INDEX_op_add_vec:
3260    case INDEX_op_sub_vec:
3261    case INDEX_op_and_vec:
3262    case INDEX_op_andc_vec:
3263    case INDEX_op_or_vec:
3264    case INDEX_op_orc_vec:
3265    case INDEX_op_xor_vec:
3266    case INDEX_op_nand_vec:
3267    case INDEX_op_nor_vec:
3268    case INDEX_op_eqv_vec:
3269    case INDEX_op_cmp_vec:
3270    case INDEX_op_mul_vec:
3271    case INDEX_op_rotlv_vec:
3272    case INDEX_op_rotrv_vec:
3273    case INDEX_op_shlv_vec:
3274    case INDEX_op_shrv_vec:
3275    case INDEX_op_sarv_vec:
3276    case INDEX_op_smax_vec:
3277    case INDEX_op_smin_vec:
3278    case INDEX_op_umax_vec:
3279    case INDEX_op_umin_vec:
3280    case INDEX_op_s390_vpks_vec:
3281        return C_O1_I2(v, v, v);
3282    case INDEX_op_rotls_vec:
3283    case INDEX_op_shls_vec:
3284    case INDEX_op_shrs_vec:
3285    case INDEX_op_sars_vec:
3286        return C_O1_I2(v, v, r);
3287    case INDEX_op_bitsel_vec:
3288        return C_O1_I3(v, v, v, v);
3289
3290    default:
3291        g_assert_not_reached();
3292    }
3293}
3294
3295/*
3296 * Mainline glibc added HWCAP_S390_VX before it was kernel ABI.
3297 * Some distros have fixed this up locally, others have not.
3298 */
3299#ifndef HWCAP_S390_VXRS
3300#define HWCAP_S390_VXRS 2048
3301#endif
3302
3303static void query_s390_facilities(void)
3304{
3305    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3306    const char *which;
3307
3308    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
3309       is present on all 64-bit systems, but let's check for it anyway.  */
3310    if (hwcap & HWCAP_S390_STFLE) {
3311        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
3312        register void *r1 __asm__("1") = s390_facilities;
3313
3314        /* stfle 0(%r1) */
3315        asm volatile(".word 0xb2b0,0x1000"
3316                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
3317    }
3318
3319    /*
3320     * Use of vector registers requires OS support beyond the facility bit.
3321     * If the kernel does not advertise support, disable the facility bits.
3322     * There is nothing else we currently care about in the third word, so
3323     * disable VECTOR with one store.
3324     */
3325    if (!(hwcap & HWCAP_S390_VXRS)) {
3326        s390_facilities[2] = 0;
3327    }
3328
3329    /*
3330     * The minimum supported CPU revision is z196.
3331     * Check for all required facilities.
3332     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
3333     */
3334    if (!HAVE_FACILITY(LONG_DISP)) {
3335        which = "long-displacement";
3336        goto fail;
3337    }
3338    if (!HAVE_FACILITY(EXT_IMM)) {
3339        which = "extended-immediate";
3340        goto fail;
3341    }
3342    if (!HAVE_FACILITY(GEN_INST_EXT)) {
3343        which = "general-instructions-extension";
3344        goto fail;
3345    }
3346    /*
3347     * Facility 45 is a big bin that contains: distinct-operands,
3348     * fast-BCR-serialization, high-word, population-count,
3349     * interlocked-access-1, and load/store-on-condition-1
3350     */
3351    if (!HAVE_FACILITY(45)) {
3352        which = "45";
3353        goto fail;
3354    }
3355    return;
3356
3357 fail:
3358    error_report("%s: missing required facility %s", __func__, which);
3359    exit(EXIT_FAILURE);
3360}
3361
3362static void tcg_target_init(TCGContext *s)
3363{
3364    query_s390_facilities();
3365
3366    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
3367    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
3368    if (HAVE_FACILITY(VECTOR)) {
3369        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3370        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3371    }
3372
3373    tcg_target_call_clobber_regs = 0;
3374    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3375    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
3376    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3377    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3378    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3379    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3380    /* The r6 register is technically call-saved, but it's also a parameter
3381       register, so it can get killed by setup for the qemu_st helper.  */
3382    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3383    /* The return-address register %r14 can be considered call-clobbered.  */
3384    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
3385
3386    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3387    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3388    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3389    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3390    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3391    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3392    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3393    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3394    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3395    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3396    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3397    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3398    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
3399    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
3400    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
3401    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
3402    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
3403    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
3404    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
3405    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
3406    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
3407    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
3408    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
3409    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
3410
3411    s->reserved_regs = 0;
3412    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
3413    /* XXX many insns can't be used with R0, so it is best to avoid it for now */
3414    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
3415    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3416}
3417
3418#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
3419                           + TCG_STATIC_CALL_ARGS_SIZE           \
3420                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
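/*
 * Rough frame layout relative to %r15 after the prologue's aghi below:
 * the ABI register save/backchain area occupies the first
 * TCG_TARGET_CALL_STACK_OFFSET bytes, followed by TCG_STATIC_CALL_ARGS_SIZE
 * bytes for outgoing stack arguments of helper calls, followed by the
 * CPU_TEMP_BUF_NLONGS temporaries registered with tcg_set_frame() below.
 */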
3421
3422static void tcg_target_qemu_prologue(TCGContext *s)
3423{
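    /*
     * The s390x ELF ABI reserves a 160-byte area at the incoming %r15 for
     * the callee's use; %r6..%r15 are conventionally saved at offsets
     * 48..127 of that area, hence the displacement 48 below.
     */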
3424    /* stmg %r6,%r15,48(%r15) (save registers) */
3425    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
3426
3427    /* aghi %r15,-frame_size */
3428    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
3429
3430    tcg_set_frame(s, TCG_REG_CALL_STACK,
3431                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
3432                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3433
3434#ifndef CONFIG_SOFTMMU
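    /*
     * A guest_base below 0x80000 fits in the signed 20-bit displacement of
     * the long-displacement memory instructions and can be folded directly
     * into guest accesses; only larger values need to be loaded into a
     * dedicated register, which is then reserved for all generated code.
     */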
3435    if (guest_base >= 0x80000) {
3436        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
3437        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
3438    }
3439#endif
3440
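    /*
     * tcg_qemu_tb_exec() enters here with the CPUArchState pointer in the
     * first argument register (%r2) and the TB address in the second (%r3):
     * move the former into TCG_AREG0 and branch to the latter.
     */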
3441    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3442
3443    /* br %r3 (go to TB) */
3444    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
3445
3446    /*
3447     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3448     * and fall through to the rest of the epilogue.
3449     */
3450    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3451    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
3452
3453    /* TB epilogue */
3454    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
3455
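    /*
     * The registers were saved in the caller's save area at the original
     * %r15 + 48, which is the current %r15 + FRAME_SIZE + 48; reloading
     * %r15 as part of the lmg also pops the frame.
     */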
3456    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
3457    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
3458                 FRAME_SIZE + 48);
3459
3460    /* br %r14 (return) */
3461    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
3462}
3463
3464static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3465{
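    /*
     * Each tcg_insn_unit is a 16-bit halfword; filling with 0x07 produces
     * 0x0707, i.e. "bcr 0,%r7", a branch-on-condition with a zero mask
     * that never branches and therefore acts as a nop.
     */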
3466    memset(p, 0x07, count * sizeof(tcg_insn_unit));
3467}
3468
3469typedef struct {
3470    DebugFrameHeader h;
3471    uint8_t fde_def_cfa[4];
3472    uint8_t fde_reg_ofs[18];
3473} DebugFrame;
3474
3475/* We're expecting a 2-byte uleb128-encoded value.  */
3476QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
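/*
 * fde_def_cfa below encodes FRAME_SIZE as that two-byte uleb128: the low
 * seven bits with the continuation bit set, then the remaining bits.  As a
 * worked (hypothetical) example, a frame size of 0x1a0 would emit the bytes
 * 0xa0, 0x03, since 0x1a0 == 0x20 | (0x03 << 7).
 */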
3477
3478#define ELF_HOST_MACHINE  EM_S390
3479
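/*
 * With data_align = 8, each DW_CFA_offset pair below places its register at
 * CFA + factored_offset * 8; e.g. the pair 0x86, 6 records %r6 at CFA + 48,
 * matching the stmg in the prologue (the CFA is the value of %r15 at entry).
 */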
3480static const DebugFrame debug_frame = {
3481    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3482    .h.cie.id = -1,
3483    .h.cie.version = 1,
3484    .h.cie.code_align = 1,
3485    .h.cie.data_align = 8,                /* sleb128 8 */
3486    .h.cie.return_column = TCG_REG_R14,
3487
3488    /* Total FDE size does not include the "len" member.  */
3489    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3490
3491    .fde_def_cfa = {
3492        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
3493        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3494        (FRAME_SIZE >> 7)
3495    },
3496    .fde_reg_ofs = {
3497        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
3498        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
3499        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
3500        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
3501        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
3502        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
3503        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
3504        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
3505        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
3506    }
3507};
3508
3509void tcg_register_jit(const void *buf, size_t buf_size)
3510{
3511    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3512}
3513