xref: /openbmc/qemu/tcg/s390x/tcg-target.c.inc (revision c4601322)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "elf.h"

#define TCG_CT_CONST_S16        (1 << 8)
#define TCG_CT_CONST_S32        (1 << 9)
#define TCG_CT_CONST_S33        (1 << 10)
#define TCG_CT_CONST_ZERO       (1 << 11)
#define TCG_CT_CONST_P32        (1 << 12)
#define TCG_CT_CONST_INV        (1 << 13)
#define TCG_CT_CONST_INVRISBG   (1 << 14)

#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS      MAKE_64BIT_MASK(32, 32)

/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R1

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
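/* e.g. RIEf_RISBG = 0xec55: the 0xec byte begins the 6-byte insn and
   the 0x55 byte ends it, with 32 bits of operand fields in between.  */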
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,
    RI_TMLL     = 0xa701,

    RIEb_CGRJ    = 0xec64,
    RIEb_CLGRJ   = 0xec65,
    RIEb_CLRJ    = 0xec77,
    RIEb_CRJ     = 0xec76,

    RIEc_CGIJ    = 0xec7c,
    RIEc_CIJ     = 0xec7e,
    RIEc_CLGIJ   = 0xec7d,
    RIEc_CLIJ    = 0xec7f,

    RIEf_RISBG   = 0xec55,

    RIEg_LOCGHI  = 0xec46,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_ALGFR   = 0xb91a,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRFa_MGRK   = 0xb9ec,
    RRFa_MSRKC  = 0xb9fd,
    RRFa_MSGRKC = 0xb9ed,
    RRFa_NCRK   = 0xb9f5,
    RRFa_NCGRK  = 0xb9e5,
    RRFa_NNRK   = 0xb974,
    RRFa_NNGRK  = 0xb964,
    RRFa_NORK   = 0xb976,
    RRFa_NOGRK  = 0xb966,
    RRFa_NRK    = 0xb9f4,
    RRFa_NGRK   = 0xb9e4,
    RRFa_NXRK   = 0xb977,
    RRFa_NXGRK  = 0xb967,
    RRFa_OCRK   = 0xb975,
    RRFa_OCGRK  = 0xb965,
    RRFa_ORK    = 0xb9f6,
    RRFa_OGRK   = 0xb9e6,
    RRFa_SRK    = 0xb9f9,
    RRFa_SGRK   = 0xb9e9,
    RRFa_SLRK   = 0xb9fb,
    RRFa_SLGRK  = 0xb9eb,
    RRFa_XRK    = 0xb9f7,
    RRFa_XGRK   = 0xb9e7,

    RRFam_SELGR = 0xb9e3,

    RRFc_LOCR   = 0xb9f2,
    RRFc_LOCGR  = 0xb9e2,
    RRFc_POPCNT = 0xb9e1,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    VRIa_VGBM   = 0xe744,
    VRIa_VREPI  = 0xe745,
    VRIb_VGM    = 0xe746,
    VRIc_VREP   = 0xe74d,

    VRRa_VLC    = 0xe7de,
    VRRa_VLP    = 0xe7df,
    VRRa_VLR    = 0xe756,
    VRRc_VA     = 0xe7f3,
    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
    VRRc_VCH    = 0xe7fb,   /* " */
    VRRc_VCHL   = 0xe7f9,   /* " */
    VRRc_VERLLV = 0xe773,
    VRRc_VESLV  = 0xe770,
    VRRc_VESRAV = 0xe77a,
    VRRc_VESRLV = 0xe778,
    VRRc_VML    = 0xe7a2,
    VRRc_VMN    = 0xe7fe,
    VRRc_VMNL   = 0xe7fc,
    VRRc_VMX    = 0xe7ff,
    VRRc_VMXL   = 0xe7fd,
    VRRc_VN     = 0xe768,
    VRRc_VNC    = 0xe769,
    VRRc_VNN    = 0xe76e,
    VRRc_VNO    = 0xe76b,
    VRRc_VNX    = 0xe76c,
    VRRc_VO     = 0xe76a,
    VRRc_VOC    = 0xe76f,
    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
    VRRc_VS     = 0xe7f7,
    VRRa_VUPH   = 0xe7d7,
    VRRa_VUPL   = 0xe7d6,
    VRRc_VX     = 0xe76d,
    VRRe_VSEL   = 0xe78d,
    VRRf_VLVGP  = 0xe762,

    VRSa_VERLL  = 0xe733,
    VRSa_VESL   = 0xe730,
    VRSa_VESRA  = 0xe73a,
    VRSa_VESRL  = 0xe738,
    VRSb_VLVG   = 0xe722,
    VRSc_VLGV   = 0xe721,

    VRX_VL      = 0xe706,
    VRX_VLLEZ   = 0xe704,
    VRX_VLREP   = 0xe705,
    VRX_VST     = 0xe70e,
    VRX_VSTEF   = 0xe70b,
    VRX_VSTEG   = 0xe70a,

    NOP         = 0x0707,
} S390Opcode;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
    "%v8",  "%v9",  "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,

    /* V8-V15 are call saved, and omitted. */
    TCG_REG_V0,
    TCG_REG_V1,
    TCG_REG_V2,
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
    TCG_REG_V20,
    TCG_REG_V21,
    TCG_REG_V22,
    TCG_REG_V23,
    TCG_REG_V24,
    TCG_REG_V25,
    TCG_REG_V26,
    TCG_REG_V27,
    TCG_REG_V28,
    TCG_REG_V29,
    TCG_REG_V30,
    TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot == 0);
    return TCG_REG_R2;
}

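/* The 4-bit condition-code mask used in the M1 field of BRANCH ON
   CONDITION: 8 selects cc 0 (equal), 4 cc 1 (low), 2 cc 2 (high),
   and 1 cc 3 (overflow).  */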
#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here we have no
   unsigned instruction variant, but since the test is against zero we
   can remap the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
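/* For an unsigned value tested against zero, LTU can never be true and
   GEU is always true, while LEU holds exactly for zero (EQ) and GTU
   exactly for non-zero (NE); hence the remapping above.  */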

#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEUQ] = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEUQ] = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEUQ] = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEUQ] = helper_be_stq_mmu,
};
#endif

static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[3];

static inline bool is_general_reg(TCGReg r)
{
    return r <= TCG_REG_R15;
}

static inline bool is_vector_reg(TCGReg r)
{
    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
}

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - src_rx;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(src_rw, pcrel2);
            return true;
        }
        break;
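    /*
     * R_390_20 is the 20-bit signed displacement of the RXY/RSY
     * instruction formats, stored as the low 12 bits (DL) followed
     * by the high 8 bits (DH) within the instruction.
     */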
    case R_390_20:
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)src_rw & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(src_rw, old);
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

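/* If VAL fits entirely within one aligned 16-bit halfword, return the
   index of that halfword (0 being least significant); otherwise -1.  */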
static int is_const_p16(uint64_t val)
{
    for (int i = 0; i < 4; ++i) {
        uint64_t mask = 0xffffull << (i * 16);
        if ((val & ~mask) == 0) {
            return i;
        }
    }
    return -1;
}

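/* Likewise, for VAL fitting within one aligned 32-bit word.  */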
static int is_const_p32(uint64_t val)
{
    if ((val & 0xffffffff00000000ull) == 0) {
        return 0;
    }
    if ((val & 0x00000000ffffffffull) == 0) {
        return 1;
    }
    return -1;
}

/*
 * Accept bit patterns like these:
 *  0....01....1
 *  1....10....0
 *  1..10..01..1
 *  0..01..10..0
 * Copied from gcc sources.
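 * e.g. 0x00fff000 and its wrapped complement 0xffffffffff000fff are
 * accepted; 0x00f00f00 (two separate runs of ones) is rejected.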
 */
static bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_S16) {
        return val == (int16_t)val;
    } else if (ct & TCG_CT_CONST_S32) {
        return val == (int32_t)val;
    } else if (ct & TCG_CT_CONST_S33) {
        return val >= -0xffffffffll && val <= 0xffffffffll;
    } else if (ct & TCG_CT_CONST_ZERO) {
        return val == 0;
    }

    if (ct & TCG_CT_CONST_INV) {
        val = ~val;
    }
    /*
     * Note that is_const_p16 is a subset of is_const_p32,
     * so we don't need both constraints.
     */
    if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
        return true;
    }

    return false;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF-a without the m4 field */
static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
}

/* RRF-a with the m4 field */
static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
                               TCGReg r1, TCGReg r2, TCGReg r3, int m4)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
                              int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    /*
     * Shift bit 4 of each regno to its corresponding bit of RXB.
     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
     * is the left-shift of the 4th operand.
     */
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}

static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_debug_assert(is_vector_reg(v4));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
}

static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg r2, TCGReg r3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_general_reg(r2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
    tcg_out16(s, r3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}

static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_general_reg(r1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(x2));
    tcg_debug_assert(is_general_reg(b2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
    tcg_out16(s, (b2 << 12) | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)


/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src == dst) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(dst) && is_general_reg(src))) {
            tcg_out_insn(s, RR, LR, dst, src);
            break;
        }
        /* fallthru */

    case TCG_TYPE_I64:
        if (likely(is_general_reg(dst))) {
            if (likely(is_general_reg(src))) {
                tcg_out_insn(s, RRE, LGR, dst, src);
            } else {
                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
            }
            break;
        } else if (is_general_reg(src)) {
            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static const S390Opcode li_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};
static const S390Opcode oi_insns[4] = {
    RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
};
static const S390Opcode lif_insns[2] = {
    RIL_LLILF, RIL_LLIHF,
};

/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    ptrdiff_t pc_off;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
        return;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (sval == (int32_t)sval) {
        tcg_out_insn(s, RIL, LGFI, ret, sval);
        return;
    }

    i = is_const_p32(uval);
    if (i >= 0) {
        tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
        return;
    }

    /* Try for PC-relative address load.  For odd addresses, add one. */
    pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
    if (pc_off == (int32_t)pc_off) {
        tcg_out_insn(s, RIL, LARL, ret, pc_off);
        if (sval & 1) {
            tcg_out_insn(s, RI, AGHI, ret, 1);
        }
        return;
    }

    /* Otherwise, load it by parts. */
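    /* e.g. 0xdeadbeef00001234 becomes LLILL 0x1234 + OIHF 0xdeadbeef.  */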
    i = is_const_p16((uint32_t)uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, LLILF, ret, uval);
    }
    uval >>= 32;
    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, OIHF, ret, uval);
    }
}

/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   The RX format opcode, if the operation has one (e.g. STC),
             otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}

static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
                            TCGReg data, TCGReg base, TCGReg index,
                            tcg_target_long ofs, int m3)
{
    if (ofs < 0 || ofs >= 0x1000) {
        if (ofs >= -0x80000 && ofs < 0x80000) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
            base = TCG_TMP0;
            index = TCG_REG_NONE;
            ofs = 0;
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
            if (index != TCG_REG_NONE) {
                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
            }
            index = TCG_TMP0;
            ofs = 0;
        }
    }
    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
}

/* load data without address translation or endianness conversion */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
            break;
        }
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
        }
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

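/* Declining these two operations should be safe: the common tcg code
   falls back to its generic handling (materializing the constant in a
   register, breaking register cycles with a temporary).  */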
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGBR, dest, src);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGCR, dest, src);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGHR, dest, src);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGHR, dest, src);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32s(s, dest, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_mov(s, TCG_TYPE_I32, dest, src);
}

static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
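        /*
         * e.g. val = 0xffff00000000ffff: ~val = 0x0000ffffffff0000,
         * giving msb = 64 - 16 = 48 and lsb = 16 - 1 = 15, i.e. the
         * wrapped bit range 48..15 in the IBM numbering (bit 0 is
         * the MSB).
         */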
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}

static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tcg_out_ext32u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xff) {
        tcg_out_ext8u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xffff) {
        tcg_out_ext16u(s, dest, dest);
        return;
    }

    i = is_const_p16(~val & valid);
    if (i >= 0) {
        tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(~val & valid);
    tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
    if (i >= 0) {
        tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
        return;
    }

    if (risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    g_assert_not_reached();
}

static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
{
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    i = is_const_p16(val);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(val);
    if (i >= 0) {
        tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
        return;
    }

    g_assert_not_reached();
}

static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
{
    switch (is_const_p32(val)) {
    case 0:
        tcg_out_insn(s, RIL, XILF, dest, val);
        break;
    case 1:
        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        break;
    default:
        g_assert_not_reached();
    }
}

static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                     TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
{
    bool is_unsigned = is_unsigned_cond(c);
    TCGCond inv_c = tcg_invert_cond(c);
    S390Opcode op;

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                *inv_cc = tcg_cond_to_ltr_cond[inv_c];
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            op = (is_unsigned ? RIL_CLFI : RIL_CFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /*
         * Constraints are for a signed 33-bit operand, which is a
         * convenient superset of this signed/unsigned test.
         */
        if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
            op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /* Load everything else into a register. */
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
        c2 = TCG_TMP0;
    }

    if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    *inv_cc = tcg_cond_to_s390_cond[inv_c];
    return tcg_cond_to_s390_cond[c];
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    int inv_cc;
    return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (HAVE_FACILITY(LOAD_ON_COND2)) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIEg, LOCGHI, dest, 1, cc);
        return;
    }

 restart:
    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /* As above, but we're looking for borrow, or !carry.
           The second insn computes d - d - borrow, or -1 for true
           and 0 for false.  So we must mask to 1 bit afterward.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        tgen_andi(s, type, dest, 1);
        return;

    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (!c2const) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            cond = tcg_swap_cond(cond);
            goto restart;
        }
        break;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
    tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
    tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
    tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
}

static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
                             TCGArg v3, int v3const, TCGReg v4,
                             int cc, int inv_cc)
{
    TCGReg src;

    if (v3const) {
        if (dest == v4) {
            if (HAVE_FACILITY(LOAD_ON_COND2)) {
                /* Emit: if (cc) dest = v3. */
                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
                return;
            }
            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
            src = TCG_TMP0;
        } else {
            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
            tcg_out_insn(s, RI, LGHI, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    } else {
        if (HAVE_FACILITY(MISC_INSN_EXT3)) {
            /* Emit: dest = cc ? v3 : v4. */
            tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
            return;
        }
        if (dest == v4) {
            src = v3;
        } else {
            tcg_out_mov(s, type, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    }

    /* Emit: if (cc) dest = src. */
    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const, TCGReg v4)
{
    int cc, inv_cc;

    cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}

static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
        return;
    }

    /*
     * Conditions from FLOGR are:
     *   2 -> one bit found
     *   8 -> no one bit found
     */
    tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
}

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    /* With MIE3, and bit 0 of m4 set, we get the complete result. */
    if (HAVE_FACILITY(MISC_INSN_EXT3)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, dest, src);
            src = dest;
        }
        tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
        return;
    }

    /* Without MIE3, each byte gets the count of bits for the byte. */
    tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);

    /* Multiply to sum each byte at the top of the word. */
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
        tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
        tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
        tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
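    /* TCG numbers bits with the LSB as bit 0, while RISBG numbers them
       with the MSB as bit 0; hence the reflection below.  e.g. ofs = 8,
       len = 16 selects the RISBG bit range 40..55.  */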
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}

static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}

static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    /* Format RIE-b */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, 0);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    /* Format RIE-c */
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, 0);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;
    bool is_unsigned = is_unsigned_cond(c);
    bool in_range;
    S390Opcode opc;

    cc = tcg_cond_to_s390_cond[c];

    if (!c2const) {
        opc = (type == TCG_TYPE_I32
               ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
               : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
        tgen_compare_branch(s, opc, cc, r1, c2, l);
        return;
    }

    /*
     * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
     * If the immediate we've been given does not fit that range, we'll
     * fall back to separate compare and branch instructions using the
     * larger comparison range afforded by COMPARE IMMEDIATE.
     */
    if (type == TCG_TYPE_I32) {
        if (is_unsigned) {
            opc = RIEc_CLIJ;
            in_range = (uint32_t)c2 == (uint8_t)c2;
        } else {
            opc = RIEc_CIJ;
            in_range = (int32_t)c2 == (int8_t)c2;
        }
    } else {
        if (is_unsigned) {
            opc = RIEc_CLGIJ;
            in_range = (uint64_t)c2 == (uint8_t)c2;
        } else {
            opc = RIEc_CGIJ;
            in_range = (int64_t)c2 == (int8_t)c2;
        }
    }
    if (in_range) {
        tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
        return;
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, dest);
}

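/* The parts of a host address operand: a base register, an optional
   index register (TCG_REG_NONE when absent), and a displacement.  */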
1598typedef struct {
1599    TCGReg base;
1600    TCGReg index;
1601    int disp;
1602} HostAddress;
1603
1604static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
1605                                   HostAddress h)
1606{
1607    switch (opc & (MO_SSIZE | MO_BSWAP)) {
1608    case MO_UB:
1609        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
1610        break;
1611    case MO_SB:
1612        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
1613        break;
1614
1615    case MO_UW | MO_BSWAP:
1616        /* swapped unsigned halfword load with upper bits zeroed */
1617        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1618        tcg_out_ext16u(s, data, data);
1619        break;
1620    case MO_UW:
1621        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
1622        break;
1623
1624    case MO_SW | MO_BSWAP:
1625        /* swapped sign-extended halfword load */
1626        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
1627        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
1628        break;
1629    case MO_SW:
1630        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
1631        break;
1632
1633    case MO_UL | MO_BSWAP:
1634        /* swapped unsigned int load with upper bits zeroed */
1635        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1636        tcg_out_ext32u(s, data, data);
1637        break;
1638    case MO_UL:
1639        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
1640        break;
1641
1642    case MO_SL | MO_BSWAP:
1643        /* swapped sign-extended int load */
1644        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
1645        tcg_out_ext32s(s, data, data);
1646        break;
1647    case MO_SL:
1648        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
1649        break;
1650
1651    case MO_UQ | MO_BSWAP:
1652        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
1653        break;
1654    case MO_UQ:
1655        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
1656        break;
1657
1658    default:
1659        g_assert_not_reached();
1660    }
1661}
1662
1663static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
1664                                   HostAddress h)
1665{
1666    switch (opc & (MO_SIZE | MO_BSWAP)) {
1667    case MO_UB:
1668        if (h.disp >= 0 && h.disp < 0x1000) {
1669            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
1670        } else {
1671            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
1672        }
1673        break;
1674
1675    case MO_UW | MO_BSWAP:
1676        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
1677        break;
1678    case MO_UW:
1679        if (h.disp >= 0 && h.disp < 0x1000) {
1680            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
1681        } else {
1682            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
1683        }
1684        break;
1685
1686    case MO_UL | MO_BSWAP:
1687        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
1688        break;
1689    case MO_UL:
1690        if (h.disp >= 0 && h.disp < 0x1000) {
1691            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
1692        } else {
1693            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
1694        }
1695        break;
1696
1697    case MO_UQ | MO_BSWAP:
1698        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
1699        break;
1700    case MO_UQ:
1701        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
1702        break;
1703
1704    default:
1705        g_assert_not_reached();
1706    }
1707}
1708
1709#if defined(CONFIG_SOFTMMU)
1710static const TCGLdstHelperParam ldst_helper_param = {
1711    .ntmp = 1, .tmp = { TCG_TMP0 }
1712};
1713
1714static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1715{
1716    MemOp opc = get_memop(lb->oi);
1717
1718    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1719                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1720        return false;
1721    }
1722
1723    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1724    tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1725    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1726
1727    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1728    return true;
1729}
1730
1731static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1732{
1733    MemOp opc = get_memop(lb->oi);
1734
1735    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
1736                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1737        return false;
1738    }
1739
1740    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
1741    tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1742
1743    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1744    return true;
1745}
1746#else
1747static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1748{
1749    if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL,
1750                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
1751        return false;
1752    }
1753
1754    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg);
1755    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1756
1757    /* "Tail call" to the helper, with the return address back inline. */
1758    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr);
1759    tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld
1760                                                 : helper_unaligned_st));
1761    return true;
1762}
1763
1764static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1765{
1766    return tcg_out_fail_alignment(s, l);
1767}
1768
1769static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1770{
1771    return tcg_out_fail_alignment(s, l);
1772}
1773#endif /* CONFIG_SOFTMMU */
1774
1775/*
1776 * For softmmu, perform the TLB load and compare.
1777 * For useronly, perform any required alignment tests.
1778 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1779 * is required and fill in @h with the host address for the fast path.
1780 */
1781static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
1782                                           TCGReg addr_reg, MemOpIdx oi,
1783                                           bool is_ld)
1784{
1785    TCGLabelQemuLdst *ldst = NULL;
1786    MemOp opc = get_memop(oi);
1787    unsigned a_bits = get_alignment_bits(opc);
1788    unsigned a_mask = (1u << a_bits) - 1;
1789
1790#ifdef CONFIG_SOFTMMU
1791    unsigned s_bits = opc & MO_SIZE;
1792    unsigned s_mask = (1 << s_bits) - 1;
1793    int mem_index = get_mmuidx(oi);
1794    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1795    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1796    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1797    int ofs, a_off;
1798    uint64_t tlb_mask;
1799
1800    ldst = new_ldst_label(s);
1801    ldst->is_ld = is_ld;
1802    ldst->oi = oi;
1803    ldst->addrlo_reg = addr_reg;
1804
1805    tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
1806                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
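    /*
     * TCG_TMP0 now holds the address shifted right by (TARGET_PAGE_BITS -
     * CPU_TLB_ENTRY_BITS): the page index, pre-scaled to a byte offset.
     * Masking with CPUTLBDescFast.mask and adding CPUTLBDescFast.table
     * below produces the address of the matching CPUTLBEntry.
     */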
1807
1808    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1809    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
1810    tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
1811    tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
1812
1813    /*
1814     * For aligned accesses, we check the first byte and include the alignment
1815     * bits within the address.  For unaligned accesses, we check that we don't
1816     * cross pages using the address of the last byte of the access.
1817     */
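    /*
     * Worked example: for an 8-byte access with no alignment requirement
     * (a_bits = 0, s_bits = 3), a_off = 7 and we compare using the address
     * of the last byte, so an access that crosses a page boundary resolves
     * to the next page and fails the compare.  With natural alignment
     * (a_bits = 3), a_off = 0 and the three alignment bits are instead
     * kept in tlb_mask, so any misaligned address fails the compare.
     */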
1818    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
1819    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
1820    if (a_off == 0) {
1821        tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
1822    } else {
1823        tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
1824        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R0, tlb_mask);
1825    }
1826
1827    if (is_ld) {
1828        ofs = offsetof(CPUTLBEntry, addr_read);
1829    } else {
1830        ofs = offsetof(CPUTLBEntry, addr_write);
1831    }
1832    if (TARGET_LONG_BITS == 32) {
1833        tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1834    } else {
1835        tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
1836    }
1837
1838    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1839    ldst->label_ptr[0] = s->code_ptr++;
1840
1841    h->index = TCG_TMP0;
1842    tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
1843                 offsetof(CPUTLBEntry, addend));
1844
1845    if (TARGET_LONG_BITS == 32) {
1846        tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
1847        h->base = TCG_REG_NONE;
1848    } else {
1849        h->base = addr_reg;
1850    }
1851    h->disp = 0;
1852#else
1853    if (a_mask) {
1854        ldst = new_ldst_label(s);
1855        ldst->is_ld = is_ld;
1856        ldst->oi = oi;
1857        ldst->addrlo_reg = addr_reg;
1858
1859        /* We are expecting a_bits to max out at 7, so a_mask fits well within the low 16 bits tested by TMLL. */
1860        tcg_debug_assert(a_bits < 16);
1861        tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
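        /*
         * TMLL sets CC 0 iff all tested bits are zero; any set alignment
         * bit yields CC 1, 2 or 3, which the branch below (mask 7)
         * diverts to the slow path.
         */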
1862
1863        tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
1864        ldst->label_ptr[0] = s->code_ptr++;
1865    }
1866
1867    h->base = addr_reg;
1868    if (TARGET_LONG_BITS == 32) {
1869        tcg_out_ext32u(s, TCG_TMP0, addr_reg);
1870        h->base = TCG_TMP0;
1871    }
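    /*
     * A guest_base below 0x80000 fits in the 20-bit signed displacement
     * of the RXY memory forms and can be folded into disp; larger bases
     * live in TCG_GUEST_BASE_REG, set up and reserved by the prologue.
     */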
1872    if (guest_base < 0x80000) {
1873        h->index = TCG_REG_NONE;
1874        h->disp = guest_base;
1875    } else {
1876        h->index = TCG_GUEST_BASE_REG;
1877        h->disp = 0;
1878    }
1879#endif
1880
1881    return ldst;
1882}
1883
1884static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1885                            MemOpIdx oi, TCGType data_type)
1886{
1887    TCGLabelQemuLdst *ldst;
1888    HostAddress h;
1889
1890    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1891    tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);
1892
1893    if (ldst) {
1894        ldst->type = data_type;
1895        ldst->datalo_reg = data_reg;
1896        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1897    }
1898}
1899
1900static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1901                            MemOpIdx oi, TCGType data_type)
1902{
1903    TCGLabelQemuLdst *ldst;
1904    HostAddress h;
1905
1906    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1907    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1908
1909    if (ldst) {
1910        ldst->type = data_type;
1911        ldst->datalo_reg = data_reg;
1912        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1913    }
1914}
1915
1916static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1917{
1918    /* Reuse the zeroing that exists for goto_ptr.  */
1919    if (a0 == 0) {
1920        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
1921    } else {
1922        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1923        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1924    }
1925}
1926
1927static void tcg_out_goto_tb(TCGContext *s, int which)
1928{
1929    /*
1930     * The branch displacement must be aligned for atomic patching;
1931     * see if we need to add an extra nop before the branch.
1932     */
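    /*
     * BRCL is 6 bytes: a 2-byte opcode/mask followed by a 4-byte
     * displacement.  s->code_ptr + 1 is that displacement field; padding
     * with a 2-byte nop keeps it 4-byte aligned, so that
     * tb_target_set_jmp_target can rewrite it with one aligned 32-bit
     * store.
     */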
1933    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1934        tcg_out16(s, NOP);
1935    }
1936    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1937    set_jmp_insn_offset(s, which);
1938    s->code_ptr += 2;
1939    set_jmp_reset_offset(s, which);
1940}
1941
1942void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1943                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1944{
1945    if (!HAVE_FACILITY(GEN_INST_EXT)) {
1946        return;
1947    }
1948    /* patch the branch destination */
1949    uintptr_t addr = tb->jmp_target_addr[n];
1950    intptr_t disp = addr - (jmp_rx - 2);
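    /*
     * jmp_rx points at BRCL's 32-bit displacement field, so jmp_rx - 2 is
     * the start of the instruction; relative branches are relative to the
     * instruction address and counted in halfwords, hence disp / 2.
     */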
1951    qatomic_set((int32_t *)jmp_rw, disp / 2);
1952    /* no need to flush icache explicitly */
1953}
1954
1955# define OP_32_64(x) \
1956        case glue(glue(INDEX_op_,x),_i32): \
1957        case glue(glue(INDEX_op_,x),_i64)
1958
1959static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1960                              const TCGArg args[TCG_MAX_OP_ARGS],
1961                              const int const_args[TCG_MAX_OP_ARGS])
1962{
1963    S390Opcode op, op2;
1964    TCGArg a0, a1, a2;
1965
1966    switch (opc) {
1967    case INDEX_op_goto_ptr:
1968        a0 = args[0];
1969        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
1970        break;
1971
1972    OP_32_64(ld8u):
1973        /* ??? LLC (RXY format) is only present with the extended-immediate
1974           facility, whereas LLGC is always present.  */
1975        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1976        break;
1977
1978    OP_32_64(ld8s):
1979        /* ??? LB is no smaller than LGB, so no point to using it.  */
1980        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1981        break;
1982
1983    OP_32_64(ld16u):
1984        /* ??? LLH (RXY format) is only present with the extended-immediate
1985           facility, whereas LLGH is always present.  */
1986        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1987        break;
1988
1989    case INDEX_op_ld16s_i32:
1990        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1991        break;
1992
1993    case INDEX_op_ld_i32:
1994        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1995        break;
1996
1997    OP_32_64(st8):
1998        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1999                    TCG_REG_NONE, args[2]);
2000        break;
2001
2002    OP_32_64(st16):
2003        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
2004                    TCG_REG_NONE, args[2]);
2005        break;
2006
2007    case INDEX_op_st_i32:
2008        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2009        break;
2010
2011    case INDEX_op_add_i32:
2012        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2013        if (const_args[2]) {
2014        do_addi_32:
2015            if (a0 == a1) {
2016                if (a2 == (int16_t)a2) {
2017                    tcg_out_insn(s, RI, AHI, a0, a2);
2018                    break;
2019                }
2020                tcg_out_insn(s, RIL, AFI, a0, a2);
2021                break;
2022            }
2023            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2024        } else if (a0 == a1) {
2025            tcg_out_insn(s, RR, AR, a0, a2);
2026        } else {
2027            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2028        }
2029        break;
2030    case INDEX_op_sub_i32:
2031        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2032        if (const_args[2]) {
2033            a2 = -a2;
2034            goto do_addi_32;
2035        } else if (a0 == a1) {
2036            tcg_out_insn(s, RR, SR, a0, a2);
2037        } else {
2038            tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
2039        }
2040        break;
2041
2042    case INDEX_op_and_i32:
2043        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2044        if (const_args[2]) {
2045            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2046            tgen_andi(s, TCG_TYPE_I32, a0, a2);
2047        } else if (a0 == a1) {
2048            tcg_out_insn(s, RR, NR, a0, a2);
2049        } else {
2050            tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
2051        }
2052        break;
2053    case INDEX_op_or_i32:
2054        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2055        if (const_args[2]) {
2056            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2057            tgen_ori(s, a0, a2);
2058        } else if (a0 == a1) {
2059            tcg_out_insn(s, RR, OR, a0, a2);
2060        } else {
2061            tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
2062        }
2063        break;
2064    case INDEX_op_xor_i32:
2065        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2066        if (const_args[2]) {
2067            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2068            tcg_out_insn(s, RIL, XILF, a0, a2);
2069        } else if (a0 == a1) {
2070            tcg_out_insn(s, RR, XR, args[0], args[2]);
2071        } else {
2072            tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
2073        }
2074        break;
2075
2076    case INDEX_op_andc_i32:
2077        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2078        if (const_args[2]) {
2079            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2080            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
2081        } else {
2082            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
2083        }
2084        break;
2085    case INDEX_op_orc_i32:
2086        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2087        if (const_args[2]) {
2088            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2089            tgen_ori(s, a0, (uint32_t)~a2);
2090        } else {
2091            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
2092        }
2093        break;
2094    case INDEX_op_eqv_i32:
2095        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
2096        if (const_args[2]) {
2097            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2098            tcg_out_insn(s, RIL, XILF, a0, ~a2);
2099        } else {
2100            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
2101        }
2102        break;
2103    case INDEX_op_nand_i32:
2104        tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
2105        break;
2106    case INDEX_op_nor_i32:
2107        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
2108        break;
2109
2110    case INDEX_op_neg_i32:
2111        tcg_out_insn(s, RR, LCR, args[0], args[1]);
2112        break;
2113    case INDEX_op_not_i32:
2114        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
2115        break;
2116
2117    case INDEX_op_mul_i32:
2118        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2119        if (const_args[2]) {
2120            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2121            if (a2 == (int16_t)a2) {
2122                tcg_out_insn(s, RI, MHI, a0, a2);
2123            } else {
2124                tcg_out_insn(s, RIL, MSFI, a0, a2);
2125            }
2126        } else if (a0 == a1) {
2127            tcg_out_insn(s, RRE, MSR, a0, a2);
2128        } else {
2129            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
2130        }
2131        break;
2132
2133    case INDEX_op_div2_i32:
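        /*
         * DR divides the 64-bit value in an even/odd register pair by
         * args[4], leaving the remainder in the even register (args[1])
         * and the quotient in the odd one (args[0] == args[1] + 1),
         * which is what the asserts below encode.
         */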
2134        tcg_debug_assert(args[0] == args[2]);
2135        tcg_debug_assert(args[1] == args[3]);
2136        tcg_debug_assert((args[1] & 1) == 0);
2137        tcg_debug_assert(args[0] == args[1] + 1);
2138        tcg_out_insn(s, RR, DR, args[1], args[4]);
2139        break;
2140    case INDEX_op_divu2_i32:
2141        tcg_debug_assert(args[0] == args[2]);
2142        tcg_debug_assert(args[1] == args[3]);
2143        tcg_debug_assert((args[1] & 1) == 0);
2144        tcg_debug_assert(args[0] == args[1] + 1);
2145        tcg_out_insn(s, RRE, DLR, args[1], args[4]);
2146        break;
2147
2148    case INDEX_op_shl_i32:
2149        op = RS_SLL;
2150        op2 = RSY_SLLK;
2151    do_shift32:
2152        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
2153        if (a0 == a1) {
2154            if (const_args[2]) {
2155                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
2156            } else {
2157                tcg_out_sh32(s, op, a0, a2, 0);
2158            }
2159        } else {
2160            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift.  */
2161            if (const_args[2]) {
2162                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
2163            } else {
2164                tcg_out_sh64(s, op2, a0, a1, a2, 0);
2165            }
2166        }
2167        break;
2168    case INDEX_op_shr_i32:
2169        op = RS_SRL;
2170        op2 = RSY_SRLK;
2171        goto do_shift32;
2172    case INDEX_op_sar_i32:
2173        op = RS_SRA;
2174        op2 = RSY_SRAK;
2175        goto do_shift32;
2176
2177    case INDEX_op_rotl_i32:
2178        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
2179        if (const_args[2]) {
2180            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
2181        } else {
2182            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
2183        }
2184        break;
2185    case INDEX_op_rotr_i32:
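        /* There is no rotate-right insn; rotate left by (32 - count). */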
2186        if (const_args[2]) {
2187            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
2188                         TCG_REG_NONE, (32 - args[2]) & 31);
2189        } else {
2190            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2191            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
2192        }
2193        break;
2194
2195    case INDEX_op_bswap16_i32:
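        /*
         * LRVR byte-reverses the whole 32-bit register, leaving the
         * swapped halfword in the high 16 bits; the shift right by 16
         * moves it back down, arithmetic or logical according to the
         * requested output extension.
         */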
2196        a0 = args[0], a1 = args[1], a2 = args[2];
2197        tcg_out_insn(s, RRE, LRVR, a0, a1);
2198        if (a2 & TCG_BSWAP_OS) {
2199            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
2200        } else {
2201            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
2202        }
2203        break;
2204    case INDEX_op_bswap16_i64:
2205        a0 = args[0], a1 = args[1], a2 = args[2];
2206        tcg_out_insn(s, RRE, LRVGR, a0, a1);
2207        if (a2 & TCG_BSWAP_OS) {
2208            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
2209        } else {
2210            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
2211        }
2212        break;
2213
2214    case INDEX_op_bswap32_i32:
2215        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
2216        break;
2217    case INDEX_op_bswap32_i64:
2218        a0 = args[0], a1 = args[1], a2 = args[2];
2219        tcg_out_insn(s, RRE, LRVR, a0, a1);
2220        if (a2 & TCG_BSWAP_OS) {
2221            tcg_out_ext32s(s, a0, a0);
2222        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2223            tcg_out_ext32u(s, a0, a0);
2224        }
2225        break;
2226
2227    case INDEX_op_add2_i32:
2228        if (const_args[4]) {
2229            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
2230        } else {
2231            tcg_out_insn(s, RR, ALR, args[0], args[4]);
2232        }
2233        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
2234        break;
2235    case INDEX_op_sub2_i32:
2236        if (const_args[4]) {
2237            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
2238        } else {
2239            tcg_out_insn(s, RR, SLR, args[0], args[4]);
2240        }
2241        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
2242        break;
2243
2244    case INDEX_op_br:
2245        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
2246        break;
2247
2248    case INDEX_op_brcond_i32:
2249        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
2250                    args[1], const_args[1], arg_label(args[3]));
2251        break;
2252    case INDEX_op_setcond_i32:
2253        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
2254                     args[2], const_args[2]);
2255        break;
2256    case INDEX_op_movcond_i32:
2257        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
2258                     args[2], const_args[2], args[3], const_args[3], args[4]);
2259        break;
2260
2261    case INDEX_op_qemu_ld_i32:
2262        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
2263        break;
2264    case INDEX_op_qemu_ld_i64:
2265        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
2266        break;
2267    case INDEX_op_qemu_st_i32:
2268        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
2269        break;
2270    case INDEX_op_qemu_st_i64:
2271        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
2272        break;
2273
2274    case INDEX_op_ld16s_i64:
2275        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
2276        break;
2277    case INDEX_op_ld32u_i64:
2278        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
2279        break;
2280    case INDEX_op_ld32s_i64:
2281        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
2282        break;
2283    case INDEX_op_ld_i64:
2284        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2285        break;
2286
2287    case INDEX_op_st32_i64:
2288        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
2289        break;
2290    case INDEX_op_st_i64:
2291        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2292        break;
2293
2294    case INDEX_op_add_i64:
2295        a0 = args[0], a1 = args[1], a2 = args[2];
2296        if (const_args[2]) {
2297        do_addi_64:
2298            if (a0 == a1) {
2299                if (a2 == (int16_t)a2) {
2300                    tcg_out_insn(s, RI, AGHI, a0, a2);
2301                    break;
2302                }
2303                if (a2 == (int32_t)a2) {
2304                    tcg_out_insn(s, RIL, AGFI, a0, a2);
2305                    break;
2306                }
2307                if (a2 == (uint32_t)a2) {
2308                    tcg_out_insn(s, RIL, ALGFI, a0, a2);
2309                    break;
2310                }
2311                if (-a2 == (uint32_t)-a2) {
2312                    tcg_out_insn(s, RIL, SLGFI, a0, -a2);
2313                    break;
2314                }
2315            }
2316            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2317        } else if (a0 == a1) {
2318            tcg_out_insn(s, RRE, AGR, a0, a2);
2319        } else {
2320            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2321        }
2322        break;
2323    case INDEX_op_sub_i64:
2324        a0 = args[0], a1 = args[1], a2 = args[2];
2325        if (const_args[2]) {
2326            a2 = -a2;
2327            goto do_addi_64;
2328        } else {
2329            tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
2330        }
2331        break;
2332
2333    case INDEX_op_and_i64:
2334        a0 = args[0], a1 = args[1], a2 = args[2];
2335        if (const_args[2]) {
2336            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2337            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2338        } else {
2339            tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
2340        }
2341        break;
2342    case INDEX_op_or_i64:
2343        a0 = args[0], a1 = args[1], a2 = args[2];
2344        if (const_args[2]) {
2345            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2346            tgen_ori(s, a0, a2);
2347        } else {
2348            tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
2349        }
2350        break;
2351    case INDEX_op_xor_i64:
2352        a0 = args[0], a1 = args[1], a2 = args[2];
2353        if (const_args[2]) {
2354            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2355            tgen_xori(s, a0, a2);
2356        } else {
2357            tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
2358        }
2359        break;
2360
2361    case INDEX_op_andc_i64:
2362        a0 = args[0], a1 = args[1], a2 = args[2];
2363        if (const_args[2]) {
2364            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2365            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
2366        } else {
2367            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
2368        }
2369        break;
2370    case INDEX_op_orc_i64:
2371        a0 = args[0], a1 = args[1], a2 = args[2];
2372        if (const_args[2]) {
2373            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2374            tgen_ori(s, a0, ~a2);
2375        } else {
2376            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
2377        }
2378        break;
2379    case INDEX_op_eqv_i64:
2380        a0 = args[0], a1 = args[1], a2 = args[2];
2381        if (const_args[2]) {
2382            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2383            tgen_xori(s, a0, ~a2);
2384        } else {
2385            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
2386        }
2387        break;
2388    case INDEX_op_nand_i64:
2389        tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
2390        break;
2391    case INDEX_op_nor_i64:
2392        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
2393        break;
2394
2395    case INDEX_op_neg_i64:
2396        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2397        break;
2398    case INDEX_op_not_i64:
2399        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
2400        break;
2401    case INDEX_op_bswap64_i64:
2402        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2403        break;
2404
2405    case INDEX_op_mul_i64:
2406        a0 = args[0], a1 = args[1], a2 = args[2];
2407        if (const_args[2]) {
2408            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2409            if (a2 == (int16_t)a2) {
2410                tcg_out_insn(s, RI, MGHI, a0, a2);
2411            } else {
2412                tcg_out_insn(s, RIL, MSGFI, a0, a2);
2413            }
2414        } else if (a0 == a1) {
2415            tcg_out_insn(s, RRE, MSGR, a0, a2);
2416        } else {
2417            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
2418        }
2419        break;
2420
2421    case INDEX_op_div2_i64:
2422        /*
2423         * ??? We get an unnecessary sign-extension of the dividend
2424         * into op0 with this definition; but since we do in fact always
2425         * produce both quotient and remainder, using INDEX_op_div_i64
2426         * instead would require jumping through even more hoops.
2427         */
2428        tcg_debug_assert(args[0] == args[2]);
2429        tcg_debug_assert(args[1] == args[3]);
2430        tcg_debug_assert((args[1] & 1) == 0);
2431        tcg_debug_assert(args[0] == args[1] + 1);
2432        tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
2433        break;
2434    case INDEX_op_divu2_i64:
2435        tcg_debug_assert(args[0] == args[2]);
2436        tcg_debug_assert(args[1] == args[3]);
2437        tcg_debug_assert((args[1] & 1) == 0);
2438        tcg_debug_assert(args[0] == args[1] + 1);
2439        tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
2440        break;
2441    case INDEX_op_mulu2_i64:
2442        tcg_debug_assert(args[0] == args[2]);
2443        tcg_debug_assert((args[1] & 1) == 0);
2444        tcg_debug_assert(args[0] == args[1] + 1);
2445        tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
2446        break;
2447    case INDEX_op_muls2_i64:
2448        tcg_debug_assert((args[1] & 1) == 0);
2449        tcg_debug_assert(args[0] == args[1] + 1);
2450        tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
2451        break;
2452
2453    case INDEX_op_shl_i64:
2454        op = RSY_SLLG;
2455    do_shift64:
2456        if (const_args[2]) {
2457            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2458        } else {
2459            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2460        }
2461        break;
2462    case INDEX_op_shr_i64:
2463        op = RSY_SRLG;
2464        goto do_shift64;
2465    case INDEX_op_sar_i64:
2466        op = RSY_SRAG;
2467        goto do_shift64;
2468
2469    case INDEX_op_rotl_i64:
2470        if (const_args[2]) {
2471            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2472                         TCG_REG_NONE, args[2]);
2473        } else {
2474            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2475        }
2476        break;
2477    case INDEX_op_rotr_i64:
2478        if (const_args[2]) {
2479            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2480                         TCG_REG_NONE, (64 - args[2]) & 63);
2481        } else {
2482            /* We can use the smaller 32-bit negate because only the
2483               low 6 bits are examined for the rotate.  */
2484            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2485            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2486        }
2487        break;
2488
2489    case INDEX_op_add2_i64:
2490        if (const_args[4]) {
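            /*
             * ALGFI and SLGFI take 32-bit unsigned immediates; any
             * signed 33-bit constant is representable by one of the
             * two, selected by sign.
             */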
2491            if ((int64_t)args[4] >= 0) {
2492                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2493            } else {
2494                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2495            }
2496        } else {
2497            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2498        }
2499        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2500        break;
2501    case INDEX_op_sub2_i64:
2502        if (const_args[4]) {
2503            if ((int64_t)args[4] >= 0) {
2504                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2505            } else {
2506                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2507            }
2508        } else {
2509            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2510        }
2511        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2512        break;
2513
2514    case INDEX_op_brcond_i64:
2515        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2516                    args[1], const_args[1], arg_label(args[3]));
2517        break;
2518    case INDEX_op_setcond_i64:
2519        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2520                     args[2], const_args[2]);
2521        break;
2522    case INDEX_op_movcond_i64:
2523        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2524                     args[2], const_args[2], args[3], const_args[3], args[4]);
2525        break;
2526
2527    OP_32_64(deposit):
2528        a0 = args[0], a1 = args[1], a2 = args[2];
2529        if (const_args[1]) {
2530            tgen_deposit(s, a0, a2, args[3], args[4], 1);
2531        } else {
2532            /* Since we can't support "0Z" as a constraint, we allow a1 in
2533               any register.  Fix things up as if we had a matching constraint.  */
2534            if (a0 != a1) {
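                /* TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1, so the
                   comparison result is itself the required type.  */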
2535                TCGType type = (opc == INDEX_op_deposit_i64);
2536                if (a0 == a2) {
2537                    tcg_out_mov(s, type, TCG_TMP0, a2);
2538                    a2 = TCG_TMP0;
2539                }
2540                tcg_out_mov(s, type, a0, a1);
2541            }
2542            tgen_deposit(s, a0, a2, args[3], args[4], 0);
2543        }
2544        break;
2545
2546    OP_32_64(extract):
2547        tgen_extract(s, args[0], args[1], args[2], args[3]);
2548        break;
2549
2550    case INDEX_op_clz_i64:
2551        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2552        break;
2553
2554    case INDEX_op_ctpop_i32:
2555        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
2556        break;
2557    case INDEX_op_ctpop_i64:
2558        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
2559        break;
2560
2561    case INDEX_op_mb:
2562        /* The host memory model is quite strong: only store-to-load
2563           ordering requires an actual serializing instruction.  */
2564        if (args[0] & TCG_MO_ST_LD) {
2565            /* fast-bcr-serialization facility (45) is present */
2566            tcg_out_insn(s, RR, BCR, 14, 0);
2567        }
2568        break;
2569
2570    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2571    case INDEX_op_mov_i64:
2572    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2573    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2574    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2575    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2576    case INDEX_op_ext8s_i64:
2577    case INDEX_op_ext8u_i32:
2578    case INDEX_op_ext8u_i64:
2579    case INDEX_op_ext16s_i32:
2580    case INDEX_op_ext16s_i64:
2581    case INDEX_op_ext16u_i32:
2582    case INDEX_op_ext16u_i64:
2583    case INDEX_op_ext32s_i64:
2584    case INDEX_op_ext32u_i64:
2585    case INDEX_op_ext_i32_i64:
2586    case INDEX_op_extu_i32_i64:
2587    case INDEX_op_extrl_i64_i32:
2588    default:
2589        g_assert_not_reached();
2590    }
2591}
2592
2593static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2594                            TCGReg dst, TCGReg src)
2595{
2596    if (is_general_reg(src)) {
2597        /* Replicate general register into two MO_64. */
2598        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
2599        if (vece == MO_64) {
2600            return true;
2601        }
2602        src = dst;
2603    }
2604
2605    /*
2606     * Recall that the "standard" integer, within a vector, is the
2607     * rightmost element of the leftmost doubleword, a-la VLLEZ.
2608     */
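    /*
     * E.g. for vece == MO_32, (8 >> vece) - 1 == 1 selects word element 1:
     * the rightmost word of the leftmost doubleword that VLVGP populated.
     */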
2609    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
2610    return true;
2611}
2612
2613static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2614                             TCGReg dst, TCGReg base, intptr_t offset)
2615{
2616    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
2617    return true;
2618}
2619
2620static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2621                             TCGReg dst, int64_t val)
2622{
2623    int i, mask, msb, lsb;
2624
2625    /* Look for int16_t elements.  */
2626    if (vece <= MO_16 ||
2627        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
2628        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
2629        return;
2630    }
2631
2632    /* Look for bit masks.  */
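    /*
     * VGM generates ones from bit msb through bit lsb (IBM numbering,
     * bit 0 = most significant).  E.g. val = 0x00ffff00 yields msb = 8,
     * lsb = 23; the wraparound mask 0xff0000ff yields msb = 24, lsb = 7,
     * producing ones that wrap from bit 24 around to bit 7.
     */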
2633    if (vece == MO_32) {
2634        if (risbg_mask((int32_t)val)) {
2635            /* Handle wraparound by swapping msb and lsb.  */
2636            if ((val & 0x80000001u) == 0x80000001u) {
2637                msb = 32 - ctz32(~val);
2638                lsb = clz32(~val) - 1;
2639            } else {
2640                msb = clz32(val);
2641                lsb = 31 - ctz32(val);
2642            }
2643            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
2644            return;
2645        }
2646    } else {
2647        if (risbg_mask(val)) {
2648            /* Handle wraparound by swapping msb and lsb.  */
2649            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
2651                msb = 64 - ctz64(~val);
2652                lsb = clz64(~val) - 1;
2653            } else {
2654                msb = clz64(val);
2655                lsb = 63 - ctz64(val);
2656            }
2657            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
2658            return;
2659        }
2660    }
2661
2662    /* Look for all bytes 0x00 or 0xff.  */
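    /*
     * VGBM expands a 16-bit immediate to 16 bytes, one mask bit per byte;
     * mask * 0x0101 replicates our 8-bit byte mask into both doubleword
     * halves of the vector.
     */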
2663    for (i = mask = 0; i < 8; i++) {
2664        uint8_t byte = val >> (i * 8);
2665        if (byte == 0xff) {
2666            mask |= 1 << i;
2667        } else if (byte != 0) {
2668            break;
2669        }
2670    }
2671    if (i == 8) {
2672        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
2673        return;
2674    }
2675
2676    /* Otherwise, stuff it in the constant pool.  */
2677    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
2678    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
2679    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
2680}
2681
2682static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2683                           unsigned vecl, unsigned vece,
2684                           const TCGArg args[TCG_MAX_OP_ARGS],
2685                           const int const_args[TCG_MAX_OP_ARGS])
2686{
2687    TCGType type = vecl + TCG_TYPE_V64;
2688    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
2689
2690    switch (opc) {
2691    case INDEX_op_ld_vec:
2692        tcg_out_ld(s, type, a0, a1, a2);
2693        break;
2694    case INDEX_op_st_vec:
2695        tcg_out_st(s, type, a0, a1, a2);
2696        break;
2697    case INDEX_op_dupm_vec:
2698        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2699        break;
2700
2701    case INDEX_op_abs_vec:
2702        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
2703        break;
2704    case INDEX_op_neg_vec:
2705        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
2706        break;
2707    case INDEX_op_not_vec:
2708        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
2709        break;
2710
2711    case INDEX_op_add_vec:
2712        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
2713        break;
2714    case INDEX_op_sub_vec:
2715        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
2716        break;
2717    case INDEX_op_and_vec:
2718        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
2719        break;
2720    case INDEX_op_andc_vec:
2721        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
2722        break;
2723    case INDEX_op_mul_vec:
2724        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
2725        break;
2726    case INDEX_op_or_vec:
2727        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
2728        break;
2729    case INDEX_op_orc_vec:
2730        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
2731        break;
2732    case INDEX_op_xor_vec:
2733        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
2734        break;
2735    case INDEX_op_nand_vec:
2736        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
2737        break;
2738    case INDEX_op_nor_vec:
2739        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
2740        break;
2741    case INDEX_op_eqv_vec:
2742        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
2743        break;
2744
2745    case INDEX_op_shli_vec:
2746        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
2747        break;
2748    case INDEX_op_shri_vec:
2749        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
2750        break;
2751    case INDEX_op_sari_vec:
2752        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
2753        break;
2754    case INDEX_op_rotli_vec:
2755        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
2756        break;
2757    case INDEX_op_shls_vec:
2758        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
2759        break;
2760    case INDEX_op_shrs_vec:
2761        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
2762        break;
2763    case INDEX_op_sars_vec:
2764        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
2765        break;
2766    case INDEX_op_rotls_vec:
2767        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
2768        break;
2769    case INDEX_op_shlv_vec:
2770        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
2771        break;
2772    case INDEX_op_shrv_vec:
2773        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
2774        break;
2775    case INDEX_op_sarv_vec:
2776        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
2777        break;
2778    case INDEX_op_rotlv_vec:
2779        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
2780        break;
2781
2782    case INDEX_op_smin_vec:
2783        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
2784        break;
2785    case INDEX_op_smax_vec:
2786        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
2787        break;
2788    case INDEX_op_umin_vec:
2789        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
2790        break;
2791    case INDEX_op_umax_vec:
2792        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
2793        break;
2794
2795    case INDEX_op_bitsel_vec:
2796        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
2797        break;
2798
2799    case INDEX_op_cmp_vec:
2800        switch ((TCGCond)args[3]) {
2801        case TCG_COND_EQ:
2802            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
2803            break;
2804        case TCG_COND_GT:
2805            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
2806            break;
2807        case TCG_COND_GTU:
2808            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
2809            break;
2810        default:
2811            g_assert_not_reached();
2812        }
2813        break;
2814
2815    case INDEX_op_s390_vuph_vec:
2816        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
2817        break;
2818    case INDEX_op_s390_vupl_vec:
2819        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
2820        break;
2821    case INDEX_op_s390_vpks_vec:
2822        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
2823        break;
2824
2825    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov.  */
2826    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec.  */
2827    default:
2828        g_assert_not_reached();
2829    }
2830}
2831
2832int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2833{
2834    switch (opc) {
2835    case INDEX_op_abs_vec:
2836    case INDEX_op_add_vec:
2837    case INDEX_op_and_vec:
2838    case INDEX_op_andc_vec:
2839    case INDEX_op_bitsel_vec:
2840    case INDEX_op_eqv_vec:
2841    case INDEX_op_nand_vec:
2842    case INDEX_op_neg_vec:
2843    case INDEX_op_nor_vec:
2844    case INDEX_op_not_vec:
2845    case INDEX_op_or_vec:
2846    case INDEX_op_orc_vec:
2847    case INDEX_op_rotli_vec:
2848    case INDEX_op_rotls_vec:
2849    case INDEX_op_rotlv_vec:
2850    case INDEX_op_sari_vec:
2851    case INDEX_op_sars_vec:
2852    case INDEX_op_sarv_vec:
2853    case INDEX_op_shli_vec:
2854    case INDEX_op_shls_vec:
2855    case INDEX_op_shlv_vec:
2856    case INDEX_op_shri_vec:
2857    case INDEX_op_shrs_vec:
2858    case INDEX_op_shrv_vec:
2859    case INDEX_op_smax_vec:
2860    case INDEX_op_smin_vec:
2861    case INDEX_op_sub_vec:
2862    case INDEX_op_umax_vec:
2863    case INDEX_op_umin_vec:
2864    case INDEX_op_xor_vec:
2865        return 1;
2866    case INDEX_op_cmp_vec:
2867    case INDEX_op_cmpsel_vec:
2868    case INDEX_op_rotrv_vec:
2869        return -1;
2870    case INDEX_op_mul_vec:
2871        return vece < MO_64;
2872    case INDEX_op_ssadd_vec:
2873    case INDEX_op_sssub_vec:
2874        return vece < MO_64 ? -1 : 0;
2875    default:
2876        return 0;
2877    }
2878}
2879
2880static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
2881                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2882{
2883    bool need_swap = false, need_inv = false;
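    /*
     * The hardware provides only VCEQ, VCH and VCHL (EQ, GT, GTU); all
     * other conditions are derived by swapping the operands, inverting
     * the result, or both.
     */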
2884
2885    switch (cond) {
2886    case TCG_COND_EQ:
2887    case TCG_COND_GT:
2888    case TCG_COND_GTU:
2889        break;
2890    case TCG_COND_NE:
2891    case TCG_COND_LE:
2892    case TCG_COND_LEU:
2893        need_inv = true;
2894        break;
2895    case TCG_COND_LT:
2896    case TCG_COND_LTU:
2897        need_swap = true;
2898        break;
2899    case TCG_COND_GE:
2900    case TCG_COND_GEU:
2901        need_swap = need_inv = true;
2902        break;
2903    default:
2904        g_assert_not_reached();
2905    }
2906
2907    if (need_inv) {
2908        cond = tcg_invert_cond(cond);
2909    }
2910    if (need_swap) {
2911        TCGv_vec t1;
2912        t1 = v1, v1 = v2, v2 = t1;
2913        cond = tcg_swap_cond(cond);
2914    }
2915
2916    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
2917              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
2918
2919    return need_inv;
2920}
2921
2922static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
2923                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
2924{
2925    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
2926        tcg_gen_not_vec(vece, v0, v0);
2927    }
2928}
2929
2930static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
2931                              TCGv_vec c1, TCGv_vec c2,
2932                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
2933{
2934    TCGv_vec t = tcg_temp_new_vec(type);
2935
2936    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
2937        /* Invert the sense of the compare by swapping arguments.  */
2938        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
2939    } else {
2940        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
2941    }
2942    tcg_temp_free_vec(t);
2943}
2944
2945static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
2946                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
2947{
2948    TCGv_vec h1 = tcg_temp_new_vec(type);
2949    TCGv_vec h2 = tcg_temp_new_vec(type);
2950    TCGv_vec l1 = tcg_temp_new_vec(type);
2951    TCGv_vec l2 = tcg_temp_new_vec(type);
2952
2953    tcg_debug_assert(vece < MO_64);
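    /*
     * Strategy: widen both inputs to the next larger element size, do
     * the add/sub exactly there, then pack back down with signed
     * saturation (VPKS), which clamps out-of-range results.
     */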
2954
2955    /* Unpack with sign-extension. */
2956    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
2957              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
2958    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
2959              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
2960
2961    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
2962              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
2963    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
2964              tcgv_vec_arg(l2), tcgv_vec_arg(v2));
2965
2966    /* Arithmetic on a wider element size. */
2967    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
2968              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
2969    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
2970              tcgv_vec_arg(l1), tcgv_vec_arg(l2));
2971
2972    /* Pack with saturation. */
2973    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
2974              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));
2975
2976    tcg_temp_free_vec(h1);
2977    tcg_temp_free_vec(h2);
2978    tcg_temp_free_vec(l1);
2979    tcg_temp_free_vec(l2);
2980}
2981
2982void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2983                       TCGArg a0, ...)
2984{
2985    va_list va;
2986    TCGv_vec v0, v1, v2, v3, v4, t0;
2987
2988    va_start(va, a0);
2989    v0 = temp_tcgv_vec(arg_temp(a0));
2990    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2991    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2992
2993    switch (opc) {
2994    case INDEX_op_cmp_vec:
2995        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
2996        break;
2997
2998    case INDEX_op_cmpsel_vec:
2999        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3000        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3001        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
3002        break;
3003
3004    case INDEX_op_rotrv_vec:
3005        t0 = tcg_temp_new_vec(type);
3006        tcg_gen_neg_vec(vece, t0, v2);
3007        tcg_gen_rotlv_vec(vece, v0, v1, t0);
3008        tcg_temp_free_vec(t0);
3009        break;
3010
3011    case INDEX_op_ssadd_vec:
3012        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
3013        break;
3014    case INDEX_op_sssub_vec:
3015        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
3016        break;
3017
3018    default:
3019        g_assert_not_reached();
3020    }
3021    va_end(va);
3022}
3023
3024static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3025{
3026    switch (op) {
3027    case INDEX_op_goto_ptr:
3028        return C_O0_I1(r);
3029
3030    case INDEX_op_ld8u_i32:
3031    case INDEX_op_ld8u_i64:
3032    case INDEX_op_ld8s_i32:
3033    case INDEX_op_ld8s_i64:
3034    case INDEX_op_ld16u_i32:
3035    case INDEX_op_ld16u_i64:
3036    case INDEX_op_ld16s_i32:
3037    case INDEX_op_ld16s_i64:
3038    case INDEX_op_ld_i32:
3039    case INDEX_op_ld32u_i64:
3040    case INDEX_op_ld32s_i64:
3041    case INDEX_op_ld_i64:
3042        return C_O1_I1(r, r);
3043
3044    case INDEX_op_st8_i32:
3045    case INDEX_op_st8_i64:
3046    case INDEX_op_st16_i32:
3047    case INDEX_op_st16_i64:
3048    case INDEX_op_st_i32:
3049    case INDEX_op_st32_i64:
3050    case INDEX_op_st_i64:
3051        return C_O0_I2(r, r);
3052
3053    case INDEX_op_add_i32:
3054    case INDEX_op_add_i64:
3055    case INDEX_op_shl_i64:
3056    case INDEX_op_shr_i64:
3057    case INDEX_op_sar_i64:
3058    case INDEX_op_rotl_i32:
3059    case INDEX_op_rotl_i64:
3060    case INDEX_op_rotr_i32:
3061    case INDEX_op_rotr_i64:
3062    case INDEX_op_setcond_i32:
3063        return C_O1_I2(r, r, ri);
3064    case INDEX_op_setcond_i64:
3065        return C_O1_I2(r, r, rA);
3066
3067    case INDEX_op_clz_i64:
3068        return C_O1_I2(r, r, rI);
3069
3070    case INDEX_op_sub_i32:
3071    case INDEX_op_sub_i64:
3072    case INDEX_op_and_i32:
3073    case INDEX_op_or_i32:
3074    case INDEX_op_xor_i32:
3075        return C_O1_I2(r, r, ri);
3076    case INDEX_op_and_i64:
3077        return C_O1_I2(r, r, rNKR);
3078    case INDEX_op_or_i64:
3079    case INDEX_op_xor_i64:
3080        return C_O1_I2(r, r, rK);
3081
3082    case INDEX_op_andc_i32:
3083    case INDEX_op_orc_i32:
3084    case INDEX_op_eqv_i32:
3085        return C_O1_I2(r, r, ri);
3086    case INDEX_op_andc_i64:
3087        return C_O1_I2(r, r, rKR);
3088    case INDEX_op_orc_i64:
3089    case INDEX_op_eqv_i64:
3090        return C_O1_I2(r, r, rNK);
3091
3092    case INDEX_op_nand_i32:
3093    case INDEX_op_nand_i64:
3094    case INDEX_op_nor_i32:
3095    case INDEX_op_nor_i64:
3096        return C_O1_I2(r, r, r);
3097
3098    case INDEX_op_mul_i32:
3099        return (HAVE_FACILITY(MISC_INSN_EXT2)
3100                ? C_O1_I2(r, r, ri)
3101                : C_O1_I2(r, 0, ri));
3102    case INDEX_op_mul_i64:
3103        return (HAVE_FACILITY(MISC_INSN_EXT2)
3104                ? C_O1_I2(r, r, rJ)
3105                : C_O1_I2(r, 0, rJ));
3106
3107    case INDEX_op_shl_i32:
3108    case INDEX_op_shr_i32:
3109    case INDEX_op_sar_i32:
3110        return C_O1_I2(r, r, ri);
3111
3112    case INDEX_op_brcond_i32:
3113        return C_O0_I2(r, ri);
3114    case INDEX_op_brcond_i64:
3115        return C_O0_I2(r, rA);
3116
3117    case INDEX_op_bswap16_i32:
3118    case INDEX_op_bswap16_i64:
3119    case INDEX_op_bswap32_i32:
3120    case INDEX_op_bswap32_i64:
3121    case INDEX_op_bswap64_i64:
3122    case INDEX_op_neg_i32:
3123    case INDEX_op_neg_i64:
3124    case INDEX_op_not_i32:
3125    case INDEX_op_not_i64:
3126    case INDEX_op_ext8s_i32:
3127    case INDEX_op_ext8s_i64:
3128    case INDEX_op_ext8u_i32:
3129    case INDEX_op_ext8u_i64:
3130    case INDEX_op_ext16s_i32:
3131    case INDEX_op_ext16s_i64:
3132    case INDEX_op_ext16u_i32:
3133    case INDEX_op_ext16u_i64:
3134    case INDEX_op_ext32s_i64:
3135    case INDEX_op_ext32u_i64:
3136    case INDEX_op_ext_i32_i64:
3137    case INDEX_op_extu_i32_i64:
3138    case INDEX_op_extract_i32:
3139    case INDEX_op_extract_i64:
3140    case INDEX_op_ctpop_i32:
3141    case INDEX_op_ctpop_i64:
3142        return C_O1_I1(r, r);
3143
3144    case INDEX_op_qemu_ld_i32:
3145    case INDEX_op_qemu_ld_i64:
3146        return C_O1_I1(r, r);
3147    case INDEX_op_qemu_st_i64:
3148    case INDEX_op_qemu_st_i32:
3149        return C_O0_I2(r, r);
3150
3151    case INDEX_op_deposit_i32:
3152    case INDEX_op_deposit_i64:
3153        return C_O1_I2(r, rZ, r);
3154
3155    case INDEX_op_movcond_i32:
3156        return C_O1_I4(r, r, ri, rI, r);
3157    case INDEX_op_movcond_i64:
3158        return C_O1_I4(r, r, rA, rI, r);
3159
3160    case INDEX_op_div2_i32:
3161    case INDEX_op_div2_i64:
3162    case INDEX_op_divu2_i32:
3163    case INDEX_op_divu2_i64:
3164        return C_O2_I3(o, m, 0, 1, r);
3165
3166    case INDEX_op_mulu2_i64:
3167        return C_O2_I2(o, m, 0, r);
3168    case INDEX_op_muls2_i64:
3169        return C_O2_I2(o, m, r, r);
3170
3171    case INDEX_op_add2_i32:
3172    case INDEX_op_sub2_i32:
3173        return C_O2_I4(r, r, 0, 1, ri, r);
3174
3175    case INDEX_op_add2_i64:
3176    case INDEX_op_sub2_i64:
3177        return C_O2_I4(r, r, 0, 1, rA, r);
3178
3179    case INDEX_op_st_vec:
3180        return C_O0_I2(v, r);
3181    case INDEX_op_ld_vec:
3182    case INDEX_op_dupm_vec:
3183        return C_O1_I1(v, r);
3184    case INDEX_op_dup_vec:
3185        return C_O1_I1(v, vr);
3186    case INDEX_op_abs_vec:
3187    case INDEX_op_neg_vec:
3188    case INDEX_op_not_vec:
3189    case INDEX_op_rotli_vec:
3190    case INDEX_op_sari_vec:
3191    case INDEX_op_shli_vec:
3192    case INDEX_op_shri_vec:
3193    case INDEX_op_s390_vuph_vec:
3194    case INDEX_op_s390_vupl_vec:
3195        return C_O1_I1(v, v);
3196    case INDEX_op_add_vec:
3197    case INDEX_op_sub_vec:
3198    case INDEX_op_and_vec:
3199    case INDEX_op_andc_vec:
3200    case INDEX_op_or_vec:
3201    case INDEX_op_orc_vec:
3202    case INDEX_op_xor_vec:
3203    case INDEX_op_nand_vec:
3204    case INDEX_op_nor_vec:
3205    case INDEX_op_eqv_vec:
3206    case INDEX_op_cmp_vec:
3207    case INDEX_op_mul_vec:
3208    case INDEX_op_rotlv_vec:
3209    case INDEX_op_rotrv_vec:
3210    case INDEX_op_shlv_vec:
3211    case INDEX_op_shrv_vec:
3212    case INDEX_op_sarv_vec:
3213    case INDEX_op_smax_vec:
3214    case INDEX_op_smin_vec:
3215    case INDEX_op_umax_vec:
3216    case INDEX_op_umin_vec:
3217    case INDEX_op_s390_vpks_vec:
3218        return C_O1_I2(v, v, v);
3219    case INDEX_op_rotls_vec:
3220    case INDEX_op_shls_vec:
3221    case INDEX_op_shrs_vec:
3222    case INDEX_op_sars_vec:
3223        return C_O1_I2(v, v, r);
3224    case INDEX_op_bitsel_vec:
3225        return C_O1_I3(v, v, v, v);
3226
3227    default:
3228        g_assert_not_reached();
3229    }
3230}
3231
3232/*
3233 * Mainline glibc added HWCAP_S390_VX before it was kernel ABI.
3234 * Some distros have fixed this up locally, others have not.
3235 */
3236#ifndef HWCAP_S390_VXRS
3237#define HWCAP_S390_VXRS 2048
3238#endif
3239
3240static void query_s390_facilities(void)
3241{
3242    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3243    const char *which;
3244
3245    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
3246       is present on all 64-bit systems, but let's check for it anyway.  */
3247    if (hwcap & HWCAP_S390_STFLE) {
3248        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
3249        register void *r1 __asm__("1") = s390_facilities;
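        /*
         * STFLE expects the number of doublewords to store, minus one,
         * in r0; it stores the facility bits at 0(%r1) and returns the
         * count the machine actually supports, minus one, in r0.
         */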
3250
3251        /* stfle 0(%r1) */
3252        asm volatile(".word 0xb2b0,0x1000"
3253                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
3254    }
3255
3256    /*
3257     * Use of vector registers requires os support beyond the facility bit.
3258     * If the kernel does not advertise support, disable the facility bits.
3259     * There is nothing else we currently care about in the 3rd word, so
3260     * disable VECTOR with one store.
3261     */
3262    if (!(hwcap & HWCAP_S390_VXRS)) {
3263        s390_facilities[2] = 0;
3264    }
3265
3266    /*
3267     * Minimum supported cpu revision is z196.
3268     * Check for all required facilities.
3269     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
3270     */
3271    if (!HAVE_FACILITY(LONG_DISP)) {
3272        which = "long-displacement";
3273        goto fail;
3274    }
3275    if (!HAVE_FACILITY(EXT_IMM)) {
3276        which = "extended-immediate";
3277        goto fail;
3278    }
3279    if (!HAVE_FACILITY(GEN_INST_EXT)) {
3280        which = "general-instructions-extension";
3281        goto fail;
3282    }
3283    /*
3284     * Facility 45 is a big bin that contains: distinct-operands,
3285     * fast-BCR-serialization, high-word, population-count,
3286     * interlocked-access-1, and load/store-on-condition-1
3287     */
3288    if (!HAVE_FACILITY(45)) {
3289        which = "45";
3290        goto fail;
3291    }
3292    return;
3293
3294 fail:
3295    error_report("%s: missing required facility %s", __func__, which);
3296    exit(EXIT_FAILURE);
3297}
3298
3299static void tcg_target_init(TCGContext *s)
3300{
3301    query_s390_facilities();
3302
3303    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
3304    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
3305    if (HAVE_FACILITY(VECTOR)) {
3306        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3307        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3308    }
3309
3310    tcg_target_call_clobber_regs = 0;
3311    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3312    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
3313    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3314    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3315    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3316    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3317    /* The r6 register is technically call-saved, but it's also a parameter
3318       register, so it can get killed by setup for the qemu_st helper.  */
3319    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3320    /* The return register can be considered call-clobbered.  */
3321    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
3322
3323    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3324    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3325    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3326    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3327    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3328    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3329    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3330    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3331    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3332    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3333    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3334    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3335    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
3336    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
3337    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
3338    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
3339    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
3340    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
3341    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
3342    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
3343    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
3344    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
3345    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
3346    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
3347
3348    s->reserved_regs = 0;
3349    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
3350    /* XXX many insns can't be used with R0, so we'd better avoid it for now */
3351    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
3352    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3353}
3354
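/* Total frame: the ABI-mandated area at TCG_TARGET_CALL_STACK_OFFSET,
   space for outgoing static call arguments, and the TCG temp buffer.  */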
3355#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
3356                           + TCG_STATIC_CALL_ARGS_SIZE           \
3357                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
3358
3359static void tcg_target_qemu_prologue(TCGContext *s)
3360{
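    /* Save the call-saved registers in the caller's frame; the s390x
       ELF ABI places this register save area at offset 48.  */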
3361    /* stmg %r6,%r15,48(%r15) (save registers) */
3362    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
3363
3364    /* aghi %r15,-frame_size */
3365    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
3366
3367    tcg_set_frame(s, TCG_REG_CALL_STACK,
3368                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
3369                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3370
3371#ifndef CONFIG_SOFTMMU
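    /* A guest_base below 0x80000 fits in the signed 20-bit displacement
       of RXY-format accesses and needs no dedicated register.  */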
3372    if (guest_base >= 0x80000) {
3373        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
3374        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
3375    }
3376#endif
3377
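    /* The env pointer arrives in the first call argument register;
       copy it into TCG_AREG0.  */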
3378    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3379
3380    /* br %r3 (go to TB) */
3381    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
3382
3383    /*
3384     * Return path for goto_ptr. Set return value to 0, à la exit_tb,
3385     * and fall through to the rest of the epilogue.
3386     */
3387    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3388    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
3389
3390    /* TB epilogue */
3391    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
3392
3393    /* lmg %r6,%r15,frame_size+48(%r15) (restore registers) */
3394    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
3395                 FRAME_SIZE + 48);
3396
3397    /* br %r14 (return) */
3398    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
3399}
3400
3401static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3402{
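    /* 0x07 bytes pair up as 0x0707, "bcr 0,%r7": a branch mask of
       zero never branches, so this is a 2-byte nop.  */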
3403    memset(p, 0x07, count * sizeof(tcg_insn_unit));
3404}
3405
3406typedef struct {
3407    DebugFrameHeader h;
3408    uint8_t fde_def_cfa[4];
3409    uint8_t fde_reg_ofs[18];
3410} DebugFrame;
3411
3412/* We're expecting a 2-byte uleb128-encoded value, i.e. FRAME_SIZE < (1 << 14).  */
3413QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3414
3415#define ELF_HOST_MACHINE  EM_S390
3416
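/* The CFA is the value of %r15 at function entry.  The registers saved
   by the prologue therefore sit at positive offsets 48..112 from the
   CFA, which is why the data alignment factor is +8.  */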
3417static const DebugFrame debug_frame = {
3418    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3419    .h.cie.id = -1,
3420    .h.cie.version = 1,
3421    .h.cie.code_align = 1,
3422    .h.cie.data_align = 8,                /* sleb128 8 */
3423    .h.cie.return_column = TCG_REG_R14,
3424
3425    /* Total FDE size does not include the "len" member.  */
3426    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3427
3428    .fde_def_cfa = {
3429        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
3430        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3431        (FRAME_SIZE >> 7)
3432    },
3433    .fde_reg_ofs = {
3434        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
3435        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
3436        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
3437        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
3438        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
3439        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
3440        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
3441        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
3442        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
3443    }
3444};
3445
3446void tcg_register_jit(const void *buf, size_t buf_size)
3447{
3448    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3449}
3450