/*
 * RISC-V translation routines for the T-Head vendor extensions (xthead*).
 *
 * Copyright (c) 2022 VRULL GmbH.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define REQUIRE_XTHEADBA(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadba) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADBB(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadbb) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADBS(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadbs) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADCMO(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadcmo) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADCONDMOV(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadcondmov) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADFMEMIDX(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadfmemidx) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADFMV(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadfmv) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMAC(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadmac) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMEMIDX(ctx) do {           \
    if (!ctx->cfg_ptr->ext_xtheadmemidx) {       \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMEMPAIR(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadmempair) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADSYNC(ctx) do {             \
    if (!ctx->cfg_ptr->ext_xtheadsync) {         \
        return false;                            \
    }                                            \
} while (0)

/*
 * Calculate and return the address for indexed mem operations:
 * If !zext_offs, then the address is rs1 + (rs2 << imm2).
 * If  zext_offs, then the address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static TCGv get_th_address_indexed(DisasContext *ctx, int rs1, int rs2,
                                   int imm2, bool zext_offs)
{
    TCGv src2 = get_gpr(ctx, rs2, EXT_NONE);
    TCGv offs = tcg_temp_new();

    if (zext_offs) {
        tcg_gen_extract_tl(offs, src2, 0, 32);
        tcg_gen_shli_tl(offs, offs, imm2);
    } else {
        tcg_gen_shli_tl(offs, src2, imm2);
    }

    TCGv addr = get_address_indexed(ctx, rs1, offs);

    tcg_temp_free(offs);
    return addr;
}
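
/*
 * Illustration (not from the code above; operand order per my reading of
 * the vendor documentation): for "th.lrw rd, rs1, rs2, 2" the generated
 * address is rs1 + (rs2 << 2), while for "th.lurw rd, rs1, rs2, 2" the
 * lower 32 bits of rs2 are zero-extended first, giving
 * rs1 + (zext(rs2[31:0]) << 2).
 */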

/* XTheadBa */

/*
 * th.addsl is similar to sh[123]add (from Zba), but not an
 * alternative encoding: while sh[123]add applies the shift to rs1,
 * th.addsl shifts rs2.
 */

#define GEN_TH_ADDSL(SHAMT)                                     \
static void gen_th_addsl##SHAMT(TCGv ret, TCGv arg1, TCGv arg2) \
{                                                               \
    TCGv t = tcg_temp_new();                                    \
    tcg_gen_shli_tl(t, arg2, SHAMT);                            \
    tcg_gen_add_tl(ret, t, arg1);                               \
    tcg_temp_free(t);                                           \
}

GEN_TH_ADDSL(1)
GEN_TH_ADDSL(2)
GEN_TH_ADDSL(3)
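
/*
 * Worked example (illustrative only): with rs1 = 0x1000 and rs2 = 0x10,
 * "th.addsl rd, rs1, rs2, 2" produces rd = 0x1000 + (0x10 << 2) = 0x1040,
 * whereas Zba's "sh2add rd, rs1, rs2" would produce (0x1000 << 2) + 0x10.
 */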

#define GEN_TRANS_TH_ADDSL(SHAMT)                                       \
static bool trans_th_addsl##SHAMT(DisasContext *ctx,                    \
                                  arg_th_addsl##SHAMT * a)              \
{                                                                       \
    REQUIRE_XTHEADBA(ctx);                                              \
    return gen_arith(ctx, a, EXT_NONE, gen_th_addsl##SHAMT, NULL);      \
}

GEN_TRANS_TH_ADDSL(1)
GEN_TRANS_TH_ADDSL(2)
GEN_TRANS_TH_ADDSL(3)

/* XTheadBb */

/* th.srri is an alternate encoding for rori (from Zbb) */
static bool trans_th_srri(DisasContext *ctx, arg_th_srri * a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_rotri_tl, gen_roriw, NULL);
}

/* th.srriw is an alternate encoding for roriw (from Zbb) */
static bool trans_th_srriw(DisasContext *ctx, arg_th_srriw *a)
{
    REQUIRE_XTHEADBB(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw, NULL);
}

/* th.ext and th.extu perform signed/unsigned bitfield extraction */
static bool gen_th_bfextract(DisasContext *ctx, arg_th_bfext *a,
                             void (*f)(TCGv, TCGv, unsigned int, unsigned int))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv source = get_gpr(ctx, a->rs1, EXT_ZERO);

    if (a->lsb <= a->msb) {
        f(dest, source, a->lsb, a->msb - a->lsb + 1);
        gen_set_gpr(ctx, a->rd, dest);
    }
    return true;
}
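
/*
 * Example for illustration (assuming the usual "rd, rs1, msb, lsb" operand
 * order): "th.extu rd, rs1, 15, 8" extracts bits [15:8] (lsb = 8,
 * width = 15 - 8 + 1 = 8) and zero-extends them, while th.ext sign-extends
 * the same field. If lsb > msb, nothing is extracted and rd is left
 * unchanged.
 */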

static bool trans_th_ext(DisasContext *ctx, arg_th_ext *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_bfextract(ctx, a, tcg_gen_sextract_tl);
}

static bool trans_th_extu(DisasContext *ctx, arg_th_extu *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_bfextract(ctx, a, tcg_gen_extract_tl);
}

/* th.ff0: find first zero (clz on an inverted input) */
static bool gen_th_ff0(DisasContext *ctx, arg_th_ff0 *a, DisasExtend ext)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    int olen = get_olen(ctx);
    TCGv t = tcg_temp_new();

    tcg_gen_not_tl(t, src1);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            gen_clzw(dest, t);
        } else {
            g_assert_not_reached();
        }
    } else {
        gen_clz(dest, t);
    }

    tcg_temp_free(t);
    gen_set_gpr(ctx, a->rd, dest);

    return true;
}
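
/*
 * Illustration: th.ff0 returns the number of leading one bits, i.e. the
 * bit position of the first zero when scanning from the MSB. On RV64, an
 * all-ones input gives 64 and an input with bit 63 cleared gives 0, which
 * is exactly clz applied to the inverted input as done above.
 */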

static bool trans_th_ff0(DisasContext *ctx, arg_th_ff0 *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_ff0(ctx, a, EXT_NONE);
}

/* th.ff1 is an alternate encoding for clz (from Zbb) */
static bool trans_th_ff1(DisasContext *ctx, arg_th_ff1 *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_unary_per_ol(ctx, a, EXT_NONE, gen_clz, gen_clzw);
}

static void gen_th_revw(TCGv ret, TCGv arg1)
{
    tcg_gen_bswap32_tl(ret, arg1, TCG_BSWAP_OS);
}

/* th.rev is an alternate encoding for the RV64 rev8 (from Zbb) */
static bool trans_th_rev(DisasContext *ctx, arg_th_rev *a)
{
    REQUIRE_XTHEADBB(ctx);

    return gen_unary_per_ol(ctx, a, EXT_NONE, tcg_gen_bswap_tl, gen_th_revw);
}

/* th.revw is a sign-extended byte-swap of the lower word */
static bool trans_th_revw(DisasContext *ctx, arg_th_revw *a)
{
    REQUIRE_XTHEADBB(ctx);
    REQUIRE_64BIT(ctx);
    return gen_unary(ctx, a, EXT_NONE, gen_th_revw);
}

/* th.tstnbz is equivalent to an orc.b (from Zbb) with inverted result */
static void gen_th_tstnbz(TCGv ret, TCGv source1)
{
    gen_orc_b(ret, source1);
    tcg_gen_not_tl(ret, ret);
}

static bool trans_th_tstnbz(DisasContext *ctx, arg_th_tstnbz *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_unary(ctx, a, EXT_ZERO, gen_th_tstnbz);
}
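
/*
 * Byte-wise illustration: for a source value of 0x0012003400560078, orc.b
 * produces 0x00ff00ff00ff00ff and the inversion yields 0xff00ff00ff00ff00,
 * i.e. each result byte is 0xff exactly where the source byte was zero.
 */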

/* XTheadBs */

/* th.tst is an alternate encoding for bexti (from Zbs) */
static bool trans_th_tst(DisasContext *ctx, arg_th_tst *a)
{
    REQUIRE_XTHEADBS(ctx);
    return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext);
}

/* XTheadCmo */

static inline int priv_level(DisasContext *ctx)
{
#ifdef CONFIG_USER_ONLY
    return PRV_U;
#else
    /* Priv level is part of mem_idx. */
    return ctx->mem_idx & TB_FLAGS_PRIV_MMU_MASK;
#endif
}

/* Test if priv level is M, S, or U (cannot fail). */
#define REQUIRE_PRIV_MSU(ctx)

/* Test if priv level is M or S. */
#define REQUIRE_PRIV_MS(ctx)                                    \
do {                                                            \
    int priv = priv_level(ctx);                                 \
    if (!(priv == PRV_M ||                                      \
          priv == PRV_S)) {                                     \
        return false;                                           \
    }                                                           \
} while (0)

#define NOP_PRIVCHECK(insn, extcheck, privcheck)                \
static bool trans_ ## insn(DisasContext *ctx, arg_ ## insn * a) \
{                                                               \
    (void) a;                                                   \
    extcheck(ctx);                                              \
    privcheck(ctx);                                             \
    return true;                                                \
}

NOP_PRIVCHECK(th_dcache_call, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_ciall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_ipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_civa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_iva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_csw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)

NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_iva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)

NOP_PRIVCHECK(th_l2cache_call, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_ciall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)

/* XTheadCondMov */

static bool gen_th_condmove(DisasContext *ctx, arg_r *a, TCGCond cond)
{
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv old = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv dest = dest_gpr(ctx, a->rd);

    tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, old);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* th.mveqz: "if (rs2 == 0) rd = rs1;" */
static bool trans_th_mveqz(DisasContext *ctx, arg_th_mveqz *a)
{
    REQUIRE_XTHEADCONDMOV(ctx);
    return gen_th_condmove(ctx, a, TCG_COND_EQ);
}

/* th.mvnez: "if (rs2 != 0) rd = rs1;" */
static bool trans_th_mvnez(DisasContext *ctx, arg_th_mveqz *a)
{
    REQUIRE_XTHEADCONDMOV(ctx);
    return gen_th_condmove(ctx, a, TCG_COND_NE);
}

/* XTheadFmemIdx */

/*
 * Load a float of the width given by memop from an indexed address,
 * NaN-boxing single-precision values into the 64-bit FP register.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If  zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_fload_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                          bool zext_offs)
{
    TCGv_i64 rd = cpu_fpr[a->rd];
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_ld_i64(rd, addr, ctx->mem_idx, memop);
    if ((memop & MO_SIZE) == MO_32) {
        gen_nanbox_s(rd, rd);
    }

    mark_fs_dirty(ctx);
    return true;
}

/*
 * Store a float of the width given by memop to an indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If  zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_fstore_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                           bool zext_offs)
{
    TCGv_i64 rd = cpu_fpr[a->rd];
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_st_i64(rd, addr, ctx->mem_idx, memop);

    return true;
}

static bool trans_th_flrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fload_idx(ctx, a, MO_TEUQ, false);
}

static bool trans_th_flrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fload_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_flurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fload_idx(ctx, a, MO_TEUQ, true);
}

static bool trans_th_flurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fload_idx(ctx, a, MO_TEUL, true);
}

static bool trans_th_fsrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fstore_idx(ctx, a, MO_TEUQ, false);
}

static bool trans_th_fsrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fstore_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_fsurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fstore_idx(ctx, a, MO_TEUQ, true);
}

static bool trans_th_fsurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fstore_idx(ctx, a, MO_TEUL, true);
}

/* XTheadFmv */

static bool trans_th_fmv_hw_x(DisasContext *ctx, arg_th_fmv_hw_x *a)
{
    REQUIRE_XTHEADFMV(ctx);
    REQUIRE_32BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, src1);
    tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], t1, 32, 32);
    tcg_temp_free_i64(t1);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_th_fmv_x_hw(DisasContext *ctx, arg_th_fmv_x_hw *a)
{
    REQUIRE_XTHEADFMV(ctx);
    REQUIRE_32BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    TCGv dst;
    TCGv_i64 t1;

    dst = dest_gpr(ctx, a->rd);
    t1 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t1, cpu_fpr[a->rs1], 32, 32);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_set_gpr(ctx, a->rd, dst);
    tcg_temp_free_i64(t1);
    mark_fs_dirty(ctx);
    return true;
}
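
/*
 * Illustrative note (my summary, not from the vendor spec text): on RV32
 * with D, th.fmv.hw.x writes GPR rs1 into bits [63:32] of fpr[rd] while
 * preserving bits [31:0], and th.fmv.x.hw reads bits [63:32] of fpr[rs1]
 * into a GPR. Combined with the standard fmv.w.x/fmv.x.w, which handle
 * bits [31:0], this lets a full 64-bit double move between the FP and
 * integer register files.
 */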

/* XTheadMac */

static bool gen_th_mac(DisasContext *ctx, arg_r *a,
                       void (*accumulate_func)(TCGv, TCGv, TCGv),
                       void (*extend_operand_func)(TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src0 = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv tmp = tcg_temp_new();

    if (extend_operand_func) {
        TCGv tmp2 = tcg_temp_new();
        extend_operand_func(tmp, src1);
        extend_operand_func(tmp2, src2);
        tcg_gen_mul_tl(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
    } else {
        tcg_gen_mul_tl(tmp, src1, src2);
    }

    accumulate_func(dest, src0, tmp);
    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free(tmp);

    return true;
}
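
/*
 * Worked example (illustrative): for th.mulah with rs1[15:0] = 0x8000 and
 * rs2[15:0] = 0x0002, both halfwords are sign-extended first, so the
 * product is -32768 * 2 = -65536; that value is added to rd and the sum is
 * sign-extended to XLEN because ctx->ol is forced to MXL_RV32 in the trans
 * function below.
 */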

/* th.mula: "rd = rd + rs1 * rs2" */
static bool trans_th_mula(DisasContext *ctx, arg_th_mula *a)
{
    REQUIRE_XTHEADMAC(ctx);
    return gen_th_mac(ctx, a, tcg_gen_add_tl, NULL);
}

/* th.mulah: "rd = sext.w(rd + sext.w(rs1[15:0]) * sext.w(rs2[15:0]))" */
static bool trans_th_mulah(DisasContext *ctx, arg_th_mulah *a)
{
    REQUIRE_XTHEADMAC(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_add_tl, tcg_gen_ext16s_tl);
}

/* th.mulaw: "rd = sext.w(rd + rs1 * rs2)" */
static bool trans_th_mulaw(DisasContext *ctx, arg_th_mulaw *a)
{
    REQUIRE_XTHEADMAC(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_add_tl, NULL);
}

/* th.muls: "rd = rd - rs1 * rs2" */
static bool trans_th_muls(DisasContext *ctx, arg_th_muls *a)
{
    REQUIRE_XTHEADMAC(ctx);
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, NULL);
}

/* th.mulsh: "rd = sext.w(rd - sext.w(rs1[15:0]) * sext.w(rs2[15:0]))" */
static bool trans_th_mulsh(DisasContext *ctx, arg_th_mulsh *a)
{
    REQUIRE_XTHEADMAC(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, tcg_gen_ext16s_tl);
}

/* th.mulsw: "rd = sext.w(rd - rs1 * rs2)" */
static bool trans_th_mulsw(DisasContext *ctx, arg_th_mulsw *a)
{
    REQUIRE_XTHEADMAC(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, NULL);
}

/* XTheadMemIdx */

/*
 * Load with memop and add (imm5 << imm2) to rs1.
 * If !preinc, then the load address is rs1.
 * If  preinc, then the load address is rs1 + (imm5 << imm2).
 */
static bool gen_load_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
                         bool preinc)
{
    if (a->rs1 == a->rd) {
        return false;
    }

    int imm = a->imm5 << a->imm2;
    TCGv addr = get_address(ctx, a->rs1, preinc ? imm : 0);
    TCGv rd = dest_gpr(ctx, a->rd);
    TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);

    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
    tcg_gen_addi_tl(rs1, rs1, imm);
    gen_set_gpr(ctx, a->rd, rd);
    gen_set_gpr(ctx, a->rs1, rs1);

    tcg_temp_free(addr);
    return true;
}
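
/*
 * Illustration (assuming the "rd, (rs1), imm5, imm2" operand order from
 * the vendor documentation): "th.lwib a0, (a1), 4, 2" loads a word from
 * a1 + (4 << 2) = a1 + 16 and then advances a1 by 16, while
 * "th.lwia a0, (a1), 4, 2" loads from the unmodified a1 and advances it
 * afterwards. rd == rs1 is rejected above since the register would be
 * written by both the load and the increment.
 */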

/*
 * Store with memop and add (imm5 << imm2) to rs1.
 * If !preinc, then the store address is rs1.
 * If  preinc, then the store address is rs1 + (imm5 << imm2).
 */
static bool gen_store_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
                          bool preinc)
{
    int imm = a->imm5 << a->imm2;
    TCGv addr = get_address(ctx, a->rs1, preinc ? imm : 0);
    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    tcg_gen_addi_tl(rs1, rs1, imm);
    gen_set_gpr(ctx, a->rs1, rs1);

    tcg_temp_free(addr);
    return true;
}
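
/*
 * The store variants mirror the loads: e.g. "th.swia a0, (a1), 4, 2"
 * (operand order assumed as above) stores a0 to the current a1 and then
 * adds 16 to a1, while the "ib" forms store to a1 + 16. Unlike the loads,
 * rd == rs1 is not rejected here because the store data is read before
 * rs1 is updated.
 */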

static bool trans_th_ldia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TESQ, false);
}

static bool trans_th_ldib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TESQ, true);
}

static bool trans_th_lwia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESL, false);
}

static bool trans_th_lwib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESL, true);
}

static bool trans_th_lwuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TEUL, false);
}

static bool trans_th_lwuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TEUL, true);
}

static bool trans_th_lhia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESW, false);
}

static bool trans_th_lhib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESW, true);
}

static bool trans_th_lhuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TEUW, false);
}

static bool trans_th_lhuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TEUW, true);
}

static bool trans_th_lbia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_SB, false);
}

static bool trans_th_lbib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_SB, true);
}

static bool trans_th_lbuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_UB, false);
}

static bool trans_th_lbuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_UB, true);
}

static bool trans_th_sdia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_inc(ctx, a, MO_TESQ, false);
}

static bool trans_th_sdib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_inc(ctx, a, MO_TESQ, true);
}

static bool trans_th_swia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESL, false);
}

static bool trans_th_swib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESL, true);
}

static bool trans_th_shia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESW, false);
}

static bool trans_th_shib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESW, true);
}

static bool trans_th_sbia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_SB, false);
}

static bool trans_th_sbib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_SB, true);
}

/*
 * Load with memop from indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If  zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_load_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                         bool zext_offs)
{
    TCGv rd = dest_gpr(ctx, a->rd);
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, rd);

    return true;
}

/*
 * Store with memop to indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If  zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_store_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                          bool zext_offs)
{
    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);

    return true;
}

static bool trans_th_lrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TESQ, false);
}

static bool trans_th_lrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESL, false);
}

static bool trans_th_lrwu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_lrh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESW, false);
}

static bool trans_th_lrhu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TEUW, false);
}

static bool trans_th_lrb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_SB, false);
}

static bool trans_th_lrbu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_UB, false);
}

static bool trans_th_srd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_idx(ctx, a, MO_TESQ, false);
}

static bool trans_th_srw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESL, false);
}

static bool trans_th_srh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESW, false);
}

static bool trans_th_srb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_SB, false);
}

static bool trans_th_lurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TESQ, true);
}

static bool trans_th_lurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESL, true);
}

static bool trans_th_lurwu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TEUL, true);
}

static bool trans_th_lurh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESW, true);
}

static bool trans_th_lurhu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TEUW, true);
}

static bool trans_th_lurb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_SB, true);
}

static bool trans_th_lurbu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_UB, true);
}

static bool trans_th_surd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_idx(ctx, a, MO_TESQ, true);
}

static bool trans_th_surw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESL, true);
}

static bool trans_th_surh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESW, true);
}

static bool trans_th_surb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_SB, true);
}

/* XTheadMemPair */

static bool gen_loadpair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
                            int shamt)
{
    if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) {
        return false;
    }

    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv addr1 = tcg_temp_new();
    TCGv addr2 = tcg_temp_new();
    int imm = a->sh2 << shamt;

    addr1 = get_address(ctx, a->rs, imm);
    addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);

    tcg_gen_qemu_ld_tl(t1, addr1, ctx->mem_idx, memop);
    tcg_gen_qemu_ld_tl(t2, addr2, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd1, t1);
    gen_set_gpr(ctx, a->rd2, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(addr1);
    tcg_temp_free(addr2);
    return true;
}
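
/*
 * Illustration (operand order per my reading of the vendor spec):
 * "th.lwd rd1, rd2, (rs), imm2" loads rd1 from rs + (imm2 << 3) and rd2
 * from the next word at rs + (imm2 << 3) + 4; th.ldd shifts imm2 by 4 and
 * uses a stride of 8. Operand combinations where a destination overlaps
 * the base register or the other destination are rejected above.
 */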

static bool trans_th_ldd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    REQUIRE_64BIT(ctx);
    return gen_loadpair_tl(ctx, a, MO_TESQ, 4);
}

static bool trans_th_lwd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_loadpair_tl(ctx, a, MO_TESL, 3);
}

static bool trans_th_lwud(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_loadpair_tl(ctx, a, MO_TEUL, 3);
}

static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
                             int shamt)
{
    TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE);
    TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE);
    TCGv addr1 = tcg_temp_new();
    TCGv addr2 = tcg_temp_new();
    int imm = a->sh2 << shamt;

    addr1 = get_address(ctx, a->rs, imm);
    addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);

    tcg_gen_qemu_st_tl(data1, addr1, ctx->mem_idx, memop);
    tcg_gen_qemu_st_tl(data2, addr2, ctx->mem_idx, memop);

    tcg_temp_free(addr1);
    tcg_temp_free(addr2);
    return true;
}

static bool trans_th_sdd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    REQUIRE_64BIT(ctx);
    return gen_storepair_tl(ctx, a, MO_TESQ, 4);
}

static bool trans_th_swd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_storepair_tl(ctx, a, MO_TESL, 3);
}

/* XTheadSync */

static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MS(ctx);
    gen_helper_tlb_flush_all(cpu_env);
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
static void gen_th_sync_local(DisasContext *ctx)
{
    /*
     * Emulate out-of-order barriers with pipeline flush
     * by exiting the translation block.
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
}
#endif

static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MSU(ctx);

    /*
     * th.sync is an out-of-order barrier.
     */
    gen_th_sync_local(ctx);

    return true;
#else
    return false;
#endif
}

static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MSU(ctx);

    /*
     * th.sync.i is th.sync plus pipeline flush.
     */
    gen_th_sync_local(ctx);

    return true;
#else
    return false;
#endif
}

static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a)
{
    /* This instruction has the same behaviour as th.sync.i. */
    return trans_th_sync_i(ctx, a);
}

static bool trans_th_sync_s(DisasContext *ctx, arg_th_sync_s *a)
{
    /* This instruction has the same behaviour as th.sync. */
    return trans_th_sync(ctx, a);
}