1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * LoongArch emulation for QEMU - main translation routines.
4 *
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
6 */
7
8 #include "qemu/osdep.h"
9 #include "cpu.h"
10 #include "tcg/tcg-op.h"
11 #include "tcg/tcg-op-gvec.h"
12 #include "exec/translation-block.h"
13 #include "exec/translator.h"
14 #include "exec/helper-proto.h"
15 #include "exec/helper-gen.h"
16 #include "exec/log.h"
17 #include "qemu/qemu-print.h"
18 #include "fpu/softfloat.h"
19 #include "tcg_loongarch.h"
20 #include "translate.h"
21 #include "internals.h"
22 #include "vec.h"
23
24 /* Global register indices */
25 TCGv cpu_gpr[32], cpu_pc;
26 static TCGv cpu_lladdr, cpu_llval;
27
28 #define HELPER_H "helper.h"
29 #include "exec/helper-info.c.inc"
30 #undef HELPER_H
31
32 #define DISAS_STOP DISAS_TARGET_0
33 #define DISAS_EXIT DISAS_TARGET_1
34 #define DISAS_EXIT_UPDATE DISAS_TARGET_2
35
/* Byte offset of vector register @regno within CPULoongArchState. */
static inline int vec_full_offset(int regno)
{
    return offsetof(CPULoongArchState, fpr[regno]);
}
40
/*
 * Byte offset of element @index (of size 1 << @mop) within vector
 * register @regno in CPULoongArchState.
 */
static inline int vec_reg_offset(int regno, int index, MemOp mop)
{
    const uint8_t esize = 1 << mop;
    int elt_ofs = index * esize;

    /*
     * On big-endian hosts, sub-doubleword elements are laid out in the
     * opposite order within each 64-bit slot; flip the low offset bits.
     */
    if (HOST_BIG_ENDIAN && esize < 8) {
        elt_ofs ^= 8 - esize;
    }

    return vec_full_offset(regno) + elt_ofs;
}
52
/* Load 64-bit lane @index of vector register @regno into @dest. */
static inline void get_vreg64(TCGv_i64 dest, int regno, int index)
{
    tcg_gen_ld_i64(dest, tcg_env,
                   offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}
58
/* Store @src into 64-bit lane @index of vector register @regno. */
static inline void set_vreg64(TCGv_i64 src, int regno, int index)
{
    tcg_gen_st_i64(src, tcg_env,
                   offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}
64
/* Decoder immediate transform: x + 1 (used via !function in decodetree). */
static inline int plus_1(DisasContext *ctx, int x)
{
    return x + 1;
}
69
/* Decoder immediate transform: x * 2 (used via !function in decodetree). */
static inline int shl_1(DisasContext *ctx, int x)
{
    return x << 1;
}
74
/* Decoder immediate transform: x * 4 (used via !function in decodetree). */
static inline int shl_2(DisasContext *ctx, int x)
{
    return x << 2;
}
79
/* Decoder immediate transform: x * 8 (used via !function in decodetree). */
static inline int shl_3(DisasContext *ctx, int x)
{
    return x << 3;
}
84
85 /*
86 * LoongArch the upper 32 bits are undefined ("can be any value").
87 * QEMU chooses to nanbox, because it is most likely to show guest bugs early.
88 */
/* Nanbox a single-precision value: force the upper 32 bits to all-ones. */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
93
/*
 * Raise exception @excp at the current instruction: synchronize cpu_pc,
 * call the raise helper, and terminate the translation block.
 */
void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}
100
/*
 * Emit a jump to @dest using TB chaining slot @n.  Use a direct
 * goto_tb/exit_tb link when the translator allows it; otherwise fall
 * back to an indirect jump through the TB lookup helper.
 */
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    /* Under 32-bit virtual addressing, the target wraps to 32 bits. */
    if (ctx->va32) {
        dest = (uint32_t) dest;
    }

    if (translator_use_goto_tb(&ctx->base, dest)) {
        /* Note: movi must sit between goto_tb and exit_tb for chaining. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}
116
/*
 * TranslatorOps hook: initialize the per-TB disassembly context from
 * the TB flags and the CPU's configuration registers.
 */
static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
                                            CPUState *cs)
{
    int64_t bound;
    CPULoongArchState *env = cpu_env(cs);
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    /* Privilege level and MMU index are encoded in the TB flags. */
    ctx->plv = ctx->base.tb->flags & HW_FLAGS_PLV_MASK;
    if (ctx->base.tb->flags & HW_FLAGS_CRMD_PG) {
        /* Paging enabled: memory index follows the privilege level. */
        ctx->mem_idx = ctx->plv;
    } else {
        /* Direct-address translation mode. */
        ctx->mem_idx = MMU_DA_IDX;
    }

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    /* Vector length: LASX (256-bit) supersedes LSX (128-bit) if present. */
    if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LSX)) {
        ctx->vl = LSX_LEN;
    }

    if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LASX)) {
        ctx->vl = LASX_LEN;
    }

    ctx->la64 = is_la64(env);
    ctx->va32 = (ctx->base.tb->flags & HW_FLAGS_VA32) != 0;

    /* Shared constant-zero source for reads of $zero. */
    ctx->zero = tcg_constant_tl(0);

    ctx->cpucfg1 = env->cpucfg[1];
    ctx->cpucfg2 = env->cpucfg[2];
}
152
/* TranslatorOps hook: nothing to emit at TB start for LoongArch. */
static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
156
/* TranslatorOps hook: record the guest PC at the start of each insn. */
static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
163
164 /*
165 * Wrappers for getting reg values.
166 *
167 * The $zero register does not have cpu_gpr[0] allocated -- we supply the
168 * constant zero as a source, and an uninitialized sink as destination.
169 *
170 * Further, we may provide an extension for word operations.
171 */
gpr_src(DisasContext * ctx,int reg_num,DisasExtend src_ext)172 static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext)
173 {
174 TCGv t;
175
176 if (reg_num == 0) {
177 return ctx->zero;
178 }
179
180 switch (src_ext) {
181 case EXT_NONE:
182 return cpu_gpr[reg_num];
183 case EXT_SIGN:
184 t = tcg_temp_new();
185 tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
186 return t;
187 case EXT_ZERO:
188 t = tcg_temp_new();
189 tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
190 return t;
191 }
192 g_assert_not_reached();
193 }
194
gpr_dst(DisasContext * ctx,int reg_num,DisasExtend dst_ext)195 static TCGv gpr_dst(DisasContext *ctx, int reg_num, DisasExtend dst_ext)
196 {
197 if (reg_num == 0 || dst_ext) {
198 return tcg_temp_new();
199 }
200 return cpu_gpr[reg_num];
201 }
202
/*
 * Commit @t to GPR @reg_num, applying the requested extension.
 * Writes to $zero are discarded.
 */
static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext)
{
    if (reg_num == 0) {
        return;
    }

    if (dst_ext == EXT_NONE) {
        tcg_gen_mov_tl(cpu_gpr[reg_num], t);
    } else if (dst_ext == EXT_SIGN) {
        tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
    } else if (dst_ext == EXT_ZERO) {
        tcg_gen_ext32u_tl(cpu_gpr[reg_num], t);
    } else {
        g_assert_not_reached();
    }
}
221
/* Load the low 64 bits of FP register @reg_num into a new temporary. */
static TCGv get_fpr(DisasContext *ctx, int reg_num)
{
    TCGv t = tcg_temp_new();
    tcg_gen_ld_i64(t, tcg_env,
                   offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
    return t;
}
229
/* Store @val into the low 64 bits of FP register @reg_num. */
static void set_fpr(int reg_num, TCGv val)
{
    tcg_gen_st_i64(val, tcg_env,
                   offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
}
235
/*
 * Compute an effective address: @base plus optional @addend (NULL means
 * no addend), truncated to 32 bits under VA32.  Returns @base unchanged
 * when no adjustment is needed, otherwise a new temporary.
 */
static TCGv make_address_x(DisasContext *ctx, TCGv base, TCGv addend)
{
    TCGv temp = NULL;

    /* A single temporary serves both the add and the truncation. */
    if (addend || ctx->va32) {
        temp = tcg_temp_new();
    }
    if (addend) {
        tcg_gen_add_tl(temp, base, addend);
        base = temp;
    }
    if (ctx->va32) {
        tcg_gen_ext32u_tl(temp, base);
        base = temp;
    }
    return base;
}
253
/*
 * Compute an effective address from @base plus immediate displacement
 * @ofs, honoring VA32 truncation via make_address_x.
 */
static TCGv make_address_i(DisasContext *ctx, TCGv base, target_long ofs)
{
    TCGv disp;

    if (ofs != 0) {
        disp = tcg_constant_tl(ofs);
    } else {
        /* NULL tells make_address_x there is nothing to add. */
        disp = NULL;
    }
    return make_address_x(ctx, base, disp);
}
259
/*
 * Adjust a PC-relative target address for the addressing mode:
 * under VA32 the address is sign-extended from 32 bits.
 */
static uint64_t make_address_pc(DisasContext *ctx, uint64_t addr)
{
    if (ctx->va32) {
        addr = (int32_t)addr;
    }
    return addr;
}
267
268 #include "decode-insns.c.inc"
269 #include "insn_trans/trans_arith.c.inc"
270 #include "insn_trans/trans_shift.c.inc"
271 #include "insn_trans/trans_bit.c.inc"
272 #include "insn_trans/trans_memory.c.inc"
273 #include "insn_trans/trans_atomic.c.inc"
274 #include "insn_trans/trans_extra.c.inc"
275 #include "insn_trans/trans_farith.c.inc"
276 #include "insn_trans/trans_fcmp.c.inc"
277 #include "insn_trans/trans_fcnv.c.inc"
278 #include "insn_trans/trans_fmov.c.inc"
279 #include "insn_trans/trans_fmemory.c.inc"
280 #include "insn_trans/trans_branch.c.inc"
281 #include "insn_trans/trans_privileged.c.inc"
282 #include "insn_trans/trans_vec.c.inc"
283
/*
 * TranslatorOps hook: fetch, decode and translate one 4-byte insn.
 * Undecodable opcodes are logged and raise EXCCODE_INE.
 */
static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->opcode = translator_ldl(cpu_env(cs), &ctx->base, ctx->base.pc_next);

    if (!decode(ctx, ctx->opcode)) {
        qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. "
                      "0x%" VADDR_PRIx ": 0x%x\n",
                      ctx->base.pc_next, ctx->opcode);
        generate_exception(ctx, EXCCODE_INE);
    }

    /* All LoongArch instructions are 4 bytes. */
    ctx->base.pc_next += 4;

    /* Under 32-bit virtual addressing the PC wraps at 4GiB. */
    if (ctx->va32) {
        ctx->base.pc_next = (uint32_t)ctx->base.pc_next;
    }
}
303
/*
 * TranslatorOps hook: emit the TB epilogue according to how
 * translation ended (is_jmp disposition).
 */
static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_STOP:
        /* CPU state changed; continue via indirect TB lookup. */
        tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_TOO_MANY:
        /* Insn budget exhausted; chain to the next sequential TB. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* Exception already raised; nothing more to emit. */
        break;
    case DISAS_EXIT_UPDATE:
        tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
        QEMU_FALLTHROUGH;
    case DISAS_EXIT:
        /* Return to the main loop (e.g. interrupts must be checked). */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
328
/* Hook table consumed by the generic translator loop. */
static const TranslatorOps loongarch_tr_ops = {
    .init_disas_context = loongarch_tr_init_disas_context,
    .tb_start           = loongarch_tr_tb_start,
    .insn_start         = loongarch_tr_insn_start,
    .translate_insn     = loongarch_tr_translate_insn,
    .tb_stop            = loongarch_tr_tb_stop,
};
336
/*
 * Entry point from the TCG accel: translate guest code at @pc into
 * TB @tb, driving the generic translator loop with our hooks.
 */
void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
                              int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &loongarch_tr_ops, &ctx.base);
}
345
/*
 * One-time TCG initialization: create the global TCG values backing
 * the guest GPRs, PC and LL/SC state in CPULoongArchState.
 */
void loongarch_translate_init(void)
{
    int i;

    /* $zero has no backing global; reads/writes are special-cased. */
    cpu_gpr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPULoongArchState, gpr[i]),
                                        regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPULoongArchState, pc), "pc");
    cpu_lladdr = tcg_global_mem_new(tcg_env,
                    offsetof(CPULoongArchState, lladdr), "lladdr");
    cpu_llval = tcg_global_mem_new(tcg_env,
                    offsetof(CPULoongArchState, llval), "llval");

#ifndef CONFIG_USER_ONLY
    loongarch_csr_translate_init();
#endif
}
367