1 /*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
28 #include "exec/translation-block.h"
29 #include "exec/log.h"
30
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef HELPER_H
34
35
/* Per-instruction translation state. */
typedef struct DisasContext {
    DisasContextBase base;
    CPURXState *env;
    uint32_t pc;        /* insn address; used e.g. when reading CR "PC" */
    uint32_t tb_flags;  /* cached PSW bits (PM, U) for this TB */
} DisasContext;

/* Lazily materialized condition: "cond applied to (value vs 0)". */
typedef struct DisasCompare {
    TCGv value;   /* operand compared against zero */
    TCGv temp;    /* caller-supplied scratch; clobbered by compound conds */
    TCGCond cond;
} DisasCompare;
48
rx_crname(uint8_t cr)49 const char *rx_crname(uint8_t cr)
50 {
51 static const char *cr_names[] = {
52 "psw", "pc", "usp", "fpsw", "", "", "", "",
53 "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
54 };
55 if (cr >= ARRAY_SIZE(cr_names)) {
56 return "illegal";
57 }
58 return cr_names[cr];
59 }
60
/* Target-specific values for dc->base.is_jmp. */
#define DISAS_JUMP DISAS_TARGET_0
#define DISAS_UPDATE DISAS_TARGET_1
#define DISAS_EXIT DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_regs[16];                                /* r0..r15 */
static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;  /* lazy PSW flags */
static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
static TCGv cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;                                 /* MAC accumulator */

/* r0 doubles as the active stack pointer (ISP or USP, per PSW.U). */
#define cpu_sp cpu_regs[0]
75
76 /* decoder helper */
decode_load_bytes(DisasContext * ctx,uint32_t insn,int i,int n)77 static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
78 int i, int n)
79 {
80 while (++i <= n) {
81 uint8_t b = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next++);
82 insn |= b << (32 - i * 8);
83 }
84 return insn;
85 }
86
li(DisasContext * ctx,int sz)87 static uint32_t li(DisasContext *ctx, int sz)
88 {
89 target_ulong addr;
90 uint32_t tmp;
91 CPURXState *env = ctx->env;
92 addr = ctx->base.pc_next;
93
94 switch (sz) {
95 case 1:
96 ctx->base.pc_next += 1;
97 return (int8_t)translator_ldub(env, &ctx->base, addr);
98 case 2:
99 ctx->base.pc_next += 2;
100 return (int16_t)translator_lduw(env, &ctx->base, addr);
101 case 3:
102 ctx->base.pc_next += 3;
103 tmp = (int8_t)translator_ldub(env, &ctx->base, addr + 2);
104 tmp <<= 16;
105 tmp |= translator_lduw(env, &ctx->base, addr);
106 return tmp;
107 case 0:
108 ctx->base.pc_next += 4;
109 return translator_ldl(env, &ctx->base, addr);
110 default:
111 g_assert_not_reached();
112 }
113 return 0;
114 }
115
/*
 * Decode the 3-bit short branch displacement.  Raw values 0..2 encode
 * displacements 8..10; raw values 3..7 encode themselves.
 */
static int bdsp_s(DisasContext *ctx, int d)
{
    return (d < 3) ? d + 8 : d;
}
131
132 /* Include the auto-generated decoder. */
133 #include "decode-insns.c.inc"
134
/* Dump PC, the packed PSW and all 16 general registers (for -d cpu). */
void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPURXState *env = cpu_env(cs);
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    /* Four registers per output line. */
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}
150
/*
 * Emit a jump to dest, chaining TBs directly when allowed, otherwise
 * falling back to an indirect lookup.  Ends the current TB.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        /* Direct chain: n selects which of the two TB exit slots to patch. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
163
/* generic load wrapper */
/* Sign-extending load of 2^size bytes from *mem into reg. */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}

/* unsigned load wrapper */
/* Zero-extending load of 2^size bytes from *mem into reg. */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}

/* generic store wrapper */
/* Store the low 2^size bytes of reg to *mem. */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}

/* [ri, rb] */
/* Compute register-indexed address: mem = rb + (ri << size). */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
189
/* dsp[reg] */
/*
 * Compute a dsp[reg] effective address.
 *   ld == 0: register direct — returns cpu_regs[reg]; mem is untouched.
 *   ld == 1: 8-bit displacement, scaled by the access size.
 *   ld == 2: 16-bit displacement, scaled by the access size.
 * Displacement bytes are consumed from the instruction stream.
 * Returns either cpu_regs[reg] or mem; caller must use the return value.
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    default:
        g_assert_not_reached();
    }
}

/* Map the 3-bit "mi" memory-extension field to a MemOp. */
static inline MemOp mi_to_mop(unsigned mi)
{
    /* 0:.b  1:.w  2:.l  3:.uw  4:.ub */
    static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
    tcg_debug_assert(mi < 5);
    return mop[mi];
}
220
/* load source operand */
/*
 * Fetch the src operand selected by (ld, mi, rs): a memory operand
 * (loaded into mem, which is returned) when ld < 3, else register rs
 * directly.  Callers must treat the result as read-only since it may
 * alias cpu_regs[rs].
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}
236
237 /* Processor mode check */
is_privileged(DisasContext * ctx,int is_exception)238 static int is_privileged(DisasContext *ctx, int is_exception)
239 {
240 if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
241 if (is_exception) {
242 gen_helper_raise_privilege_violation(tcg_env);
243 }
244 return 0;
245 } else {
246 return 1;
247 }
248 }
249
/* generate QEMU condition */
/*
 * Convert RX condition code cond (0..15) into a DisasCompare, i.e.
 * "dc->cond applied to (dc->value vs 0)".  Relies on the lazy flag
 * scheme: psw_z holds a value that is zero iff Z is set, psw_s/psw_o
 * carry S/O in their sign bits, psw_c is a 0/1 boolean.
 * dc->temp must be a fresh temp; compound conditions clobber it.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        /* temp = C & (Z-flag clear) */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        /* S^O lives in the sign bit of temp */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /* temp = psw_z masked to zero when S^O is set */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}
318
/*
 * Read control register cr into ret (mvfc/pushc).  pc is the value to
 * report for CR "PC".  Unimplemented registers read as zero.
 */
static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
{
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, tcg_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        /* When PSW.U is set, USP is the live stack pointer (r0). */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_sp);
        } else {
            tcg_gen_mov_i32(ret, cpu_usp);
        }
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        /* When PSW.U is clear, ISP is the live stack pointer (r0). */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_isp);
        } else {
            tcg_gen_mov_i32(ret, cpu_sp);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret, cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
        /* Unimplement registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
}
364
/*
 * Write val to control register cr (mvtc/popc).  Writes to cr >= 8
 * are silently dropped in user mode; writes to PC and unimplemented
 * registers are ignored with a guest-error log.
 */
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallow control register write %s", rx_crname(cr));
        return;
    }
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(tcg_env, val);
        if (is_privileged(ctx, 0)) {
            /* PSW.{I,U} may be updated here. exit TB. */
            ctx->base.is_jmp = DISAS_UPDATE;
        }
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        /* When PSW.U is set, USP is the live stack pointer (r0). */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_sp, val);
        } else {
            tcg_gen_mov_i32(cpu_usp, val);
        }
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(tcg_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        /* When PSW.U is clear, ISP is the live stack pointer (r0). */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_isp, val);
        } else {
            tcg_gen_mov_i32(cpu_sp, val);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplement control register %d", cr);
        break;
    }
}
417
/* Push val: pre-decrement SP by 4, then store 32 bits. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}

/* Pop 32 bits into ret, then post-increment SP by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
429
430 /* mov.<bwl> rs,dsp5[rd] */
trans_MOV_rm(DisasContext * ctx,arg_MOV_rm * a)431 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
432 {
433 TCGv mem;
434 mem = tcg_temp_new();
435 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
436 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
437 return true;
438 }
439
440 /* mov.<bwl> dsp5[rs],rd */
trans_MOV_mr(DisasContext * ctx,arg_MOV_mr * a)441 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
442 {
443 TCGv mem;
444 mem = tcg_temp_new();
445 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
446 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
447 return true;
448 }
449
/* mov.l #uimm4,rd */
/* mov.l #uimm8,rd */
/* mov.l #imm,rd */
/* Load an immediate (already decoded/extended into a->imm) into rd. */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}
458
459 /* mov.<bwl> #uimm8,dsp[rd] */
460 /* mov.<bwl> #imm, dsp[rd] */
trans_MOV_im(DisasContext * ctx,arg_MOV_im * a)461 static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
462 {
463 TCGv imm, mem;
464 imm = tcg_constant_i32(a->imm);
465 mem = tcg_temp_new();
466 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
467 rx_gen_st(a->sz, imm, mem);
468 return true;
469 }
470
471 /* mov.<bwl> [ri,rb],rd */
trans_MOV_ar(DisasContext * ctx,arg_MOV_ar * a)472 static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
473 {
474 TCGv mem;
475 mem = tcg_temp_new();
476 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
477 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
478 return true;
479 }
480
481 /* mov.<bwl> rd,[ri,rb] */
trans_MOV_ra(DisasContext * ctx,arg_MOV_ra * a)482 static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
483 {
484 TCGv mem;
485 mem = tcg_temp_new();
486 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
487 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
488 return true;
489 }
490
/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
/*
 * General mov with both operands selected by ld fields (3 == register).
 * NOTE(review): in the reg->mem and mem->reg arms the rs/rd roles look
 * inverted vs. the mnemonic comments; this follows the decodetree field
 * assignment — confirm against decode-insns before touching.
 */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    TCGv tmp, mem, addr;

    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        /* Register-to-register: sign-extend to the operand size. */
        tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] */
        /* Load into tmp first; mem is reused for both addresses. */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
    }
    return true;
}
524
/* mov.<bwl> rs,[rd+] */
/* mov.<bwl> rs,[-rd] */
/*
 * Store with post-increment (ad == 0) or pre-decrement (ad == 1)
 * addressing.  rs is copied first so the stored value is correct
 * even when rs == rd.
 */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    return true;
}
541
/* mov.<bwl> [rd+],rs */
/* mov.<bwl> [-rd],rs */
/*
 * Load with post-increment (ad == 0) or pre-decrement (ad == 1)
 * addressing.  The load goes through a temp so the result is correct
 * even when rs == rd.
 */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}
558
559 /* movu.<bw> dsp5[rs],rd */
560 /* movu.<bw> dsp[rs],rd */
trans_MOVU_mr(DisasContext * ctx,arg_MOVU_mr * a)561 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
562 {
563 TCGv mem;
564 mem = tcg_temp_new();
565 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
566 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
567 return true;
568 }
569
/* movu.<bw> rs,rd */
/* Register-to-register zero-extension to the operand size. */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
    tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz);
    return true;
}
576
577 /* movu.<bw> [ri,rb],rd */
trans_MOVU_ar(DisasContext * ctx,arg_MOVU_ar * a)578 static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
579 {
580 TCGv mem;
581 mem = tcg_temp_new();
582 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
583 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
584 return true;
585 }
586
/* movu.<bw> [rd+],rs */
/* mov.<bw> [-rd],rs */
/*
 * Zero-extending load with post-increment (ad == 0) or pre-decrement
 * (ad == 1) addressing; temp keeps rs == rd correct.
 */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}
603
604
605 /* pop rd */
trans_POP(DisasContext * ctx,arg_POP * a)606 static bool trans_POP(DisasContext *ctx, arg_POP *a)
607 {
608 /* mov.l [r0+], rd */
609 arg_MOV_rp mov_a;
610 mov_a.rd = 0;
611 mov_a.rs = a->rd;
612 mov_a.ad = 0;
613 mov_a.sz = MO_32;
614 trans_MOV_pr(ctx, &mov_a);
615 return true;
616 }
617
618 /* popc cr */
trans_POPC(DisasContext * ctx,arg_POPC * a)619 static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
620 {
621 TCGv val;
622 val = tcg_temp_new();
623 pop(val);
624 move_to_cr(ctx, val, a->cr);
625 return true;
626 }
627
/* popm rd-rd2 */
/*
 * Pop registers rd..rd2 in ascending order.  An invalid range (rd == 0
 * or rd >= rd2) is only logged; the loop then pops whatever subrange
 * remains valid, clamped to r15.
 */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    if (a->rd == 0 || a->rd >= a->rd2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}
642
643
644 /* push.<bwl> rs */
trans_PUSH_r(DisasContext * ctx,arg_PUSH_r * a)645 static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
646 {
647 TCGv val;
648 val = tcg_temp_new();
649 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
650 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
651 rx_gen_st(a->sz, val, cpu_sp);
652 return true;
653 }
654
655 /* push.<bwl> dsp[rs] */
trans_PUSH_m(DisasContext * ctx,arg_PUSH_m * a)656 static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
657 {
658 TCGv mem, val, addr;
659 mem = tcg_temp_new();
660 val = tcg_temp_new();
661 addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
662 rx_gen_ld(a->sz, val, addr);
663 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
664 rx_gen_st(a->sz, val, cpu_sp);
665 return true;
666 }
667
668 /* pushc rx */
trans_PUSHC(DisasContext * ctx,arg_PUSHC * a)669 static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
670 {
671 TCGv val;
672 val = tcg_temp_new();
673 move_from_cr(ctx, val, a->cr, ctx->pc);
674 push(val);
675 return true;
676 }
677
/* pushm rs-rs2 */
/*
 * Push registers rs2..rs in descending order.  An invalid range
 * (rs == 0 or rs >= rs2) is only logged; the loop still pushes the
 * clamped subrange.
 */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    if (a->rs == 0 || a->rs >= a->rs2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}
693
694 /* xchg rs,rd */
trans_XCHG_rr(DisasContext * ctx,arg_XCHG_rr * a)695 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
696 {
697 TCGv tmp;
698 tmp = tcg_temp_new();
699 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
700 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
701 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
702 return true;
703 }
704
/* xchg dsp[rs].<mi>,rd */
/*
 * Atomic exchange between rd and a memory operand whose width and
 * extension come from mi (see mi_to_mop()).
 */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        /* mi == log2(access size) for the signed cases */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        /* 4 - mi: 3 -> word (1), 4 -> byte (0) */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    return true;
}
727
/* Write #imm into rd when (psw_z cond 0) holds, else leave rd alone. */
static inline void stcond(TCGCond cond, int rd, int imm)
{
    TCGv zero = tcg_constant_i32(0);
    TCGv value = tcg_constant_i32(imm);
    tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, zero,
                        value, cpu_regs[rd]);
}
737
/* stz #imm,rd */
/* Store #imm into rd when Z is set. */
static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
{
    stcond(TCG_COND_EQ, a->rd, a->imm);
    return true;
}

/* stnz #imm,rd */
/* Store #imm into rd when Z is clear. */
static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
{
    stcond(TCG_COND_NE, a->rd, a->imm);
    return true;
}
751
752 /* sccnd.<bwl> rd */
753 /* sccnd.<bwl> dsp:[rd] */
trans_SCCnd(DisasContext * ctx,arg_SCCnd * a)754 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
755 {
756 DisasCompare dc;
757 TCGv val, mem, addr;
758 dc.temp = tcg_temp_new();
759 psw_cond(&dc, a->cd);
760 if (a->ld < 3) {
761 val = tcg_temp_new();
762 mem = tcg_temp_new();
763 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
764 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
765 rx_gen_st(a->sz, val, addr);
766 } else {
767 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
768 }
769 return true;
770 }
771
/* rtsd #imm */
/* Deallocate the imm*4-byte frame, then pop the return address. */
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
    tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
780
/* rtsd #imm, rd-rd2 */
/*
 * Deallocate the frame, restore registers rd..rd2, then pop the return
 * address.  adj skips the part of the frame below the saved registers.
 * NOTE(review): when rd2 < rd the pop loop never runs and SP is only
 * adjusted as if rd..r15 were saved — presumably mirrors hardware
 * behavior for an invalid range; confirm against the RX ISA manual.
 */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
802
/* Generator callbacks for one- and two-source ALU operations. */
typedef void (*op2fn)(TCGv ret, TCGv arg1);
typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);

/* dst = opr(src) */
static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
    opr(cpu_regs[dst], cpu_regs[src]);
}

/* dst = opr(src, src2) */
static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
    opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}

/* dst = opr(src, #src2) */
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_constant_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
}

/* dst = opr(dst, <memory or register operand per (ld, mi)>) */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
}
830
/*
 * Logical AND.  Z and S are kept lazily: both flag temps hold the
 * result (Z tests it against zero, S reads its sign bit; see psw_cond).
 */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}

/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}
860
/* Logical OR; updates lazy Z/S like rx_and(). */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}

/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}
890
/* Logical XOR; updates lazy Z/S like rx_and(). */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}

/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
912
/* AND for flags only: updates lazy Z/S, discards the result (ret unused). */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}

/* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}

/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
933
/* Bitwise NOT; result also feeds the lazy Z/S flags. */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}

/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}
948
/*
 * Two's-complement negate.  O is set only for the INT_MIN input (the
 * one value whose negation overflows); C is set when the result is 0.
 * Z/S get the result lazily.
 */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}


/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}
966
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    /* Two add2 steps accumulate the carry-in and arg2 with carry-out. */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    /* O = (res ^ arg1) & ~(arg1 ^ arg2): like-signed operands, sign flip. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);   /* psw_z used as scratch here */
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}

/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}

/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2 */
    /* Only the 32-bit memory form is architecturally valid. */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1004
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    /* add2 yields the sum in psw_s and the carry-out in psw_c. */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    /* O = (res ^ arg1) & ~(arg1 ^ arg2); psw_z is scratch until the end. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}

/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}
1039
/* ret = arg1 - arg2 */
/*
 * C is the RX "no borrow" convention (arg1 >= arg2 unsigned).
 * O = (res ^ arg1) & (arg1 ^ arg2); psw_z is scratch until the end.
 * ret may be NULL (CMP: flags only, result discarded).
 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* CMP not required return */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}

/* Compare: subtract for flags only; dummy matches the op3fn signature. */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}
1059
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
/* Subtract-with-borrow, rewritten as add-with-carry of ~arg2. */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
}
1069
/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    /* dst index 0 is ignored: rx_cmp discards the result. */
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}

/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}

/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub rs2, rs, rd */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    /* rd = rs2 - rs (note the operand order) */
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}
1108
1109 /* sbb rs, rd */
trans_SBB_rr(DisasContext * ctx,arg_SBB_rr * a)1110 static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
1111 {
1112 rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
1113 return true;
1114 }
1115
1116 /* sbb dsp[rs], rd */
trans_SBB_mr(DisasContext * ctx,arg_SBB_mr * a)1117 static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
1118 {
1119 /* mi only 2 */
1120 if (a->mi != 2) {
1121 return false;
1122 }
1123 rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
1124 return true;
1125 }
1126
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    /* rd = |rs|; PSW flags are not written by this form. */
    rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
    return true;
}

/* max #imm, rd */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    /* rd = signed max(rd, imm) */
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}

/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* min #imm, rd */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    /* rd = signed min(rd, imm) */
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}

/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul #uimm4, rd */
/* mul #imm, rd */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    /* 32x32 -> low 32 bits; MUL does not affect PSW flags. */
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}

/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul rs, rs2, rd */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1187
1188 /* emul #imm, rd */
trans_EMUL_ir(DisasContext * ctx,arg_EMUL_ir * a)1189 static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
1190 {
1191 TCGv imm = tcg_constant_i32(a->imm);
1192 if (a->rd > 14) {
1193 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1194 }
1195 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1196 cpu_regs[a->rd], imm);
1197 return true;
1198 }
1199
1200 /* emul rs, rd */
1201 /* emul dsp[rs], rd */
trans_EMUL_mr(DisasContext * ctx,arg_EMUL_mr * a)1202 static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
1203 {
1204 TCGv val, mem;
1205 if (a->rd > 14) {
1206 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1207 }
1208 mem = tcg_temp_new();
1209 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1210 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1211 cpu_regs[a->rd], val);
1212 return true;
1213 }
1214
1215 /* emulu #imm, rd */
trans_EMULU_ir(DisasContext * ctx,arg_EMULU_ir * a)1216 static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
1217 {
1218 TCGv imm = tcg_constant_i32(a->imm);
1219 if (a->rd > 14) {
1220 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1221 }
1222 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1223 cpu_regs[a->rd], imm);
1224 return true;
1225 }
1226
1227 /* emulu rs, rd */
1228 /* emulu dsp[rs], rd */
trans_EMULU_mr(DisasContext * ctx,arg_EMULU_mr * a)1229 static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
1230 {
1231 TCGv val, mem;
1232 if (a->rd > 14) {
1233 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1234 }
1235 mem = tcg_temp_new();
1236 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1237 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1238 cpu_regs[a->rd], val);
1239 return true;
1240 }
1241
/* ret = arg1 / arg2 (signed); divide-by-zero and overflow cases are
 * handled inside the helper — TODO confirm against helper.c. */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, tcg_env, arg1, arg2);
}

/* ret = arg1 / arg2 (unsigned); special cases handled in the helper. */
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, tcg_env, arg1, arg2);
}

/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1281
1282
/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        /* Arithmetic right shift keeps all shifted-out bits plus the
         * original sign in psw_c for the flag computations below. */
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        /* psw_o = 1 iff all shifted-out bits equal the sign (no overflow).
         * NOTE(review): this leaves psw_o in {0, 1} while rx_sub encodes
         * O in bit 31 of psw_o — verify against psw_cond's O decoding. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        /* C = last bit shifted out (any shifted-out bit set -> 1). */
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        /* Zero shift count: plain move, C and O cleared. */
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1305
/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_temp_new();
    tmp = tcg_temp_new();
    /* Shift count is taken modulo 32, as in the immediate form. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
    /* psw_c = shifted-out bits, sign-extended (see trans_SHLL_irr). */
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O = 1 iff all shifted-out bits equal the sign.
     * NOTE(review): {0,1} encoding — same caveat as trans_SHLL_irr. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1337
/*
 * Common body for SHLR (alith == 0, logical) and SHAR (alith == 1,
 * arithmetic) with an immediate count.  The shift is split in two:
 * shift by imm-1, capture the next bit in C, then shift the final bit,
 * so C holds the last bit shifted out.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Zero count: move only, C cleared. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    /* Right shifts never overflow; Z/S track the result. */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1357
/*
 * Register-count variant of shiftr_imm: same shift-by-(n-1), grab C,
 * shift-by-1 trick, with a runtime branch for a zero count.
 * alith selects logical (0) or arithmetic (1) right shift.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    /* count is taken modulo 32 */
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C = last bit to be shifted out */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1389
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    /* arithmetic right shift (alith = 1) */
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    /* logical right shift (alith = 0) */
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1419
/* rolc rd */
/* Rotate left by one through the carry: old bit 31 -> C, old C -> bit 0.
 * The old top bit is latched in tmp before rd is modified, and psw_c is
 * consumed before being overwritten — the statement order matters. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* rorc rd */
/* Rotate right by one through the carry: old bit 0 -> C, old C -> bit 31.
 * psw_c is shifted into position in place and then replaced by tmp. */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1448
/* rotation direction and operand-kind selectors for rx_rot() */
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};

/*
 * Common body for ROTL/ROTR.  dir selects the direction, ir selects
 * whether src is an immediate count or a register index.  C receives
 * the bit rotated into the other end (bit 0 after ROTL, bit 31 after
 * ROTR); Z/S track the result.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1474
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
1502
/* revl rs, rd */
/* Full 32-bit byte swap: ABCD -> DCBA. */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/* Byte swap within each 16-bit half: ABCD -> BADC.
 * rs is fully read into tmp / the shifted form before rd is written,
 * so rd == rs is safe. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    return true;
}
1522
/* conditional branch helper */
/*
 * Emit a conditional branch for condition code cd to ctx->pc + dst.
 * cd 0..13 are real conditions (decoded by psw_cond); 14 is
 * branch-always; 15 is branch-never (no code emitted).
 * The taken/not-taken paths use goto_tb slots 1 and 0 respectively.
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        /* fall-through path: next sequential instruction */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        /* taken path: dst is relative to the insn start (ctx->pc) */
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing do */
        break;
    }
}
1552
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* condition code 14 == always taken */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    /* register-relative branch: pc = rd + address of this insn */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1585
/* Push the return address (address of the next instruction) for JSR/BSR. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    /* push return address, then jump indirect */
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    /* push return address, then branch-always */
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    /* pop the return address pushed by JSR/BSR */
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1634
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
/* string compare, unsigned byte; fully implemented in the helper */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(tcg_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(tcg_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(tcg_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(tcg_env);
    return true;
}
1668
/* Invoke a string-operation helper, passing the operand size (a->sz). */
#define STRING(op) \
    do { \
        TCGv size = tcg_constant_i32(a->sz); \
        gen_helper_##op(tcg_env, size); \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1701
/*
 * ret = (top 16 bits of rs) * (top 16 bits of rs2), as a 64-bit signed
 * product shifted left 16 — the accumulator's fixed-point format.
 * The sign-extend-to-64 then sari 16 isolates each operand's high half.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}

/*
 * Same as rx_mul64hi but using the low 16 bits of each operand
 * (ext16s isolates the bottom half, sign-extended).
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}
1727
/* mulhi rs,rs2 */
/* acc = product of the operands' high 16-bit halves (accumulator format) */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
/* acc = product of the operands' low 16-bit halves */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
/* multiply-accumulate: acc += high-half product */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}

/* maclo rs,rs2 */
/* multiply-accumulate: acc += low-half product */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}
1761
/* mvfachi rd */
/* rd = accumulator bits [63:32] */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
/* rd = accumulator middle word, bits [47:16] */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}

/* mvtachi rs */
/* accumulator bits [63:32] = rs; low half preserved */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}

/* mvtaclo rs */
/* accumulator bits [31:0] = rs; high half preserved */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}
1798
/* racw #imm */
/* Round the accumulator; the helper receives a->imm + 1 as the shift
 * amount — presumably mapping encodings 0/1 to shifts 1/2, TODO confirm
 * against helper_racw. */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(tcg_env, imm);
    return true;
}

/* sat rd */
/* Saturate rd on a preceding overflow: when O is set (psw_o bit 31),
 * rd becomes 0x7fffffff if S was set, else 0x80000000. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* select the saturated value only when psw_o < 0 (O flag set) */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}

/* satr */
/* 64-bit accumulator saturation; implemented in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(tcg_env);
    return true;
}
1828
/* token-pasting helper used by the FOP/BITOP macro families below */
#define cat3(a, b, c) a##b##c

/*
 * Generate the #imm and rs/dsp[rs] forms of a two-operand FPU insn.
 * The immediate form reads a 32-bit literal via li(); both forms go
 * through a helper so FPSW exceptions can be raised.
 */
#define FOP(name, op)                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv imm = tcg_constant_i32(li(ctx, 0));                        \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,                       \
                        cpu_regs[a->rd], imm);                          \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _mr) * a)      \
    {                                                                   \
        TCGv val, mem;                                                  \
        mem = tcg_temp_new();                                           \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);            \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,                       \
                        cpu_regs[a->rd], val);                          \
        return true;                                                    \
    }

/* Generate a single-source FPU conversion insn (rs or dsp[rs] -> rd). */
#define FCONVOP(name, op)                                       \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    {                                                           \
        TCGv val, mem;                                          \
        mem = tcg_temp_new();                                   \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);    \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, val);         \
        return true;                                            \
    }
1859
/* fadd/fsub/fmul/fdiv #imm, rd and rs/dsp[rs], rd */
FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    /* li() fetches the 32-bit float literal from the insn stream */
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
    return true;
}

/* ftoi / round: float -> int conversions */
FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    /* unlike FCONVOP, the source may be a sub-word memory operand (a->mi) */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
    return true;
}
1897
/* Memory bit-ops: byte-sized read-modify-write at address 'mem'. */

/* set the masked bit(s) in the byte at mem */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* clear the masked bit(s) in the byte at mem */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* test the masked bit in the byte at mem: C = bit, psw_z = copy of C
 * (Z flag is "psw_z == 0", so Z reads as the inverse of the bit) */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

/* invert the masked bit(s) in the byte at mem */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* Register bit-ops: same four operations on a full 32-bit register. */

static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
1958
/*
 * Generate the four addressing forms of a bit operation:
 *   _im: #imm bit of dsp[rs] (byte access)
 *   _ir: #imm bit of a register (count 0..31)
 *   _rr: register-selected bit of a register (count masked with 31)
 *   _rm: register-selected bit of dsp[rs] (count masked with 7 — a bit
 *        within the addressed byte)
 */
#define BITOP(name, op)                                                 \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,              \
                                        cat3(arg_, name, _im) * a)      \
    {                                                                   \
        TCGv mask, mem, addr;                                           \
        mem = tcg_temp_new();                                           \
        mask = tcg_constant_i32(1 << a->imm);                           \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv mask;                                                      \
        mask = tcg_constant_i32(1 << a->imm);                           \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rr) * a)      \
    {                                                                   \
        TCGv mask, b;                                                   \
        mask = tcg_temp_new();                                          \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);                       \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);                  \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rm) * a)      \
    {                                                                   \
        TCGv mask, mem, addr, b;                                        \
        mask = tcg_temp_new();                                          \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);                        \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);                  \
        mem = tcg_temp_new();                                           \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        return true;                                                    \
    }

/* bset / bclr / btst / bnot, all four addressing forms each */
BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
2007
/*
 * Deposit the truth value of a condition into bit 'pos' of val.
 * NOTE(review): the second parameter is typed TCGCond but is actually an
 * RX condition-code number fed to psw_cond (same values as rx_bcnd_main's
 * cd) — verify and consider renaming/retyping.
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    /* clear the target bit, then deposit the 0/1 condition result */
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}

/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    /* byte-sized read-modify-write of the addressed bit */
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2039
2040 enum {
2041 PSW_C = 0,
2042 PSW_Z = 1,
2043 PSW_S = 2,
2044 PSW_O = 3,
2045 PSW_I = 8,
2046 PSW_U = 9,
2047 };
2048
clrsetpsw(DisasContext * ctx,int cb,int val)2049 static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2050 {
2051 if (cb < 8) {
2052 switch (cb) {
2053 case PSW_C:
2054 tcg_gen_movi_i32(cpu_psw_c, val);
2055 break;
2056 case PSW_Z:
2057 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2058 break;
2059 case PSW_S:
2060 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2061 break;
2062 case PSW_O:
2063 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2064 break;
2065 default:
2066 qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
2067 break;
2068 }
2069 } else if (is_privileged(ctx, 0)) {
2070 switch (cb) {
2071 case PSW_I:
2072 tcg_gen_movi_i32(cpu_psw_i, val);
2073 ctx->base.is_jmp = DISAS_UPDATE;
2074 break;
2075 case PSW_U:
2076 if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
2077 ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
2078 tcg_gen_movi_i32(cpu_psw_u, val);
2079 tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
2080 tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
2081 }
2082 break;
2083 default:
2084 qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
2085 break;
2086 }
2087 }
2088 }
2089
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}

/* mvtipl #imm */
/* Set the interrupt priority level; privileged, silently ignored in
 * user mode (is_privileged handles any exception itself — TODO confirm). */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
2113
2114 /* mvtc #imm, rd */
trans_MVTC_i(DisasContext * ctx,arg_MVTC_i * a)2115 static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
2116 {
2117 TCGv imm;
2118
2119 imm = tcg_constant_i32(a->imm);
2120 move_to_cr(ctx, imm, a->cr);
2121 return true;
2122 }
2123
2124 /* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    /* Copy general register rs into control register cr. */
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    return true;
}
2130
2131 /* mvfc rs, rd */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    /*
     * Copy control register cr into general register rd.  ctx->pc is
     * passed along -- presumably so a PC read yields this insn's
     * address; confirm against move_from_cr().
     */
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
2137
2138 /* rtfi */
trans_RTFI(DisasContext * ctx,arg_RTFI * a)2139 static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
2140 {
2141 TCGv psw;
2142 if (is_privileged(ctx, 1)) {
2143 psw = tcg_temp_new();
2144 tcg_gen_mov_i32(cpu_pc, cpu_bpc);
2145 tcg_gen_mov_i32(psw, cpu_bpsw);
2146 gen_helper_set_psw_rte(tcg_env, psw);
2147 ctx->base.is_jmp = DISAS_EXIT;
2148 }
2149 return true;
2150 }
2151
2152 /* rte */
trans_RTE(DisasContext * ctx,arg_RTE * a)2153 static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
2154 {
2155 TCGv psw;
2156 if (is_privileged(ctx, 1)) {
2157 psw = tcg_temp_new();
2158 pop(cpu_pc);
2159 pop(psw);
2160 gen_helper_set_psw_rte(tcg_env, psw);
2161 ctx->base.is_jmp = DISAS_EXIT;
2162 }
2163 return true;
2164 }
2165
2166 /* brk */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    /*
     * brk trap: synchronize PC to the next insn's address, then call
     * the helper, which does not return to generated code.
     */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
2174
2175 /* int #imm */
trans_INT(DisasContext * ctx,arg_INT * a)2176 static bool trans_INT(DisasContext *ctx, arg_INT *a)
2177 {
2178 TCGv vec;
2179
2180 tcg_debug_assert(a->imm < 0x100);
2181 vec = tcg_constant_i32(a->imm);
2182 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
2183 gen_helper_rxint(tcg_env, vec);
2184 ctx->base.is_jmp = DISAS_NORETURN;
2185 return true;
2186 }
2187
2188 /* wait */
trans_WAIT(DisasContext * ctx,arg_WAIT * a)2189 static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
2190 {
2191 if (is_privileged(ctx, 1)) {
2192 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
2193 gen_helper_wait(tcg_env);
2194 }
2195 return true;
2196 }
2197
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    /* Cache the CPU state pointer and the TB's flags for this run. */
    ctx->env = cpu_env(cs);
    ctx->tb_flags = ctx->base.tb->flags;
}
2204
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    /* Nothing to do at the start of a TB. */
}
2208
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Record the address of the insn about to be translated. */
    tcg_gen_insn_start(ctx->base.pc_next);
}
2215
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /*
     * Remember this insn's start address; decode_load() presumably
     * advances base.pc_next past the fetched bytes -- confirm there.
     */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        /* Undecodable opcode: raise an illegal-instruction exception. */
        gen_helper_raise_illegal_instruction(tcg_env);
    }
}
2227
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Emit the TB epilogue according to how translation ended. */
    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Fell off the end: chain directly to the next insn's TB. */
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* PC was set by the insn; look up the target TB dynamically. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        /* CPU state changed mid-TB: store PC before leaving. */
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        /* Return to the main loop so the new state is observed. */
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        /* A helper that raises an exception already ended the TB. */
        break;
    default:
        g_assert_not_reached();
    }
}
2252
/* Hooks handed to the common translator loop. */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
};
2260
void rx_translate_code(CPUState *cs, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    /* Drive the generic translator loop with the RX hooks above. */
    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}
2268
/*
 * Create a 32-bit TCG global backed by CPURXState field @sym, shown
 * as @name in TCG dumps.
 */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
                                       offsetof(CPURXState, sym), name)
2272
rx_translate_init(void)2273 void rx_translate_init(void)
2274 {
2275 static const char * const regnames[NUM_REGS] = {
2276 "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
2277 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
2278 };
2279 int i;
2280
2281 for (i = 0; i < NUM_REGS; i++) {
2282 cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
2283 offsetof(CPURXState, regs[i]),
2284 regnames[i]);
2285 }
2286 ALLOC_REGISTER(pc, "PC");
2287 ALLOC_REGISTER(psw_o, "PSW(O)");
2288 ALLOC_REGISTER(psw_s, "PSW(S)");
2289 ALLOC_REGISTER(psw_z, "PSW(Z)");
2290 ALLOC_REGISTER(psw_c, "PSW(C)");
2291 ALLOC_REGISTER(psw_u, "PSW(U)");
2292 ALLOC_REGISTER(psw_i, "PSW(I)");
2293 ALLOC_REGISTER(psw_pm, "PSW(PM)");
2294 ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
2295 ALLOC_REGISTER(usp, "USP");
2296 ALLOC_REGISTER(fpsw, "FPSW");
2297 ALLOC_REGISTER(bpsw, "BPSW");
2298 ALLOC_REGISTER(bpc, "BPC");
2299 ALLOC_REGISTER(isp, "ISP");
2300 ALLOC_REGISTER(fintv, "FINTV");
2301 ALLOC_REGISTER(intb, "INTB");
2302 cpu_acc = tcg_global_mem_new_i64(tcg_env,
2303 offsetof(CPURXState, acc), "ACC");
2304 }
2305