xref: /openbmc/qemu/target/sh4/translate.c (revision 3df4c288)
/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C)   (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C)   0
#endif

/* Target-specific values for ctx->base.is_jmp.  */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUSH4State *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}
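
/*
 * Editor's note (a sketch, not part of the original file): Q, M and T
 * live in their own TCG globals so flag updates avoid read-modify-write
 * on SR.  The full value only exists on demand, as gen_read_sr() builds:
 *
 *     SR = sr | (sr_q << SR_Q) | (sr_m << SR_M) | (sr_t << SR_T)
 *
 * gen_write_sr() is the inverse: it clears those three bit positions in
 * the stored SR and re-extracts each flag from the source value.
 */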

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
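
/*
 * Editor's note (an assumption-laden sketch, not original code): the
 * three exits above trade speed for safety.  goto_tb chains directly to
 * another translated block when the destination is static and chaining
 * is allowed; lookup_and_goto_ptr() does a run-time TB lookup instead;
 * and exit_tb(NULL, 0) returns to the main loop, which gUSA-exclusive
 * regions need so the exclusive-execution machinery regains control.
 */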

static void gen_jump(DisasContext *ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, since immediate jumps are conditional jumps.  */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext *ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
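
/*
 * Editor's example (hypothetical guest code, not from this file): for a
 * delayed conditional branch such as
 *
 *     bt/s  target      ! branch if T = 1, with delay slot
 *     add   #1, r0      ! delay slot, executed either way
 *
 * the condition is latched into cpu_delayed_cond when bt/s is decoded,
 * the slot instruction is translated, and the code above then either
 * falls through (ds == 0) or jumps to the latched target via gen_jump().
 */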

static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}
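
/*
 * Editor's note: SH-4 double-precision DRn overlays the even/odd single
 * pair FRn:FRn+1, the even register holding the most significant half.
 * The concat/extr pair above therefore implements
 *
 *     DRn = ((uint64_t)fregs[n] << 32) | fregs[n + 1]
 *
 * with fregs[n + 1] as the low word and fregs[n] as the high word.
 */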

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)
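
/*
 * Worked example (editor's addition): B11_0s sign-extends the 12-bit
 * branch displacement by hand.  For opcode 0xaffe (bra with disp field
 * 0xffe), bit 11 is set, so
 *
 *     B11_0s = 0xfffff000 | 0xffe = 0xfffffffe = -2
 *
 * which the bra/bsr cases below scale by 2 before adding to PC + 4.
 */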

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
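
/*
 * Editor's sketch of XHACK: with FPSCR.SZ set, the 64-bit fmov patterns
 * encode the bank bit of an XDn/DRn operand in bit 0 of the register
 * field.  XHACK hoists that bit to position 4, where FREG's bank XOR
 * expects it:
 *
 *     XHACK(0x1) -> 0x10   (XD0: bank 1, pair 0)
 *     XHACK(0x6) -> 0x06   (DR6: bank 0, pair 6)
 */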

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext *ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache.  Here, the data written by movca.l is
       never meant to reach memory: the written data is bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content.  If we see an
       ocbi, we check whether a movca.l was done earlier for that
       address; if so, the write should not have hit memory, so we
       restore the previous content.  When we see an instruction that
       is neither movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start
       of the TB, or if we already saw movca.l in this TB and did not
       flush stores yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(tcg_env);
            ctx->has_movcal = 0;
        }
    }
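
    /*
     * Editor's illustration (assumed kernel-style usage, not original
     * code): the flush path described above looks roughly like
     *
     *     movca.l  r0,@r4     ! allocate + write the cache line
     *     ocbi     @r4        ! invalidate it before write-back
     *
     * so the helpers keep a backup of the bytes movca.l clobbers and
     * restore them when the following ocbi is seen, while any other
     * intervening instruction discards the backup.
     */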

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(tcg_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) - 1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd: /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd: /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(tcg_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
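    /*
     * Editor's sketch of a gUSA sequence (hypothetical user code, after
     * the kernel's atomic_add idiom):
     *
     *     mova   1f, r0       ! r0 = end of restartable region
     *     mov    r15, r1      ! save sp
     *     mov    #-6, r15     ! sp = -(region length): region starts
     *  0: mov.l  @r2, r3
     *     add    r4, r3
     *     mov.l  r3, @r2
     *  1: mov    r1, r15      ! sp restored: region ends
     *
     * Spotting the "mov #-n, r15" lets the translator run the region as
     * one atomic unit when CF_PARALLEL is set.
     */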
    case 0x9000: /* mov.w @(disp,PC),Rn */
        CHECK_NOT_DELAY_SLOT
        {
            TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | MO_ALIGN);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        CHECK_NOT_DELAY_SLOT
        {
            TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | MO_ALIGN);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
        }
        return;
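    /*
     * Editor's note on the addc expansion above: tcg_gen_add2_i32
     * forms a 64-bit sum from 32-bit halves, so both carries fall out
     * of the high words.  In C terms (a sketch):
     *
     *     tmp = (uint64_t)T + Rm;          // t1 = low, carry1 = high
     *     res = (uint64_t)Rn + tmp;        // Rn = low, T = total carry
     */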
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            /* T = ((Result ^ Rn) & ~(Rm ^ Rn)) >> 31 */
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            /* The result belongs in Rn, matching subv below.  */
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
        }
        return;
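    /*
     * Worked example (editor's addition) of the zero-byte trick above:
     * cmp/str sets T when any byte of Rm equals the corresponding byte
     * of Rn.  With x = Rm ^ Rn, a byte of x is zero iff those bytes
     * matched, and (x - 0x01010101) & ~x & 0x80808080 is nonzero exactly
     * when some byte of x is zero.  E.g. Rm = 0x11223344 and
     * Rn = 0x55663377 give x = 0x44440033, whose zero byte (bits 15..8)
     * makes the expression nonzero, so T = 1.
     */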
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_constant_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
        }
        return;
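    /*
     * Usage sketch (editor's addition, roughly after the SH-4 manual's
     * 64/32 division idiom; r0 = divisor, r1:r2 = dividend):
     *
     *     div0u               ! clear M, Q, T
     *     .rept 32
     *     rotcl r2            ! feed the next dividend bit in via T
     *     div1  r0, r1        ! one non-restoring division step
     *     .endr
     *     rotcl r2            ! r2 = quotient
     */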
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            gen_helper_macl(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            gen_helper_macw(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_constant_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
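    /*
     * Editor's note on the two-step right shift in shad/shld: a count
     * of -32 must shift by 32, but single TCG shifts of 32 or more are
     * undefined.  Splitting it as
     *
     *     t2 = (Rn >> ((count & 0x1f) ^ 0x1f)) >> 1;
     *
     * keeps each individual shift in 0..31 while still shifting by
     * 32 - (count & 0x1f) in total, so shld by -32 yields 0 and shad
     * by -32 yields the sign fill.
     */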
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            /* T = ((Rn ^ Rm) & (Result ^ Rn)) >> 31; the second operand
               must be Rn, not Rm, or a positive - negative overflow such
               as 0x7fffffff - (-1) goes undetected.  */
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
1123         CHECK_FPU_ENABLED
1124         CHECK_FPSCR_PR_0
1125         gen_helper_fmac_FT(FREG(B11_8), tcg_env,
1126                            FREG(0), FREG(B7_4), FREG(B11_8));
1127         return;
1128     }
1129 
1130     switch (ctx->opcode & 0xff00) {
1131     case 0xc900: /* and #imm,R0 */
1132         tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1133         return;
1134     case 0xcd00: /* and.b #imm,@(R0,GBR) */
1135         {
1136             TCGv addr, val;
1137             addr = tcg_temp_new();
1138             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1139             val = tcg_temp_new();
1140             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1141             tcg_gen_andi_i32(val, val, B7_0);
1142             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1143         }
1144         return;
1145     case 0x8b00: /* bf label */
1146         CHECK_NOT_DELAY_SLOT
1147         gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1148         return;
1149     case 0x8f00: /* bf/s label */
1150         CHECK_NOT_DELAY_SLOT
1151         tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1152         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1153         ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1154         return;
1155     case 0x8900: /* bt label */
1156         CHECK_NOT_DELAY_SLOT
1157         gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1158         return;
1159     case 0x8d00: /* bt/s label */
1160         CHECK_NOT_DELAY_SLOT
1161         tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1162         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1163         ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1164         return;
1165     case 0x8800: /* cmp/eq #imm,R0 */
1166         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1167         return;
1168     case 0xc400: /* mov.b @(disp,GBR),R0 */
1169         {
1170             TCGv addr = tcg_temp_new();
1171             tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1172             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1173         }
1174         return;
1175     case 0xc500: /* mov.w @(disp,GBR),R0 */
1176         {
1177             TCGv addr = tcg_temp_new();
1178             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1179             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
1180         }
1181         return;
1182     case 0xc600: /* mov.l @(disp,GBR),R0 */
1183         {
1184             TCGv addr = tcg_temp_new();
1185             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1186             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
1187         }
1188         return;
1189     case 0xc000: /* mov.b R0,@(disp,GBR) */
1190         {
1191             TCGv addr = tcg_temp_new();
1192             tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1193             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1194         }
1195         return;
1196     case 0xc100: /* mov.w R0,@(disp,GBR) */
1197         {
1198             TCGv addr = tcg_temp_new();
1199             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1200             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
1201         }
1202         return;
1203     case 0xc200: /* mov.l R0,@(disp,GBR) */
1204         {
1205             TCGv addr = tcg_temp_new();
1206             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1207             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1208         }
1209         return;
1210     case 0x8000: /* mov.b R0,@(disp,Rn) */
1211         {
1212             TCGv addr = tcg_temp_new();
1213             tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1214             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1215         }
1216         return;
1217     case 0x8100: /* mov.w R0,@(disp,Rn) */
1218         {
1219             TCGv addr = tcg_temp_new();
1220             tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1221             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1222                                 MO_TEUW | UNALIGN(ctx));
1223         }
1224         return;
1225     case 0x8400: /* mov.b @(disp,Rn),R0 */
1226         {
1227             TCGv addr = tcg_temp_new();
1228             tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1229             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1230         }
1231         return;
1232     case 0x8500: /* mov.w @(disp,Rn),R0 */
1233         {
1234             TCGv addr = tcg_temp_new();
1235             tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1236             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1237                                 MO_TESW | UNALIGN(ctx));
1238         }
1239         return;
1240     case 0xc700: /* mova @(disp,PC),R0 */
1241         CHECK_NOT_DELAY_SLOT
1242         tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1243                                   4 + B7_0 * 4) & ~3);
1244         return;
1245     case 0xcb00: /* or #imm,R0 */
1246         tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1247         return;
1248     case 0xcf00: /* or.b #imm,@(R0,GBR) */
1249         {
1250             TCGv addr, val;
1251             addr = tcg_temp_new();
1252             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1253             val = tcg_temp_new();
1254             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1255             tcg_gen_ori_i32(val, val, B7_0);
1256             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1257         }
1258         return;
1259     case 0xc300: /* trapa #imm */
1260         {
1261             TCGv imm;
1262             CHECK_NOT_DELAY_SLOT
1263             gen_save_cpu_state(ctx, true);
1264             imm = tcg_constant_i32(B7_0);
1265             gen_helper_trapa(tcg_env, imm);
1266             ctx->base.is_jmp = DISAS_NORETURN;
1267         }
1268         return;
1269     case 0xc800: /* tst #imm,R0 */
1270         {
1271             TCGv val = tcg_temp_new();
1272             tcg_gen_andi_i32(val, REG(0), B7_0);
1273             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1274         }
1275         return;
1276     case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1277         {
1278             TCGv val = tcg_temp_new();
1279             tcg_gen_add_i32(val, REG(0), cpu_gbr);
1280             tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1281             tcg_gen_andi_i32(val, val, B7_0);
1282             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1283         }
1284         return;
1285     case 0xca00: /* xor #imm,R0 */
1286         tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1287         return;
1288     case 0xce00: /* xor.b #imm,@(R0,GBR) */
1289         {
1290             TCGv addr, val;
1291             addr = tcg_temp_new();
1292             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1293             val = tcg_temp_new();
1294             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1295             tcg_gen_xori_i32(val, val, B7_0);
1296             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1297         }
1298         return;
1299     }
1300 
1301     switch (ctx->opcode & 0xf08f) {
1302     case 0x408e: /* ldc Rm,Rn_BANK */
1303         CHECK_PRIVILEGED
1304         tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1305         return;
1306     case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1307         CHECK_PRIVILEGED
1308         tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1309                             MO_TESL | MO_ALIGN);
1310         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1311         return;
1312     case 0x0082: /* stc Rm_BANK,Rn */
1313         CHECK_PRIVILEGED
1314         tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1315         return;
1316     case 0x4083: /* stc.l Rm_BANK,@-Rn */
1317         CHECK_PRIVILEGED
1318         {
1319             TCGv addr = tcg_temp_new();
1320             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1321             tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1322                                 MO_TEUL | MO_ALIGN);
1323             tcg_gen_mov_i32(REG(B11_8), addr);
1324         }
1325         return;
1326     }
1327 
1328     switch (ctx->opcode & 0xf0ff) {
1329     case 0x0023: /* braf Rn */
1330         CHECK_NOT_DELAY_SLOT
1331         tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1332         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1333         ctx->delayed_pc = (uint32_t) - 1;
1334         return;
1335     case 0x0003: /* bsrf Rn */
1336         CHECK_NOT_DELAY_SLOT
1337         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1338         tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1339         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1340         ctx->delayed_pc = (uint32_t) - 1;
1341         return;
1342     case 0x4015: /* cmp/pl Rn */
1343         tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1344         return;
1345     case 0x4011: /* cmp/pz Rn */
1346         tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1347         return;
1348     case 0x4010: /* dt Rn */
1349         tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1350         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1351         return;
1352     case 0x402b: /* jmp @Rn */
1353         CHECK_NOT_DELAY_SLOT
1354         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1355         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1356         ctx->delayed_pc = (uint32_t) - 1;
1357         return;
1358     case 0x400b: /* jsr @Rn */
1359         CHECK_NOT_DELAY_SLOT
1360         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1361         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1362         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1363         ctx->delayed_pc = (uint32_t) - 1;
1364         return;
1365     case 0x400e: /* ldc Rm,SR */
1366         CHECK_PRIVILEGED
1367         {
1368             TCGv val = tcg_temp_new();
1369             tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1370             gen_write_sr(val);
1371             ctx->base.is_jmp = DISAS_STOP;
1372         }
1373         return;
1374     case 0x4007: /* ldc.l @Rm+,SR */
1375         CHECK_PRIVILEGED
1376         {
1377             TCGv val = tcg_temp_new();
1378             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1379                                 MO_TESL | MO_ALIGN);
1380             tcg_gen_andi_i32(val, val, 0x700083f3);
1381             gen_write_sr(val);
1382             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1383             ctx->base.is_jmp = DISAS_STOP;
1384         }
1385         return;
1386     case 0x0002: /* stc SR,Rn */
1387         CHECK_PRIVILEGED
1388         gen_read_sr(REG(B11_8));
1389         return;
1390     case 0x4003: /* stc SR,@-Rn */
1391         CHECK_PRIVILEGED
1392         {
1393             TCGv addr = tcg_temp_new();
1394             TCGv val = tcg_temp_new();
1395             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1396             gen_read_sr(val);
1397             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1398             tcg_gen_mov_i32(REG(B11_8), addr);
1399         }
1400         return;
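     /*
      * The LD/ST/LDST macros below expand into the four standard move forms
      * for a control/system register <reg>: ldc/lds Rm,<reg>, ldc.l/lds.l
      * @Rm+,<reg>, stc/sts <reg>,Rn and stc.l/sts.l <reg>,@-Rn, with
      * <prechk> supplying the privilege/FPU check each register requires.
      */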
1401 #define LD(reg,ldnum,ldpnum,prechk)            \
1402   case ldnum:                                                        \
1403     prechk                                                           \
1404     tcg_gen_mov_i32 (cpu_##reg, REG(B11_8));                         \
1405     return;                                                          \
1406   case ldpnum:                                                       \
1407     prechk                                                           \
1408     tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx,          \
1409                         MO_TESL | MO_ALIGN);                         \
1410     tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                     \
1411     return;
1412 #define ST(reg,stnum,stpnum,prechk)                \
1413   case stnum:                                                        \
1414     prechk                                                           \
1415     tcg_gen_mov_i32 (REG(B11_8), cpu_##reg);                         \
1416     return;                                                          \
1417   case stpnum:                                                       \
1418     prechk                                                           \
1419     {                                                                \
1420         TCGv addr = tcg_temp_new();                                  \
1421         tcg_gen_subi_i32(addr, REG(B11_8), 4);                       \
1422         tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx,            \
1423                             MO_TEUL | MO_ALIGN);                     \
1424         tcg_gen_mov_i32(REG(B11_8), addr);                           \
1425     }                                                                \
1426     return;
1427 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)                \
1428         LD(reg,ldnum,ldpnum,prechk)                               \
1429         ST(reg,stnum,stpnum,prechk)
1430         LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1431         LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1432         LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1433         LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1434         ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1435         LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1436         LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1437         LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1438         LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1439         LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1440         LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1441     case 0x406a: /* lds Rm,FPSCR */
1442         CHECK_FPU_ENABLED
1443         gen_helper_ld_fpscr(tcg_env, REG(B11_8));
1444         ctx->base.is_jmp = DISAS_STOP;
1445         return;
1446     case 0x4066: /* lds.l @Rm+,FPSCR */
1447         CHECK_FPU_ENABLED
1448         {
1449             TCGv val = tcg_temp_new();
1450             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1451                                 MO_TESL | MO_ALIGN);
1452             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1453             gen_helper_ld_fpscr(tcg_env, val);
1454             ctx->base.is_jmp = DISAS_STOP;
1455         }
1456         return;
1457     case 0x006a: /* sts FPSCR,Rn */
1458         CHECK_FPU_ENABLED
1459         tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1460         return;
1461     case 0x4062: /* sts FPSCR,@-Rn */
1462         CHECK_FPU_ENABLED
1463         {
1464             TCGv addr, val;
1465             val = tcg_temp_new();
1466             tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1467             addr = tcg_temp_new();
1468             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1469             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1470             tcg_gen_mov_i32(REG(B11_8), addr);
1471         }
1472         return;
1473     case 0x00c3: /* movca.l R0,@Rn */
1474         {
1475             TCGv val = tcg_temp_new();
1476             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1477                                 MO_TEUL | MO_ALIGN);
1478             gen_helper_movcal(tcg_env, REG(B11_8), val);
1479             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1480                                 MO_TEUL | MO_ALIGN);
1481         }
1482         ctx->has_movcal = 1;
1483         return;
1484     case 0x40a9: /* movua.l @Rm,R0 */
1485         CHECK_SH4A
1486         /* Load non-boundary-aligned data */
1487         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1488                             MO_TEUL | MO_UNALN);
1489         return;
1490     case 0x40e9: /* movua.l @Rm+,R0 */
1491         CHECK_SH4A
1492         /* Load non-boundary-aligned data */
1493         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1494                             MO_TEUL | MO_UNALN);
1495         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1496         return;
1497     case 0x0029: /* movt Rn */
1498         tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1499         return;
1500     case 0x0073:
1501         /* MOVCO.L
1502          *     LDST -> T
1503          *     If (T == 1) R0 -> (Rn)
1504          *     0 -> LDST
1505          *
1506          * The above description doesn't work in a parallel context.
1507          * Since we currently support no smp boards, this implies user-mode.
1508          * But we can still support the official mechanism while user-mode
1509          * is single-threaded.  */
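             /*
              * As an illustrative sketch (not taken from this file), a
              * typical SH4A atomic-increment loop pairs movli.l/movco.l:
              *    1: movli.l  @r4, r0    ! load-linked, 1 -> LDST
              *       add      #1, r0
              *       movco.l  r0, @r4    ! store-conditional, success -> T
              *       bf       1b         ! retry until the store succeeds
              */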
1510         CHECK_SH4A
1511         {
1512             TCGLabel *fail = gen_new_label();
1513             TCGLabel *done = gen_new_label();
1514 
1515             if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1516                 TCGv tmp;
1517 
1518                 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1519                                    cpu_lock_addr, fail);
1520                 tmp = tcg_temp_new();
1521                 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1522                                            REG(0), ctx->memidx,
1523                                            MO_TEUL | MO_ALIGN);
1524                 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1525             } else {
1526                 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1527                 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1528                                     MO_TEUL | MO_ALIGN);
1529                 tcg_gen_movi_i32(cpu_sr_t, 1);
1530             }
1531             tcg_gen_br(done);
1532 
1533             gen_set_label(fail);
1534             tcg_gen_movi_i32(cpu_sr_t, 0);
1535 
1536             gen_set_label(done);
1537             tcg_gen_movi_i32(cpu_lock_addr, -1);
1538         }
1539         return;
1540     case 0x0063:
1541         /* MOVLI.L @Rm,R0
1542          *     1 -> LDST
1543          *     (Rm) -> R0
1544          *     When interrupt/exception
1545          *     occurred 0 -> LDST
1546          *
1547          * In a parallel context, we must also save the loaded value
1548          * for use with the cmpxchg that we'll use with movco.l.  */
1549         CHECK_SH4A
1550         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1551             TCGv tmp = tcg_temp_new();
1552             tcg_gen_mov_i32(tmp, REG(B11_8));
1553             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1554                                 MO_TESL | MO_ALIGN);
1555             tcg_gen_mov_i32(cpu_lock_value, REG(0));
1556             tcg_gen_mov_i32(cpu_lock_addr, tmp);
1557         } else {
1558             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1559                                 MO_TESL | MO_ALIGN);
1560             tcg_gen_movi_i32(cpu_lock_addr, 0);
1561         }
1562         return;
1563     case 0x0093: /* ocbi @Rn */
1564         {
1565             gen_helper_ocbi(tcg_env, REG(B11_8));
1566         }
1567         return;
1568     case 0x00a3: /* ocbp @Rn */
1569     case 0x00b3: /* ocbwb @Rn */
1570         /* These instructions are supposed to do nothing in case of
1571            a cache miss. Given that we only partially emulate caches
1572            it is safe to simply ignore them. */
1573         return;
1574     case 0x0083: /* pref @Rn */
1575         return;
1576     case 0x00d3: /* prefi @Rn */
1577         CHECK_SH4A
1578         return;
1579     case 0x00e3: /* icbi @Rn */
1580         CHECK_SH4A
1581         return;
1582     case 0x00ab: /* synco */
1583         CHECK_SH4A
1584         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1585         return;
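         /* rotcl/rotcr rotate Rn through the T bit: the bit rotated out of
            Rn lands in T while the old T is rotated in at the other end. */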
1586     case 0x4024: /* rotcl Rn */
1587         {
1588             TCGv tmp = tcg_temp_new();
1589             tcg_gen_mov_i32(tmp, cpu_sr_t);
1590             tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1591             tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1592             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1593         }
1594         return;
1595     case 0x4025: /* rotcr Rn */
1596         {
1597             TCGv tmp = tcg_temp_new();
1598             tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1599             tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1600             tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1601             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1602         }
1603         return;
1604     case 0x4004: /* rotl Rn */
1605         tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1606         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1607         return;
1608     case 0x4005: /* rotr Rn */
1609         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1610         tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1611         return;
1612     case 0x4000: /* shll Rn */
1613     case 0x4020: /* shal Rn */
1614         tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1615         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1616         return;
1617     case 0x4021: /* shar Rn */
1618         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1619         tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1620         return;
1621     case 0x4001: /* shlr Rn */
1622         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1623         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1624         return;
1625     case 0x4008: /* shll2 Rn */
1626         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1627         return;
1628     case 0x4018: /* shll8 Rn */
1629         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1630         return;
1631     case 0x4028: /* shll16 Rn */
1632         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1633         return;
1634     case 0x4009: /* shlr2 Rn */
1635         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1636         return;
1637     case 0x4019: /* shlr8 Rn */
1638         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1639         return;
1640     case 0x4029: /* shlr16 Rn */
1641         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1642         return;
1643     case 0x401b: /* tas.b @Rn */
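             /* Atomically set bit 7 of the byte at @Rn; the fetch_or
                returns the old byte, and T is set iff it was zero. */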
1644         tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1645                                     tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1646         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
1647         return;
1648     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1649         CHECK_FPU_ENABLED
1650         tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1651         return;
1652     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1653         CHECK_FPU_ENABLED
1654         tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1655         return;
1656     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1657         CHECK_FPU_ENABLED
1658         if (ctx->tbflags & FPSCR_PR) {
1659             TCGv_i64 fp;
1660             if (ctx->opcode & 0x0100) {
1661                 goto do_illegal;
1662             }
1663             fp = tcg_temp_new_i64();
1664             gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1665             gen_store_fpr64(ctx, fp, B11_8);
1666         } else {
1668             gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
1669         }
1670         return;
1671     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1672         CHECK_FPU_ENABLED
1673         if (ctx->tbflags & FPSCR_PR) {
1674             TCGv_i64 fp;
1675             if (ctx->opcode & 0x0100) {
1676                 goto do_illegal;
1677             }
1678             fp = tcg_temp_new_i64();
1679             gen_load_fpr64(ctx, fp, B11_8);
1680             gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
1681         } else {
1683             gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
1684         }
1685         return;
1686     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1687         CHECK_FPU_ENABLED
1688         tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1689         return;
1690     case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
1691         CHECK_FPU_ENABLED
1692         tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1693         return;
1694     case 0xf06d: /* fsqrt FRn */
1695         CHECK_FPU_ENABLED
1696         if (ctx->tbflags & FPSCR_PR) {
1697             if (ctx->opcode & 0x0100) {
1698                 goto do_illegal;
1699             }
1700             TCGv_i64 fp = tcg_temp_new_i64();
1701             gen_load_fpr64(ctx, fp, B11_8);
1702             gen_helper_fsqrt_DT(fp, tcg_env, fp);
1703             gen_store_fpr64(ctx, fp, B11_8);
1704         } else {
1705             gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1706         }
1707         return;
1708     case 0xf07d: /* fsrra FRn */
1709         CHECK_FPU_ENABLED
1710         CHECK_FPSCR_PR_0
1711         gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1712         return;
1713     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1714         CHECK_FPU_ENABLED
1715         CHECK_FPSCR_PR_0
1716         tcg_gen_movi_i32(FREG(B11_8), 0);
1717         return;
1718     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1719         CHECK_FPU_ENABLED
1720         CHECK_FPSCR_PR_0
1721         tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1722         return;
1723     case 0xf0ad: /* fcnvsd FPUL,DRn */
1724         CHECK_FPU_ENABLED
1725         {
1726             TCGv_i64 fp = tcg_temp_new_i64();
1727             gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1728             gen_store_fpr64(ctx, fp, B11_8);
1729         }
1730         return;
1731     case 0xf0bd: /* fcnvds DRn,FPUL */
1732         CHECK_FPU_ENABLED
1733         {
1734             TCGv_i64 fp = tcg_temp_new_i64();
1735             gen_load_fpr64(ctx, fp, B11_8);
1736             gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
1737         }
1738         return;
1739     case 0xf0ed: /* fipr FVm,FVn */
1740         CHECK_FPU_ENABLED
1741         CHECK_FPSCR_PR_1
1742         {
1743             TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1744             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1745             gen_helper_fipr(tcg_env, m, n);
1746             return;
1747         }
1748         break;
1749     case 0xf0fd: /* ftrv XMTRX,FVn */
1750         CHECK_FPU_ENABLED
1751         CHECK_FPSCR_PR_1
1752         {
1753             if ((ctx->opcode & 0x0300) != 0x0100) {
1754                 goto do_illegal;
1755             }
1756             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1757             gen_helper_ftrv(tcg_env, n);
1758             return;
1759         }
1760         break;
1761     }
1762 #if 0
1763     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1764             ctx->opcode, ctx->base.pc_next);
1765     fflush(stderr);
1766 #endif
1767  do_illegal:
1768     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1769  do_illegal_slot:
1770         gen_save_cpu_state(ctx, true);
1771         gen_helper_raise_slot_illegal_instruction(tcg_env);
1772     } else {
1773         gen_save_cpu_state(ctx, true);
1774         gen_helper_raise_illegal_instruction(tcg_env);
1775     }
1776     ctx->base.is_jmp = DISAS_NORETURN;
1777     return;
1778 
1779  do_fpu_disabled:
1780     gen_save_cpu_state(ctx, true);
1781     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1782         gen_helper_raise_slot_fpu_disable(tcg_env);
1783     } else {
1784         gen_helper_raise_fpu_disable(tcg_env);
1785     }
1786     ctx->base.is_jmp = DISAS_NORETURN;
1787     return;
1788 }
1789 
1790 static void decode_opc(DisasContext * ctx)
1791 {
1792     uint32_t old_flags = ctx->envflags;
1793 
1794     _decode_opc(ctx);
1795 
1796     if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1797         /* go out of the delay slot */
1798         ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1799 
1800         /* When in an exclusive region, we must continue to the end
1801            for conditional branches.  */
1802         if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1803             && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1804             gen_delayed_conditional_jump(ctx);
1805             return;
1806         }
1807         /* Otherwise this is probably an invalid gUSA region.
1808            Drop the GUSA bits so the next TB doesn't see them.  */
1809         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1810 
1811         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1812         if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1813             gen_delayed_conditional_jump(ctx);
1814         } else {
1815             gen_jump(ctx);
1816         }
1817     }
1818 }
1819 
1820 #ifdef CONFIG_USER_ONLY
1821 /*
1822  * Restart with the EXCLUSIVE bit set, within a TB run via
1823  * cpu_exec_step_atomic holding the exclusive lock.
1824  */
1825 static void gen_restart_exclusive(DisasContext *ctx)
1826 {
1827     ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
1828     gen_save_cpu_state(ctx, false);
1829     gen_helper_exclusive(tcg_env);
1830     ctx->base.is_jmp = DISAS_NORETURN;
1831 }
1832 
1833 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1834    Upon an interrupt, a real kernel would simply notice magic values in
1835    the registers and reset the PC to the start of the sequence.
1836 
1837    For QEMU, we cannot do this in quite the same way.  Instead, we notice
1838    the normal start of such a sequence (mov #-x,r15).  While we can handle
1839    any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1840    sequences and transform them into atomic operations as seen by the host.
1841 */
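     /*
      * As an illustration (a sketch, not from a real binary), a gUSA region
      * adding r3 to the word at @r4 would look like:
      *     mov    #-6, r15    ! enter gUSA; r15 = -(region length)
      *     mov.l  @r4, r2     ! load
      *     add    r3, r2      ! operation
      *     mov.l  r2, @r4     ! store, last insn of the region
      * which decode_gusa() below collapses into a single host atomic
      * add-and-fetch on the word at @r4.
      */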
1842 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1843 {
1844     uint16_t insns[5];
1845     int ld_adr, ld_dst, ld_mop;
1846     int op_dst, op_src, op_opc;
1847     int mv_src, mt_dst, st_src, st_mop;
1848     TCGv op_arg;
1849     uint32_t pc = ctx->base.pc_next;
1850     uint32_t pc_end = ctx->base.tb->cs_base;
1851     int max_insns = (pc_end - pc) / 2;
1852     int i;
1853 
1854     /* The state machine below will consume only a few insns.
1855        If there are more than that in a region, fail now.  */
1856     if (max_insns > ARRAY_SIZE(insns)) {
1857         goto fail;
1858     }
1859 
1860     /* Read all of the insns for the region.  */
1861     for (i = 0; i < max_insns; ++i) {
1862         insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1863     }
1864 
1865     ld_adr = ld_dst = ld_mop = -1;
1866     mv_src = -1;
1867     op_dst = op_src = op_opc = -1;
1868     mt_dst = -1;
1869     st_src = st_mop = -1;
1870     op_arg = NULL;
1871     i = 0;
1872 
1873 #define NEXT_INSN \
1874     do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1875 
1876     /*
1877      * Expect a load to begin the region.
1878      */
1879     NEXT_INSN;
1880     switch (ctx->opcode & 0xf00f) {
1881     case 0x6000: /* mov.b @Rm,Rn */
1882         ld_mop = MO_SB;
1883         break;
1884     case 0x6001: /* mov.w @Rm,Rn */
1885         ld_mop = MO_TESW;
1886         break;
1887     case 0x6002: /* mov.l @Rm,Rn */
1888         ld_mop = MO_TESL;
1889         break;
1890     default:
1891         goto fail;
1892     }
1893     ld_adr = B7_4;
1894     ld_dst = B11_8;
1895     if (ld_adr == ld_dst) {
1896         goto fail;
1897     }
1898     /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1899     op_dst = ld_dst;
1900 
1901     /*
1902      * Expect an optional register move.
1903      */
1904     NEXT_INSN;
1905     switch (ctx->opcode & 0xf00f) {
1906     case 0x6003: /* mov Rm,Rn */
1907         /*
1908          * Here we want to recognize ld_dst being saved for later consumption,
1909          * or for another input register being copied so that ld_dst need not
1910          * be clobbered during the operation.
1911          */
1912         op_dst = B11_8;
1913         mv_src = B7_4;
1914         if (op_dst == ld_dst) {
1915             /* Overwriting the load output.  */
1916             goto fail;
1917         }
1918         if (mv_src != ld_dst) {
1919             /* Copying a new input; constrain op_src to match the load.  */
1920             op_src = ld_dst;
1921         }
1922         break;
1923 
1924     default:
1925         /* Put back and re-examine as operation.  */
1926         --i;
1927     }
1928 
1929     /*
1930      * Expect the operation.
1931      */
1932     NEXT_INSN;
1933     switch (ctx->opcode & 0xf00f) {
1934     case 0x300c: /* add Rm,Rn */
1935         op_opc = INDEX_op_add_i32;
1936         goto do_reg_op;
1937     case 0x2009: /* and Rm,Rn */
1938         op_opc = INDEX_op_and_i32;
1939         goto do_reg_op;
1940     case 0x200a: /* xor Rm,Rn */
1941         op_opc = INDEX_op_xor_i32;
1942         goto do_reg_op;
1943     case 0x200b: /* or Rm,Rn */
1944         op_opc = INDEX_op_or_i32;
1945     do_reg_op:
1946         /* The operation register should be as expected, and the
1947            other input cannot depend on the load.  */
1948         if (op_dst != B11_8) {
1949             goto fail;
1950         }
1951         if (op_src < 0) {
1952             /* Unconstrained input.  */
1953             op_src = B7_4;
1954         } else if (op_src == B7_4) {
1955             /* Constrained input matched load.  All operations are
1956                commutative; "swap" them by "moving" the load output
1957                to the (implicit) first argument and the move source
1958                to the (explicit) second argument.  */
1959             op_src = mv_src;
1960         } else {
1961             goto fail;
1962         }
1963         op_arg = REG(op_src);
1964         break;
1965 
1966     case 0x6007: /* not Rm,Rn */
1967         if (ld_dst != B7_4 || mv_src >= 0) {
1968             goto fail;
1969         }
1970         op_dst = B11_8;
1971         op_opc = INDEX_op_xor_i32;
1972         op_arg = tcg_constant_i32(-1);
1973         break;
1974 
1975     case 0x7000 ... 0x700f: /* add #imm,Rn */
1976         if (op_dst != B11_8 || mv_src >= 0) {
1977             goto fail;
1978         }
1979         op_opc = INDEX_op_add_i32;
1980         op_arg = tcg_constant_i32(B7_0s);
1981         break;
1982 
1983     case 0x3000: /* cmp/eq Rm,Rn */
1984         /* Looking for the middle of a compare-and-swap sequence,
1985            beginning with the compare.  Operands can be either order,
1986            but with only one overlapping the load.  */
1987         if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1988             goto fail;
1989         }
1990         op_opc = INDEX_op_setcond_i32;  /* placeholder */
1991         op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
1992         op_arg = REG(op_src);
1993 
1994         NEXT_INSN;
1995         switch (ctx->opcode & 0xff00) {
1996         case 0x8b00: /* bf label */
1997         case 0x8f00: /* bf/s label */
1998             if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
1999                 goto fail;
2000             }
2001             if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2002                 break;
2003             }
2004             /* We're looking to unconditionally modify Rn with the
2005                result of the comparison, within the delay slot of
2006                the branch.  This is used by older gcc.  */
2007             NEXT_INSN;
2008             if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2009                 mt_dst = B11_8;
2010             } else {
2011                 goto fail;
2012             }
2013             break;
2014 
2015         default:
2016             goto fail;
2017         }
2018         break;
2019 
2020     case 0x2008: /* tst Rm,Rn */
2021         /* Looking for a compare-and-swap against zero.  */
2022         if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2023             goto fail;
2024         }
2025         op_opc = INDEX_op_setcond_i32;
2026         op_arg = tcg_constant_i32(0);
2027 
2028         NEXT_INSN;
2029         if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2030             || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2031             goto fail;
2032         }
2033         break;
2034 
2035     default:
2036         /* Put back and re-examine as store.  */
2037         --i;
2038     }
2039 
2040     /*
2041      * Expect the store.
2042      */
2043     /* The store must be the last insn.  */
2044     if (i != max_insns - 1) {
2045         goto fail;
2046     }
2047     NEXT_INSN;
2048     switch (ctx->opcode & 0xf00f) {
2049     case 0x2000: /* mov.b Rm,@Rn */
2050         st_mop = MO_UB;
2051         break;
2052     case 0x2001: /* mov.w Rm,@Rn */
2053         st_mop = MO_UW;
2054         break;
2055     case 0x2002: /* mov.l Rm,@Rn */
2056         st_mop = MO_UL;
2057         break;
2058     default:
2059         goto fail;
2060     }
2061     /* The store must match the load.  */
2062     if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2063         goto fail;
2064     }
2065     st_src = B7_4;
2066 
2067 #undef NEXT_INSN
2068 
2069     /*
2070      * Emit the operation.
2071      */
2072     switch (op_opc) {
2073     case -1:
2074         /* No operation found.  Look for exchange pattern.  */
2075         if (st_src == ld_dst || mv_src >= 0) {
2076             goto fail;
2077         }
2078         tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2079                                 ctx->memidx, ld_mop);
2080         break;
2081 
2082     case INDEX_op_add_i32:
2083         if (op_dst != st_src) {
2084             goto fail;
2085         }
2086         if (op_dst == ld_dst && st_mop == MO_UL) {
2087             tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2088                                          op_arg, ctx->memidx, ld_mop);
2089         } else {
2090             tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2091                                          op_arg, ctx->memidx, ld_mop);
2092             if (op_dst != ld_dst) {
2093                 /* Note that mop sizes < 4 cannot use add_fetch
2094                    because it won't carry into the higher bits.  */
2095                 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2096             }
2097         }
2098         break;
2099 
2100     case INDEX_op_and_i32:
2101         if (op_dst != st_src) {
2102             goto fail;
2103         }
2104         if (op_dst == ld_dst) {
2105             tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2106                                          op_arg, ctx->memidx, ld_mop);
2107         } else {
2108             tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2109                                          op_arg, ctx->memidx, ld_mop);
2110             tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2111         }
2112         break;
2113 
2114     case INDEX_op_or_i32:
2115         if (op_dst != st_src) {
2116             goto fail;
2117         }
2118         if (op_dst == ld_dst) {
2119             tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2120                                         op_arg, ctx->memidx, ld_mop);
2121         } else {
2122             tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2123                                         op_arg, ctx->memidx, ld_mop);
2124             tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2125         }
2126         break;
2127 
2128     case INDEX_op_xor_i32:
2129         if (op_dst != st_src) {
2130             goto fail;
2131         }
2132         if (op_dst == ld_dst) {
2133             tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2134                                          op_arg, ctx->memidx, ld_mop);
2135         } else {
2136             tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2137                                          op_arg, ctx->memidx, ld_mop);
2138             tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2139         }
2140         break;
2141 
2142     case INDEX_op_setcond_i32:
2143         if (st_src == ld_dst) {
2144             goto fail;
2145         }
2146         tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2147                                    REG(st_src), ctx->memidx, ld_mop);
2148         tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2149         if (mt_dst >= 0) {
2150             tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2151         }
2152         break;
2153 
2154     default:
2155         g_assert_not_reached();
2156     }
2157 
2158     /* The entire region has been translated.  */
2159     ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2160     goto done;
2161 
2162  fail:
2163     qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2164                   pc, pc_end);
2165 
2166     gen_restart_exclusive(ctx);
2167 
2168     /* We're not executing an instruction, but we must report one for the
2169        purposes of accounting within the TB.  We might as well report the
2170        entire region consumed via ctx->base.pc_next so that it's immediately
2171        available in the disassembly dump.  */
2172 
2173  done:
2174     ctx->base.pc_next = pc_end;
2175     ctx->base.num_insns += max_insns - 1;
2176 
2177     /*
2178      * Emit insn_start to cover each of the insns in the region.
2179      * This matches an assert in tcg.c making sure that we have
2180      * tb->icount * insn_start.
2181      */
2182     for (i = 1; i < max_insns; ++i) {
2183         tcg_gen_insn_start(pc + i * 2, ctx->envflags);
2184     }
2185 }
2186 #endif
2187 
2188 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2189 {
2190     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2191     uint32_t tbflags;
2192     int bound;
2193 
2194     ctx->tbflags = tbflags = ctx->base.tb->flags;
2195     ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
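         /* MMU index: privileged code (SR.MD set) uses index 0, user mode
            uses index 1. */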
2196     ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2197     /* We don't know if the delayed pc came from a dynamic or static branch,
2198        so assume it is a dynamic branch.  */
2199     ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2200     ctx->features = cpu_env(cs)->features;
2201     ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2202     ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2203                   (tbflags & (1 << SR_RB))) * 0x10;
2204     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2205 
2206 #ifdef CONFIG_USER_ONLY
2207     if (tbflags & TB_FLAG_GUSA_MASK) {
2208         /* In gUSA exclusive region. */
2209         uint32_t pc = ctx->base.pc_next;
2210         uint32_t pc_end = ctx->base.tb->cs_base;
2211         int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2212         int max_insns = (pc_end - pc) / 2;
2213 
2214         if (pc != pc_end + backup || max_insns < 2) {
2215             /* This is a malformed gUSA region.  Don't do anything special,
2216                since the interpreter is likely to get confused.  */
2217             ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2218         } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2219             /* Regardless of single-stepping or the end of the page,
2220                we must complete execution of the gUSA region while
2221                holding the exclusive lock.  */
2222             ctx->base.max_insns = max_insns;
2223             return;
2224         }
2225     }
2226 #endif
2227 
2228     /* Since the ISA is fixed-width, we can bound by the number
2229        of instructions remaining on the page.  */
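         /* E.g. with 4 KiB pages and pc_next at page offset 0xff8, the
            expression below yields (0x1000 - 0xff8) / 2 = 4 insns. */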
2230     bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2231     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2232 }
2233 
2234 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2235 {
2236 }
2237 
2238 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2239 {
2240     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2241 
2242     tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2243 }
2244 
2245 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2246 {
2247     CPUSH4State *env = cpu_env(cs);
2248     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2249 
2250 #ifdef CONFIG_USER_ONLY
2251     if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2252         && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
2253          * We're in a gUSA region, and we have not already fallen
2254          * We're in an gUSA region, and we have not already fallen
2255          * back on using an exclusive region.  Attempt to parse the
2256          * region into a single supported atomic operation.  Failure
2257          * is handled within the parser by raising an exception to
2258          * retry using an exclusive region.
2259          *
2260          * Parsing the region in one block conflicts with plugins,
2261          * so always use exclusive mode if plugins enabled.
2262          */
2263         if (ctx->base.plugin_enabled) {
2264             gen_restart_exclusive(ctx);
2265             ctx->base.pc_next += 2;
2266         } else {
2267             decode_gusa(ctx, env);
2268         }
2269         return;
2270     }
2271 #endif
2272 
2273     ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2274     decode_opc(ctx);
2275     ctx->base.pc_next += 2;
2276 }
2277 
2278 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2279 {
2280     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2281 
2282     if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2283         /* Ending the region of exclusivity.  Clear the bits.  */
2284         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2285     }
2286 
2287     switch (ctx->base.is_jmp) {
2288     case DISAS_STOP:
2289         gen_save_cpu_state(ctx, true);
2290         tcg_gen_exit_tb(NULL, 0);
2291         break;
2292     case DISAS_NEXT:
2293     case DISAS_TOO_MANY:
2294         gen_save_cpu_state(ctx, false);
2295         gen_goto_tb(ctx, 0, ctx->base.pc_next);
2296         break;
2297     case DISAS_NORETURN:
2298         break;
2299     default:
2300         g_assert_not_reached();
2301     }
2302 }
2303 
2304 static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2305                              CPUState *cs, FILE *logfile)
2306 {
2307     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2308     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2309 }
2310 
2311 static const TranslatorOps sh4_tr_ops = {
2312     .init_disas_context = sh4_tr_init_disas_context,
2313     .tb_start           = sh4_tr_tb_start,
2314     .insn_start         = sh4_tr_insn_start,
2315     .translate_insn     = sh4_tr_translate_insn,
2316     .tb_stop            = sh4_tr_tb_stop,
2317     .disas_log          = sh4_tr_disas_log,
2318 };
2319 
2320 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2321                            vaddr pc, void *host_pc)
2322 {
2323     DisasContext ctx;
2324 
2325     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2326 }
2327