/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C)   ((C)->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C)   0
#endif

/* Target-specific values for ctx->base.is_jmp.  */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUSH4State *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

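/*
 * SR is kept split across globals: cpu_sr holds every bit except Q, M
 * and T, which are cached separately in cpu_sr_q/m/t so the translator
 * can set and test them without masking.  Reading SR therefore means
 * OR-ing the three cached bits back into their architectural positions.
 */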
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

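/*
 * Flush the translate-time copies of pc, delayed_pc and the env flags
 * back to their TCG globals; stores are only emitted for values that
 * may differ from what the TB entry state already guarantees.
 */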
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

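/*
 * Inside a gUSA exclusive region we must not chain directly to another
 * TB; forcing tcg_gen_exit_tb() returns control to the cpu loop so the
 * exclusive state is re-examined before further code runs.
 */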
static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

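/*
 * Three ways out of a TB, from cheapest to most general: direct
 * chaining via goto_tb when the destination is known and permitted,
 * lookup_and_goto_ptr() for an indirect hop that stays inside generated
 * code, and exit_tb(NULL, 0) to drop back to the cpu loop.
 */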
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jump(DisasContext *ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, as immediate jumps are conditional jumps.  */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext *ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

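/*
 * A double-precision DRn is the even/odd pair FRn:FRn+1, with the even
 * register holding the most significant half, hence the operand order
 * in the concat/extract below (bank-adjusted via ctx->fbank).
 */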
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

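/* Opcode field extraction: B<hi>_<lo> yields bits hi..lo of the 16-bit
   opcode; the 's' suffix marks a sign-extended immediate. */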
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

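/*
 * With FPSCR.SZ set the register field encodes XDn: bit 0 selects the
 * other FP bank and bits 3..1 the even register number.  XHACK hoists
 * that bank bit up to bit 4 so the result indexes cpu_fregs directly.
 */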
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext *ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
         to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
         to flush the cache. Here, the data written by movca.l is never
         written to memory, and the data written is just bogus.

       To simulate this, we simulate movca.l: we store the value to memory,
       but we also remember the previous content. If we see ocbi, we check
       if movca.l for that address was done previously. If so, the write should
       not have hit the memory, so we restore the previous content.
       When we see an instruction that is neither movca.l
       nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(tcg_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(tcg_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) -1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd: /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd: /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(tcg_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        CHECK_NOT_DELAY_SLOT
        {
            TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | MO_ALIGN);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        CHECK_NOT_DELAY_SLOT
        {
            TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | MO_ALIGN);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
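            /*
             * Rn + Rm + T in two widening adds: t1 = Rm + T with the
             * carry-out kept in T, then Rn:T = Rn:0 + t1:T so the final
             * carry lands back in T.
             */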
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv Rn = REG(B11_8);
            TCGv Rm = REG(B7_4);
            TCGv result, t1, t2;

            result = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            tcg_gen_add_i32(result, Rm, Rn);
            /* T = ((result ^ Rn) & ~(Rm ^ Rn)) >> 31 */
            tcg_gen_xor_i32(t1, result, Rn);
            tcg_gen_xor_i32(t2, Rm, Rn);
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_gen_mov_i32(Rn, result);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
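            /*
             * T is set when any byte of Rm equals the corresponding
             * byte of Rn: a zero byte in cmp2 = Rm ^ Rn is detected
             * with the classic (x - 0x01010101) & ~x & 0x80808080 test.
             */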
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_constant_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            gen_helper_macl(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            gen_helper_macw(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
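            /*
             * 0 - Rm - T via the same widening trick as addc/subc:
             * first Rm + T with its carry-out, then a double-word
             * subtraction from zero whose borrow ends up in bit 0 of T.
             */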
            TCGv t0 = tcg_constant_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
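            /*
             * Rn - Rm - T: compute t1 = Rm + T with the carry kept in
             * T, then the double-word subtraction Rn:0 - t1:T, keeping
             * only the borrow bit in T.
             */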
925             TCGv t0, t1;
926             t0 = tcg_constant_tl(0);
927             t1 = tcg_temp_new();
928             tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
929             tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
930                              REG(B11_8), t0, t1, cpu_sr_t);
931             tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
932         }
933         return;
934     case 0x300b: /* subv Rm,Rn */
935         {
936             TCGv Rn = REG(B11_8);
937             TCGv Rm = REG(B7_4);
938             TCGv result, t1, t2;
939 
940             result = tcg_temp_new();
941             t1 = tcg_temp_new();
942             t2 = tcg_temp_new();
943             tcg_gen_sub_i32(result, Rn, Rm);
944             /* T = ((Rn ^ Rm) & (Result ^ Rn)) >> 31 */
945             tcg_gen_xor_i32(t1, result, Rn);
946             tcg_gen_xor_i32(t2, Rn, Rm);
947             tcg_gen_and_i32(t1, t1, t2);
948             tcg_gen_shri_i32(cpu_sr_t, t1, 31);
949             tcg_gen_mov_i32(Rn, result);
950         }
951         return;
952     case 0x2008: /* tst Rm,Rn */
953         {
954             TCGv val = tcg_temp_new();
955             tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
956             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
957         }
958         return;
959     case 0x200a: /* xor Rm,Rn */
960         tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
961         return;
962     case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
963         CHECK_FPU_ENABLED
964         if (ctx->tbflags & FPSCR_SZ) {
965             int xsrc = XHACK(B7_4);
966             int xdst = XHACK(B11_8);
967             tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
968             tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
969         } else {
970             tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
971         }
972         return;
973     case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
974         CHECK_FPU_ENABLED
975         if (ctx->tbflags & FPSCR_SZ) {
976             TCGv_i64 fp = tcg_temp_new_i64();
977             gen_load_fpr64(ctx, fp, XHACK(B7_4));
978             tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
979                                 MO_TEUQ | MO_ALIGN);
980         } else {
981             tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
982                                 MO_TEUL | MO_ALIGN);
983         }
984         return;
985     case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
986         CHECK_FPU_ENABLED
987         if (ctx->tbflags & FPSCR_SZ) {
988             TCGv_i64 fp = tcg_temp_new_i64();
989             tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
990                                 MO_TEUQ | MO_ALIGN);
991             gen_store_fpr64(ctx, fp, XHACK(B11_8));
992         } else {
993             tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
994                                 MO_TEUL | MO_ALIGN);
995         }
996         return;
997     case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
998         CHECK_FPU_ENABLED
999         if (ctx->tbflags & FPSCR_SZ) {
1000             TCGv_i64 fp = tcg_temp_new_i64();
1001             tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
1002                                 MO_TEUQ | MO_ALIGN);
1003             gen_store_fpr64(ctx, fp, XHACK(B11_8));
1004             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1005         } else {
1006             tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
1007                                 MO_TEUL | MO_ALIGN);
1008             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1009         }
1010         return;
1011     case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1012         CHECK_FPU_ENABLED
1013         {
1014             TCGv addr = tcg_temp_new_i32();
1015             if (ctx->tbflags & FPSCR_SZ) {
1016                 TCGv_i64 fp = tcg_temp_new_i64();
1017                 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1018                 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1019                 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1020                                     MO_TEUQ | MO_ALIGN);
1021             } else {
1022                 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1023                 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1024                                     MO_TEUL | MO_ALIGN);
1025             }
1026             tcg_gen_mov_i32(REG(B11_8), addr);
1027         }
1028         return;
1029     case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1030         CHECK_FPU_ENABLED
1031         {
1032             TCGv addr = tcg_temp_new_i32();
1033             tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1034             if (ctx->tbflags & FPSCR_SZ) {
1035                 TCGv_i64 fp = tcg_temp_new_i64();
1036                 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
1037                                     MO_TEUQ | MO_ALIGN);
1038                 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1039             } else {
1040                 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
1041                                     MO_TEUL | MO_ALIGN);
1042             }
1043         }
1044         return;
1045     case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1046         CHECK_FPU_ENABLED
1047         {
1048             TCGv addr = tcg_temp_new();
1049             tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1050             if (ctx->tbflags & FPSCR_SZ) {
1051                 TCGv_i64 fp = tcg_temp_new_i64();
1052                 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1053                 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1054                                     MO_TEUQ | MO_ALIGN);
1055             } else {
1056                 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1057                                     MO_TEUL | MO_ALIGN);
1058             }
1059         }
1060         return;
1061     case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1062     case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1063     case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1064     case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1065     case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1066     case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1067         {
1068             CHECK_FPU_ENABLED
1069             if (ctx->tbflags & FPSCR_PR) {
1070                 TCGv_i64 fp0, fp1;
1071 
1072                 if (ctx->opcode & 0x0110) {
1073                     goto do_illegal;
1074                 }
1075                 fp0 = tcg_temp_new_i64();
1076                 fp1 = tcg_temp_new_i64();
1077                 gen_load_fpr64(ctx, fp0, B11_8);
1078                 gen_load_fpr64(ctx, fp1, B7_4);
1079                 switch (ctx->opcode & 0xf00f) {
1080                 case 0xf000: /* fadd Rm,Rn */
1081                     gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
1082                     break;
1083                 case 0xf001: /* fsub Rm,Rn */
1084                     gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
1085                     break;
1086                 case 0xf002: /* fmul Rm,Rn */
1087                     gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
1088                     break;
1089                 case 0xf003: /* fdiv Rm,Rn */
1090                     gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
1091                     break;
1092                 case 0xf004: /* fcmp/eq Rm,Rn */
1093                     gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
1094                     return;
1095                 case 0xf005: /* fcmp/gt Rm,Rn */
1096                     gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
1097                     return;
1098                 }
1099                 gen_store_fpr64(ctx, fp0, B11_8);
1100             } else {
1101                 switch (ctx->opcode & 0xf00f) {
1102                 case 0xf000: /* fadd Rm,Rn */
1103                     gen_helper_fadd_FT(FREG(B11_8), tcg_env,
1104                                        FREG(B11_8), FREG(B7_4));
1105                     break;
1106                 case 0xf001: /* fsub Rm,Rn */
1107                     gen_helper_fsub_FT(FREG(B11_8), tcg_env,
1108                                        FREG(B11_8), FREG(B7_4));
1109                     break;
1110                 case 0xf002: /* fmul Rm,Rn */
1111                     gen_helper_fmul_FT(FREG(B11_8), tcg_env,
1112                                        FREG(B11_8), FREG(B7_4));
1113                     break;
1114                 case 0xf003: /* fdiv Rm,Rn */
1115                     gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
1116                                        FREG(B11_8), FREG(B7_4));
1117                     break;
1118                 case 0xf004: /* fcmp/eq Rm,Rn */
1119                     gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
1120                                           FREG(B11_8), FREG(B7_4));
1121                     return;
1122                 case 0xf005: /* fcmp/gt Rm,Rn */
1123                     gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
1124                                           FREG(B11_8), FREG(B7_4));
1125                     return;
1126                 }
1127             }
1128         }
1129         return;
1130     case 0xf00e: /* fmac FR0,RM,Rn */
1131         CHECK_FPU_ENABLED
1132         CHECK_FPSCR_PR_0
1133         gen_helper_fmac_FT(FREG(B11_8), tcg_env,
1134                            FREG(0), FREG(B7_4), FREG(B11_8));
1135         return;
1136     }
1137 
1138     switch (ctx->opcode & 0xff00) {
1139     case 0xc900: /* and #imm,R0 */
1140         tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1141         return;
1142     case 0xcd00: /* and.b #imm,@(R0,GBR) */
1143         {
1144             TCGv addr, val;
1145             addr = tcg_temp_new();
1146             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1147             val = tcg_temp_new();
1148             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1149             tcg_gen_andi_i32(val, val, B7_0);
1150             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1151         }
1152         return;
1153     case 0x8b00: /* bf label */
1154         CHECK_NOT_DELAY_SLOT
1155         gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1156         return;
1157     case 0x8f00: /* bf/s label */
1158         CHECK_NOT_DELAY_SLOT
1159         tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1160         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1161         ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1162         return;
1163     case 0x8900: /* bt label */
1164         CHECK_NOT_DELAY_SLOT
1165         gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1166         return;
1167     case 0x8d00: /* bt/s label */
1168         CHECK_NOT_DELAY_SLOT
1169         tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1170         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1171         ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
1172         return;
1173     case 0x8800: /* cmp/eq #imm,R0 */
1174         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1175         return;
1176     case 0xc400: /* mov.b @(disp,GBR),R0 */
1177         {
1178             TCGv addr = tcg_temp_new();
1179             tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1180             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1181         }
1182         return;
1183     case 0xc500: /* mov.w @(disp,GBR),R0 */
1184         {
1185             TCGv addr = tcg_temp_new();
1186             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1187             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
1188         }
1189         return;
1190     case 0xc600: /* mov.l @(disp,GBR),R0 */
1191         {
1192             TCGv addr = tcg_temp_new();
1193             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1194             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
1195         }
1196         return;
1197     case 0xc000: /* mov.b R0,@(disp,GBR) */
1198         {
1199             TCGv addr = tcg_temp_new();
1200             tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1201             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1202         }
1203         return;
1204     case 0xc100: /* mov.w R0,@(disp,GBR) */
1205         {
1206             TCGv addr = tcg_temp_new();
1207             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1208             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
1209         }
1210         return;
1211     case 0xc200: /* mov.l R0,@(disp,GBR) */
1212         {
1213             TCGv addr = tcg_temp_new();
1214             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1215             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1216         }
1217         return;
1218     case 0x8000: /* mov.b R0,@(disp,Rn) */
1219         {
1220             TCGv addr = tcg_temp_new();
1221             tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1222             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1223         }
1224         return;
1225     case 0x8100: /* mov.w R0,@(disp,Rn) */
1226         {
1227             TCGv addr = tcg_temp_new();
1228             tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1229             tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1230                                 MO_TEUW | UNALIGN(ctx));
1231         }
1232         return;
1233     case 0x8400: /* mov.b @(disp,Rn),R0 */
1234         {
1235             TCGv addr = tcg_temp_new();
1236             tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1237             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1238         }
1239         return;
1240     case 0x8500: /* mov.w @(disp,Rn),R0 */
1241         {
1242             TCGv addr = tcg_temp_new();
1243             tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1244             tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1245                                 MO_TESW | UNALIGN(ctx));
1246         }
1247         return;
1248     case 0xc700: /* mova @(disp,PC),R0 */
1249         CHECK_NOT_DELAY_SLOT
1250         tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1251                                   4 + B7_0 * 4) & ~3);
1252         return;
1253     case 0xcb00: /* or #imm,R0 */
1254         tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1255         return;
1256     case 0xcf00: /* or.b #imm,@(R0,GBR) */
1257         {
1258             TCGv addr, val;
1259             addr = tcg_temp_new();
1260             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1261             val = tcg_temp_new();
1262             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1263             tcg_gen_ori_i32(val, val, B7_0);
1264             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1265         }
1266         return;
1267     case 0xc300: /* trapa #imm */
1268         {
1269             TCGv imm;
1270             CHECK_NOT_DELAY_SLOT
1271             gen_save_cpu_state(ctx, true);
1272             imm = tcg_constant_i32(B7_0);
1273             gen_helper_trapa(tcg_env, imm);
1274             ctx->base.is_jmp = DISAS_NORETURN;
1275         }
1276         return;
1277     case 0xc800: /* tst #imm,R0 */
1278         {
1279             TCGv val = tcg_temp_new();
1280             tcg_gen_andi_i32(val, REG(0), B7_0);
1281             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1282         }
1283         return;
1284     case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1285         {
1286             TCGv val = tcg_temp_new();
1287             tcg_gen_add_i32(val, REG(0), cpu_gbr);
1288             tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1289             tcg_gen_andi_i32(val, val, B7_0);
1290             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1291         }
1292         return;
1293     case 0xca00: /* xor #imm,R0 */
1294         tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1295         return;
1296     case 0xce00: /* xor.b #imm,@(R0,GBR) */
1297         {
1298             TCGv addr, val;
1299             addr = tcg_temp_new();
1300             tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1301             val = tcg_temp_new();
1302             tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1303             tcg_gen_xori_i32(val, val, B7_0);
1304             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1305         }
1306         return;
1307     }
1308 
1309     switch (ctx->opcode & 0xf08f) {
1310     case 0x408e: /* ldc Rm,Rn_BANK */
1311         CHECK_PRIVILEGED
1312         tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1313         return;
1314     case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1315         CHECK_PRIVILEGED
1316         tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1317                             MO_TESL | MO_ALIGN);
1318         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1319         return;
1320     case 0x0082: /* stc Rm_BANK,Rn */
1321         CHECK_PRIVILEGED
1322         tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1323         return;
1324     case 0x4083: /* stc.l Rm_BANK,@-Rn */
1325         CHECK_PRIVILEGED
1326         {
1327             TCGv addr = tcg_temp_new();
1328             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1329             tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1330                                 MO_TEUL | MO_ALIGN);
1331             tcg_gen_mov_i32(REG(B11_8), addr);
1332         }
1333         return;
1334     }
1335 
1336     switch (ctx->opcode & 0xf0ff) {
1337     case 0x0023: /* braf Rn */
1338         CHECK_NOT_DELAY_SLOT
1339         tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1340         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1341         ctx->delayed_pc = (uint32_t) - 1;
1342         return;
1343     case 0x0003: /* bsrf Rn */
1344         CHECK_NOT_DELAY_SLOT
1345         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1346         tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1347         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1348         ctx->delayed_pc = (uint32_t) - 1;
1349         return;
1350     case 0x4015: /* cmp/pl Rn */
1351         tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1352         return;
1353     case 0x4011: /* cmp/pz Rn */
1354         tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1355         return;
1356     case 0x4010: /* dt Rn */
1357         tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1358         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1359         return;
1360     case 0x402b: /* jmp @Rn */
1361         CHECK_NOT_DELAY_SLOT
1362         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1363         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1364         ctx->delayed_pc = (uint32_t) - 1;
1365         return;
1366     case 0x400b: /* jsr @Rn */
1367         CHECK_NOT_DELAY_SLOT
1368         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1369         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1370         ctx->envflags |= TB_FLAG_DELAY_SLOT;
1371         ctx->delayed_pc = (uint32_t) - 1;
1372         return;
1373     case 0x400e: /* ldc Rm,SR */
1374         CHECK_PRIVILEGED
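        /*
         * A write to SR can switch register banks and unmask pending
         * interrupts, so the TB is ended (DISAS_STOP) to let the cpu
         * loop re-evaluate state.
         */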
1375         {
1376             TCGv val = tcg_temp_new();
1377             tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1378             gen_write_sr(val);
1379             ctx->base.is_jmp = DISAS_STOP;
1380         }
1381         return;
1382     case 0x4007: /* ldc.l @Rm+,SR */
1383         CHECK_PRIVILEGED
1384         {
1385             TCGv val = tcg_temp_new();
1386             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1387                                 MO_TESL | MO_ALIGN);
1388             tcg_gen_andi_i32(val, val, 0x700083f3);
1389             gen_write_sr(val);
1390             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1391             ctx->base.is_jmp = DISAS_STOP;
1392         }
1393         return;
1394     case 0x0002: /* stc SR,Rn */
1395         CHECK_PRIVILEGED
1396         gen_read_sr(REG(B11_8));
1397         return;
1398     case 0x4003: /* stc SR,@-Rn */
1399         CHECK_PRIVILEGED
1400         {
1401             TCGv addr = tcg_temp_new();
1402             TCGv val = tcg_temp_new();
1403             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1404             gen_read_sr(val);
1405             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1406             tcg_gen_mov_i32(REG(B11_8), addr);
1407         }
1408         return;
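/*
 * The LD/ST macros below generate the register-transfer forms for a
 * control or system register: a plain move plus the post-increment load
 * and pre-decrement store variants.  For example, LDST(gbr, 0x401e,
 * 0x4017, 0x0012, 0x4013, {}) expands into the four cases
 *     0x401e  ldc Rm,GBR        0x4017  ldc.l @Rm+,GBR
 *     0x0012  stc GBR,Rn        0x4013  stc.l GBR,@-Rn
 * all operating on cpu_gbr.
 */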
1409 #define LD(reg,ldnum,ldpnum,prechk)            \
1410   case ldnum:                                                        \
1411     prechk                                                           \
1412     tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                          \
1413     return;                                                          \
1414   case ldpnum:                                                       \
1415     prechk                                                           \
1416     tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx,          \
1417                         MO_TESL | MO_ALIGN);                         \
1418     tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                     \
1419     return;
1420 #define ST(reg,stnum,stpnum,prechk)                \
1421   case stnum:                                                        \
1422     prechk                                                           \
1423     tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                          \
1424     return;                                                          \
1425   case stpnum:                                                       \
1426     prechk                                                           \
1427     {                                                                \
1428         TCGv addr = tcg_temp_new();                                  \
1429         tcg_gen_subi_i32(addr, REG(B11_8), 4);                       \
1430         tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx,            \
1431                             MO_TEUL | MO_ALIGN);                     \
1432         tcg_gen_mov_i32(REG(B11_8), addr);                           \
1433     }                                                                \
1434     return;
1435 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)                \
1436         LD(reg,ldnum,ldpnum,prechk)                               \
1437         ST(reg,stnum,stpnum,prechk)
1438         LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1439         LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1440         LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1441         LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1442         ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1443         LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1444         LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1445         LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1446         LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1447         LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1448         LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1449     case 0x406a: /* lds Rm,FPSCR */
1450         CHECK_FPU_ENABLED
1451         gen_helper_ld_fpscr(tcg_env, REG(B11_8));
1452         ctx->base.is_jmp = DISAS_STOP;
1453         return;
1454     case 0x4066: /* lds.l @Rm+,FPSCR */
1455         CHECK_FPU_ENABLED
1456         {
1457             TCGv addr = tcg_temp_new();
1458             tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1459                                 MO_TESL | MO_ALIGN);
1460             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1461             gen_helper_ld_fpscr(tcg_env, addr);
1462             ctx->base.is_jmp = DISAS_STOP;
1463         }
1464         return;
1465     case 0x006a: /* sts FPSCR,Rn */
1466         CHECK_FPU_ENABLED
1467         tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1468         return;
1469     case 0x4062: /* sts FPSCR,@-Rn */
1470         CHECK_FPU_ENABLED
1471         {
1472             TCGv addr, val;
1473             val = tcg_temp_new();
1474             tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1475             addr = tcg_temp_new();
1476             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1477             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1478             tcg_gen_mov_i32(REG(B11_8), addr);
1479         }
1480         return;
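    /*
     * For movca.l, the word currently in memory is read first so that
     * helper_movcal can record it; a later ocbi on the same line can
     * then restore it, modelling SH4 cache-allocate semantics where a
     * line allocated by movca.l and invalidated before write-back never
     * reaches memory.
     */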
1481     case 0x00c3: /* movca.l R0,@Rn */
1482         {
1483             TCGv val = tcg_temp_new();
1484             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1485                                 MO_TEUL | MO_ALIGN);
1486             gen_helper_movcal(tcg_env, REG(B11_8), val);
1487             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1488                                 MO_TEUL | MO_ALIGN);
1489         }
1490         ctx->has_movcal = 1;
1491         return;
1492     case 0x40a9: /* movua.l @Rm,R0 */
1493         CHECK_SH4A
1494         /* Load non-boundary-aligned data */
1495         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1496                             MO_TEUL | MO_UNALN);
1497         return;
1498     case 0x40e9: /* movua.l @Rm+,R0 */
1499         CHECK_SH4A
1500         /* Load non-boundary-aligned data */
1501         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1502                             MO_TEUL | MO_UNALN);
1503         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1504         return;
1505     case 0x0029: /* movt Rn */
1506         tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1507         return;
1508     case 0x0073:
1509         /* MOVCO.L
1510          *     LDST -> T
1511          *     If (T == 1) R0 -> (Rn)
1512          *     0 -> LDST
1513          *
1514          * The above description doesn't work in a parallel context.
1515          * Since we currently support no SMP boards, this implies user-mode.
1516          * But we can still support the official mechanism while user-mode
1517          * is single-threaded.  */
1518         CHECK_SH4A
1519         {
1520             TCGLabel *fail = gen_new_label();
1521             TCGLabel *done = gen_new_label();
1522 
1523             if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1524                 TCGv tmp;
1525 
1526                 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1527                                    cpu_lock_addr, fail);
1528                 tmp = tcg_temp_new();
1529                 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1530                                            REG(0), ctx->memidx,
1531                                            MO_TEUL | MO_ALIGN);
1532                 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1533             } else {
1534                 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1535                 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1536                                     MO_TEUL | MO_ALIGN);
1537                 tcg_gen_movi_i32(cpu_sr_t, 1);
1538             }
1539             tcg_gen_br(done);
1540 
1541             gen_set_label(fail);
1542             tcg_gen_movi_i32(cpu_sr_t, 0);
1543 
1544             gen_set_label(done);
1545             tcg_gen_movi_i32(cpu_lock_addr, -1);
1546         }
1547         return;
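    /*
     * movco.l pairs with movli.l (below) to form a load-locked /
     * store-conditional loop.  An illustrative (untested) guest
     * sequence incrementing the word at @r4:
     *     1: movli.l @r4,r0    ! load, set LDST
     *        add     #1,r0
     *        movco.l r0,@r4    ! store iff LDST set; T = success
     *        bf      1b        ! retry on failure
     */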
1548     case 0x0063:
1549         /* MOVLI.L @Rm,R0
1550          *     1 -> LDST
1551          *     (Rm) -> R0
1552          *     When interrupt/exception
1553          *     occurred 0 -> LDST
1554          *
1555          * In a parallel context, we must also save the loaded value
1556          * for use with the cmpxchg that we'll use with movco.l.  */
1557         CHECK_SH4A
1558         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1559             TCGv tmp = tcg_temp_new();
1560             tcg_gen_mov_i32(tmp, REG(B11_8));
1561             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1562                                 MO_TESL | MO_ALIGN);
1563             tcg_gen_mov_i32(cpu_lock_value, REG(0));
1564             tcg_gen_mov_i32(cpu_lock_addr, tmp);
1565         } else {
1566             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1567                                 MO_TESL | MO_ALIGN);
1568             tcg_gen_movi_i32(cpu_lock_addr, 0);
1569         }
1570         return;
1571     case 0x0093: /* ocbi @Rn */
1572         gen_helper_ocbi(tcg_env, REG(B11_8));
1575         return;
1576     case 0x00a3: /* ocbp @Rn */
1577     case 0x00b3: /* ocbwb @Rn */
1578         /* These instructions are supposed to do nothing in case of
1579            a cache miss. Given that we only partially emulate caches
1580            it is safe to simply ignore them. */
1581         return;
1582     case 0x0083: /* pref @Rn */
1583         return;
1584     case 0x00d3: /* prefi @Rn */
1585         CHECK_SH4A
1586         return;
1587     case 0x00e3: /* icbi @Rn */
1588         CHECK_SH4A
1589         return;
1590     case 0x00ab: /* synco */
1591         CHECK_SH4A
1592         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1593         return;
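    /*
     * rotcl/rotcr rotate Rn through the T bit as a 33-bit quantity:
     * rotcl: T <- bit 31, bit 0 <- old T;
     * rotcr: T <- bit 0,  bit 31 <- old T.
     */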
1594     case 0x4024: /* rotcl Rn */
1595         {
1596             TCGv tmp = tcg_temp_new();
1597             tcg_gen_mov_i32(tmp, cpu_sr_t);
1598             tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1599             tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1600             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1601         }
1602         return;
1603     case 0x4025: /* rotcr Rn */
1604         {
1605             TCGv tmp = tcg_temp_new();
1606             tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1607             tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1608             tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1609             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1610         }
1611         return;
1612     case 0x4004: /* rotl Rn */
1613         tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1614         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1615         return;
1616     case 0x4005: /* rotr Rn */
1617         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1618         tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1619         return;
1620     case 0x4000: /* shll Rn */
1621     case 0x4020: /* shal Rn */
1622         tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1623         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1624         return;
1625     case 0x4021: /* shar Rn */
1626         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1627         tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1628         return;
1629     case 0x4001: /* shlr Rn */
1630         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1631         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1632         return;
1633     case 0x4008: /* shll2 Rn */
1634         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1635         return;
1636     case 0x4018: /* shll8 Rn */
1637         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1638         return;
1639     case 0x4028: /* shll16 Rn */
1640         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1641         return;
1642     case 0x4009: /* shlr2 Rn */
1643         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1644         return;
1645     case 0x4019: /* shlr8 Rn */
1646         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1647         return;
1648     case 0x4029: /* shlr16 Rn */
1649         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1650         return;
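    /*
     * tas.b atomically reads the byte at @Rn and sets its bit 7;
     * T is set iff the value read back was zero.  The fetch_or below
     * performs both steps in one atomic operation.
     */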
1651     case 0x401b: /* tas.b @Rn */
1652         tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1653                                     tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1654         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
1655         return;
1656     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1657         CHECK_FPU_ENABLED
1658         tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1659         return;
1660     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1661         CHECK_FPU_ENABLED
1662         tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1663         return;
1664     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1665         CHECK_FPU_ENABLED
1666         if (ctx->tbflags & FPSCR_PR) {
1667             TCGv_i64 fp;
1668             if (ctx->opcode & 0x0100) {
1669                 goto do_illegal;
1670             }
1671             fp = tcg_temp_new_i64();
1672             gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1673             gen_store_fpr64(ctx, fp, B11_8);
1674         } else {
1676             gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
1677         }
1678         return;
1679     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1680         CHECK_FPU_ENABLED
1681         if (ctx->tbflags & FPSCR_PR) {
1682             TCGv_i64 fp;
1683             if (ctx->opcode & 0x0100) {
1684                 goto do_illegal;
1685             }
1686             fp = tcg_temp_new_i64();
1687             gen_load_fpr64(ctx, fp, B11_8);
1688             gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
1689         } else {
1691             gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
1692         }
1693         return;
1694     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1695         CHECK_FPU_ENABLED
1696         tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1697         return;
1698     case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1699         CHECK_FPU_ENABLED
1700         tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1701         return;
1702     case 0xf06d: /* fsqrt FRn */
1703         CHECK_FPU_ENABLED
1704         if (ctx->tbflags & FPSCR_PR) {
1705             if (ctx->opcode & 0x0100) {
1706                 goto do_illegal;
1707             }
1708             TCGv_i64 fp = tcg_temp_new_i64();
1709             gen_load_fpr64(ctx, fp, B11_8);
1710             gen_helper_fsqrt_DT(fp, tcg_env, fp);
1711             gen_store_fpr64(ctx, fp, B11_8);
1712         } else {
1713             gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1714         }
1715         return;
1716     case 0xf07d: /* fsrra FRn */
1717         CHECK_FPU_ENABLED
1718         CHECK_FPSCR_PR_0
1719         gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1720         return;
1721     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1722         CHECK_FPU_ENABLED
1723         CHECK_FPSCR_PR_0
1724         tcg_gen_movi_i32(FREG(B11_8), 0);
1725         return;
1726     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1727         CHECK_FPU_ENABLED
1728         CHECK_FPSCR_PR_0
1729         tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1730         return;
1731     case 0xf0ad: /* fcnvsd FPUL,DRn */
1732         CHECK_FPU_ENABLED
1733         {
1734             TCGv_i64 fp = tcg_temp_new_i64();
1735             gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1736             gen_store_fpr64(ctx, fp, B11_8);
1737         }
1738         return;
1739     case 0xf0bd: /* fcnvds DRn,FPUL */
1740         CHECK_FPU_ENABLED
1741         {
1742             TCGv_i64 fp = tcg_temp_new_i64();
1743             gen_load_fpr64(ctx, fp, B11_8);
1744             gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
1745         }
1746         return;
1747     case 0xf0ed: /* fipr FVm,FVn */
1748         CHECK_FPU_ENABLED
1749         CHECK_FPSCR_PR_1
1750         {
1751             TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1752             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1753             gen_helper_fipr(tcg_env, m, n);
1754             return;
1755         }
1757     case 0xf0fd: /* ftrv XMTRX,FVn */
1758         CHECK_FPU_ENABLED
1759         CHECK_FPSCR_PR_1
1760         {
1761             if ((ctx->opcode & 0x0300) != 0x0100) {
1762                 goto do_illegal;
1763             }
1764             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1765             gen_helper_ftrv(tcg_env, n);
1766             return;
1767         }
1769     }
1770 #if 0
1771     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1772             ctx->opcode, ctx->base.pc_next);
1773     fflush(stderr);
1774 #endif
1775  do_illegal:
1776     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1777  do_illegal_slot:
1778         gen_save_cpu_state(ctx, true);
1779         gen_helper_raise_slot_illegal_instruction(tcg_env);
1780     } else {
1781         gen_save_cpu_state(ctx, true);
1782         gen_helper_raise_illegal_instruction(tcg_env);
1783     }
1784     ctx->base.is_jmp = DISAS_NORETURN;
1785     return;
1786 
1787  do_fpu_disabled:
1788     gen_save_cpu_state(ctx, true);
1789     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1790         gen_helper_raise_slot_fpu_disable(tcg_env);
1791     } else {
1792         gen_helper_raise_fpu_disable(tcg_env);
1793     }
1794     ctx->base.is_jmp = DISAS_NORETURN;
1795     return;
1796 }
1797 
1798 static void decode_opc(DisasContext * ctx)
1799 {
1800     uint32_t old_flags = ctx->envflags;
1801 
1802     _decode_opc(ctx);
1803 
1804     if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1805         /* go out of the delay slot */
1806         ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1807 
1808         /* When in an exclusive region, we must continue to the end
1809            for conditional branches.  */
1810         if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1811             && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1812             gen_delayed_conditional_jump(ctx);
1813             return;
1814         }
1815         /* Otherwise this is probably an invalid gUSA region.
1816            Drop the GUSA bits so the next TB doesn't see them.  */
1817         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1818 
1819         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1820         if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1821             gen_delayed_conditional_jump(ctx);
1822         } else {
1823             gen_jump(ctx);
1824         }
1825     }
1826 }
1827 
1828 #ifdef CONFIG_USER_ONLY
1829 /*
1830  * Restart with the EXCLUSIVE bit set, within a TB run via
1831  * cpu_exec_step_atomic holding the exclusive lock.
1832  */
1833 static void gen_restart_exclusive(DisasContext *ctx)
1834 {
1835     ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
1836     gen_save_cpu_state(ctx, false);
1837     gen_helper_exclusive(tcg_env);
1838     ctx->base.is_jmp = DISAS_NORETURN;
1839 }
1840 
1841 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1842    Upon an interrupt, a real kernel would simply notice magic values in
1843    the registers and reset the PC to the start of the sequence.
1844 
1845    For QEMU, we cannot do this in quite the same way.  Instead, we notice
1846    the normal start of such a sequence (mov #-x,r15).  While we can handle
1847    any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1848    sequences and transform them into atomic operations as seen by the host.
1849 */
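/* As a concrete (illustrative, untested) example, a gUSA atomic
   increment emitted by the guest looks roughly like:

          mova   1f,r0      ! r0 = address of the region end
          mov    r15,r1     ! save the stack pointer
          mov    #-6,r15    ! enter a 6-byte gUSA region
       0: mov.l  @r4,r2     ! load
          add    #1,r2      ! operate
          mov.l  r2,@r4     ! store
       1: mov    r1,r15     ! leave the region

   The three insns between labels 0 and 1 are what the state machine
   below matches (load, optional move, operation, store) and converts
   into a single host atomic operation.  */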
1850 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1851 {
1852     uint16_t insns[5];
1853     int ld_adr, ld_dst, ld_mop;
1854     int op_dst, op_src, op_opc;
1855     int mv_src, mt_dst, st_src, st_mop;
1856     TCGv op_arg;
1857     uint32_t pc = ctx->base.pc_next;
1858     uint32_t pc_end = ctx->base.tb->cs_base;
1859     int max_insns = (pc_end - pc) / 2;
1860     int i;
1861 
1862     /* The state machine below will consume only a few insns.
1863        If there are more than that in a region, fail now.  */
1864     if (max_insns > ARRAY_SIZE(insns)) {
1865         goto fail;
1866     }
1867 
1868     /* Read all of the insns for the region.  */
1869     for (i = 0; i < max_insns; ++i) {
1870         insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1871     }
1872 
1873     ld_adr = ld_dst = ld_mop = -1;
1874     mv_src = -1;
1875     op_dst = op_src = op_opc = -1;
1876     mt_dst = -1;
1877     st_src = st_mop = -1;
1878     op_arg = NULL;
1879     i = 0;
1880 
1881 #define NEXT_INSN \
1882     do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1883 
1884     /*
1885      * Expect a load to begin the region.
1886      */
1887     NEXT_INSN;
1888     switch (ctx->opcode & 0xf00f) {
1889     case 0x6000: /* mov.b @Rm,Rn */
1890         ld_mop = MO_SB;
1891         break;
1892     case 0x6001: /* mov.w @Rm,Rn */
1893         ld_mop = MO_TESW;
1894         break;
1895     case 0x6002: /* mov.l @Rm,Rn */
1896         ld_mop = MO_TESL;
1897         break;
1898     default:
1899         goto fail;
1900     }
1901     ld_adr = B7_4;
1902     ld_dst = B11_8;
1903     if (ld_adr == ld_dst) {
1904         goto fail;
1905     }
1906     /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1907     op_dst = ld_dst;
1908 
1909     /*
1910      * Expect an optional register move.
1911      */
1912     NEXT_INSN;
1913     switch (ctx->opcode & 0xf00f) {
1914     case 0x6003: /* mov Rm,Rn */
1915         /*
1916          * Here we want to recognize ld_dst being saved for later consumption,
1917          * or for another input register being copied so that ld_dst need not
1918          * be clobbered during the operation.
1919          */
1920         op_dst = B11_8;
1921         mv_src = B7_4;
1922         if (op_dst == ld_dst) {
1923             /* Overwriting the load output.  */
1924             goto fail;
1925         }
1926         if (mv_src != ld_dst) {
1927             /* Copying a new input; constrain op_src to match the load.  */
1928             op_src = ld_dst;
1929         }
1930         break;
1931 
1932     default:
1933         /* Put back and re-examine as operation.  */
1934         --i;
1935     }
1936 
1937     /*
1938      * Expect the operation.
1939      */
1940     NEXT_INSN;
1941     switch (ctx->opcode & 0xf00f) {
1942     case 0x300c: /* add Rm,Rn */
1943         op_opc = INDEX_op_add_i32;
1944         goto do_reg_op;
1945     case 0x2009: /* and Rm,Rn */
1946         op_opc = INDEX_op_and_i32;
1947         goto do_reg_op;
1948     case 0x200a: /* xor Rm,Rn */
1949         op_opc = INDEX_op_xor_i32;
1950         goto do_reg_op;
1951     case 0x200b: /* or Rm,Rn */
1952         op_opc = INDEX_op_or_i32;
1953     do_reg_op:
1954         /* The operation register should be as expected, and the
1955            other input cannot depend on the load.  */
1956         if (op_dst != B11_8) {
1957             goto fail;
1958         }
1959         if (op_src < 0) {
1960             /* Unconstrained input.  */
1961             op_src = B7_4;
1962         } else if (op_src == B7_4) {
1963             /* Constrained input matched load.  All operations are
1964                commutative; "swap" them by "moving" the load output
1965                to the (implicit) first argument and the move source
1966                to the (explicit) second argument.  */
1967             op_src = mv_src;
1968         } else {
1969             goto fail;
1970         }
1971         op_arg = REG(op_src);
1972         break;
1973 
1974     case 0x6007: /* not Rm,Rn */
1975         if (ld_dst != B7_4 || mv_src >= 0) {
1976             goto fail;
1977         }
1978         op_dst = B11_8;
1979         op_opc = INDEX_op_xor_i32;
1980         op_arg = tcg_constant_i32(-1);
1981         break;
1982 
1983     case 0x7000 ... 0x700f: /* add #imm,Rn */
1984         if (op_dst != B11_8 || mv_src >= 0) {
1985             goto fail;
1986         }
1987         op_opc = INDEX_op_add_i32;
1988         op_arg = tcg_constant_i32(B7_0s);
1989         break;
1990 
1991     case 0x3000: /* cmp/eq Rm,Rn */
1992         /* Looking for the middle of a compare-and-swap sequence,
1993            beginning with the compare.  Operands can be either order,
1994            but with only one overlapping the load.  */
1995         if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1996             goto fail;
1997         }
1998         op_opc = INDEX_op_setcond_i32;  /* placeholder */
1999         op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2000         op_arg = REG(op_src);
2001 
2002         NEXT_INSN;
2003         switch (ctx->opcode & 0xff00) {
2004         case 0x8b00: /* bf label */
2005         case 0x8f00: /* bf/s label */
2006             if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2007                 goto fail;
2008             }
2009             if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2010                 break;
2011             }
2012             /* We're looking to unconditionally modify Rn with the
2013                result of the comparison, within the delay slot of
2014                the branch.  This is used by older gcc.  */
2015             NEXT_INSN;
2016             if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2017                 mt_dst = B11_8;
2018             } else {
2019                 goto fail;
2020             }
2021             break;
2022 
2023         default:
2024             goto fail;
2025         }
2026         break;
2027 
2028     case 0x2008: /* tst Rm,Rn */
2029         /* Looking for a compare-and-swap against zero.  */
2030         if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2031             goto fail;
2032         }
2033         op_opc = INDEX_op_setcond_i32;
2034         op_arg = tcg_constant_i32(0);
2035 
2036         NEXT_INSN;
2037         if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2038             || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2039             goto fail;
2040         }
2041         break;
2042 
2043     default:
2044         /* Put back and re-examine as store.  */
2045         --i;
2046     }
2047 
2048     /*
2049      * Expect the store.
2050      */
2051     /* The store must be the last insn.  */
2052     if (i != max_insns - 1) {
2053         goto fail;
2054     }
2055     NEXT_INSN;
2056     switch (ctx->opcode & 0xf00f) {
2057     case 0x2000: /* mov.b Rm,@Rn */
2058         st_mop = MO_UB;
2059         break;
2060     case 0x2001: /* mov.w Rm,@Rn */
2061         st_mop = MO_UW;
2062         break;
2063     case 0x2002: /* mov.l Rm,@Rn */
2064         st_mop = MO_UL;
2065         break;
2066     default:
2067         goto fail;
2068     }
2069     /* The store must match the load.  */
2070     if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2071         goto fail;
2072     }
2073     st_src = B7_4;
2074 
2075 #undef NEXT_INSN
2076 
2077     /*
2078      * Emit the operation.
2079      */
2080     switch (op_opc) {
2081     case -1:
2082         /* No operation found.  Look for exchange pattern.  */
2083         if (st_src == ld_dst || mv_src >= 0) {
2084             goto fail;
2085         }
2086         tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2087                                 ctx->memidx, ld_mop);
2088         break;
2089 
2090     case INDEX_op_add_i32:
2091         if (op_dst != st_src) {
2092             goto fail;
2093         }
2094         if (op_dst == ld_dst && st_mop == MO_UL) {
2095             tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2096                                          op_arg, ctx->memidx, ld_mop);
2097         } else {
2098             tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2099                                          op_arg, ctx->memidx, ld_mop);
2100             if (op_dst != ld_dst) {
2101                 /* Note that mop sizes < 4 cannot use add_fetch
2102                    because it won't carry into the higher bits.  */
2103                 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2104             }
2105         }
2106         break;
2107 
2108     case INDEX_op_and_i32:
2109         if (op_dst != st_src) {
2110             goto fail;
2111         }
2112         if (op_dst == ld_dst) {
2113             tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2114                                          op_arg, ctx->memidx, ld_mop);
2115         } else {
2116             tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2117                                          op_arg, ctx->memidx, ld_mop);
2118             tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2119         }
2120         break;
2121 
2122     case INDEX_op_or_i32:
2123         if (op_dst != st_src) {
2124             goto fail;
2125         }
2126         if (op_dst == ld_dst) {
2127             tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2128                                         op_arg, ctx->memidx, ld_mop);
2129         } else {
2130             tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2131                                         op_arg, ctx->memidx, ld_mop);
2132             tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2133         }
2134         break;
2135 
2136     case INDEX_op_xor_i32:
2137         if (op_dst != st_src) {
2138             goto fail;
2139         }
2140         if (op_dst == ld_dst) {
2141             tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2142                                          op_arg, ctx->memidx, ld_mop);
2143         } else {
2144             tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2145                                          op_arg, ctx->memidx, ld_mop);
2146             tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2147         }
2148         break;
2149 
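    /*
     * The compare-and-swap patterns (cmp/eq or tst, followed by a
     * conditional branch to the region end) become a real cmpxchg:
     * st_src is stored only if the memory word still equals op_arg,
     * and T is derived from whether the exchange succeeded.
     */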
2150     case INDEX_op_setcond_i32:
2151         if (st_src == ld_dst) {
2152             goto fail;
2153         }
2154         tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2155                                    REG(st_src), ctx->memidx, ld_mop);
2156         tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2157         if (mt_dst >= 0) {
2158             tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2159         }
2160         break;
2161 
2162     default:
2163         g_assert_not_reached();
2164     }
2165 
2166     /* The entire region has been translated.  */
2167     ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2168     goto done;
2169 
2170  fail:
2171     qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2172                   pc, pc_end);
2173 
2174     gen_restart_exclusive(ctx);
2175 
2176     /* We're not executing an instruction, but we must report one for the
2177        purposes of accounting within the TB.  We might as well report the
2178        entire region consumed via ctx->base.pc_next so that it's immediately
2179        available in the disassembly dump.  */
2180 
2181  done:
2182     ctx->base.pc_next = pc_end;
2183     ctx->base.num_insns += max_insns - 1;
2184 
2185     /*
2186      * Emit insn_start to cover each of the insns in the region.
2187      * This matches an assert in tcg.c making sure that we have
2188      * tb->icount * insn_start.
2189      */
2190     for (i = 1; i < max_insns; ++i) {
2191         tcg_gen_insn_start(pc + i * 2, ctx->envflags);
2192     }
2193 }
2194 #endif
2195 
2196 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2197 {
2198     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2199     uint32_t tbflags;
2200     int bound;
2201 
2202     ctx->tbflags = tbflags = ctx->base.tb->flags;
2203     ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
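    /* MMU index: 0 for privileged mode (SR.MD set), 1 for user mode. */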
2204     ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2205     /* We don't know if the delayed pc came from a dynamic or static branch,
2206        so assume it is a dynamic branch.  */
2207     ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2208     ctx->features = cpu_env(cs)->features;
2209     ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2210     ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2211                   (tbflags & (1 << SR_RB))) * 0x10;
2212     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2213 
2214 #ifdef CONFIG_USER_ONLY
2215     if (tbflags & TB_FLAG_GUSA_MASK) {
2216         /* In gUSA exclusive region. */
2217         uint32_t pc = ctx->base.pc_next;
2218         uint32_t pc_end = ctx->base.tb->cs_base;
2219         int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2220         int max_insns = (pc_end - pc) / 2;
2221 
2222         if (pc != pc_end + backup || max_insns < 2) {
2223             /* This is a malformed gUSA region.  Don't do anything special,
2224                since the interpreter is likely to get confused.  */
2225             ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2226         } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2227             /* Regardless of single-stepping or the end of the page,
2228                we must complete execution of the gUSA region while
2229                holding the exclusive lock.  */
2230             ctx->base.max_insns = max_insns;
2231             return;
2232         }
2233     }
2234 #endif
2235 
2236     /* Since the ISA is fixed-width, we can bound by the number
2237        of instructions remaining on the page.  */
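    /* E.g. with 4 KiB pages, pc_next == page_base + 0xffc gives
       bound = 2: exactly two 16-bit insns remain on the page.  */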
2238     bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2239     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2240 }
2241 
2242 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2243 {
2244 }
2245 
2246 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2247 {
2248     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2249 
2250     tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2251 }
2252 
2253 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2254 {
2255     CPUSH4State *env = cpu_env(cs);
2256     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2257 
2258 #ifdef CONFIG_USER_ONLY
2259     if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2260         && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
2261         /*
2262          * We're in a gUSA region, and we have not already fallen
2263          * back on using an exclusive region.  Attempt to parse the
2264          * region into a single supported atomic operation.  Failure
2265          * is handled within the parser by raising an exception to
2266          * retry using an exclusive region.
2267          *
2268          * Parsing the region in one block conflicts with plugins,
2269          * so always use exclusive mode if plugins enabled.
2270          */
2271         if (ctx->base.plugin_enabled) {
2272             gen_restart_exclusive(ctx);
2273             ctx->base.pc_next += 2;
2274         } else {
2275             decode_gusa(ctx, env);
2276         }
2277         return;
2278     }
2279 #endif
2280 
2281     ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2282     decode_opc(ctx);
2283     ctx->base.pc_next += 2;
2284 }
2285 
2286 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2287 {
2288     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2289 
2290     if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2291         /* Ending the region of exclusivity.  Clear the bits.  */
2292         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2293     }
2294 
2295     switch (ctx->base.is_jmp) {
2296     case DISAS_STOP:
2297         gen_save_cpu_state(ctx, true);
2298         tcg_gen_exit_tb(NULL, 0);
2299         break;
2300     case DISAS_NEXT:
2301     case DISAS_TOO_MANY:
2302         gen_save_cpu_state(ctx, false);
2303         gen_goto_tb(ctx, 0, ctx->base.pc_next);
2304         break;
2305     case DISAS_NORETURN:
2306         break;
2307     default:
2308         g_assert_not_reached();
2309     }
2310 }
2311 
2312 static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2313                              CPUState *cs, FILE *logfile)
2314 {
2315     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2316     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2317 }
2318 
2319 static const TranslatorOps sh4_tr_ops = {
2320     .init_disas_context = sh4_tr_init_disas_context,
2321     .tb_start           = sh4_tr_tb_start,
2322     .insn_start         = sh4_tr_insn_start,
2323     .translate_insn     = sh4_tr_translate_insn,
2324     .tb_stop            = sh4_tr_tb_stop,
2325     .disas_log          = sh4_tr_disas_log,
2326 };
2327 
2328 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2329                            vaddr pc, void *host_pc)
2330 {
2331     DisasContext ctx;
2332 
2333     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2334 }
2335