/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C)   (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C)   0
#endif

/* Target-specific values for ctx->base.is_jmp.  */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"
void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

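/*
 * Plain-C sketch of the SR handling above (illustration only, not part of
 * the build): Q, M and T are kept in their own variables so TCG ops can use
 * them directly, and the architectural register is reassembled or split on
 * demand.
 */
#if 0
static uint32_t sr_compose(uint32_t sr, uint32_t q, uint32_t m, uint32_t t)
{
    /* gen_read_sr: OR the three cached bits back into their positions. */
    return sr | (q << SR_Q) | (m << SR_M) | (t << SR_T);
}

static void sr_split(uint32_t src, uint32_t *sr,
                     uint32_t *q, uint32_t *m, uint32_t *t)
{
    /* gen_write_sr: mask the bits out of SR and cache them separately. */
    *sr = src & ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T));
    *q = (src >> SR_Q) & 1;
    *m = (src >> SR_M) & 1;
    *t = (src >> SR_T) & 1;
}
#endif
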
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jump(DisasContext *ctx)
{
    if (ctx->delayed_pc == -1) {
        /* The target is not statically known; it necessarily comes from a
           delayed jump, since immediate jumps are conditional jumps.  */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext *ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

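/*
 * XHACK maps an architectural DRn/XDn pair number onto the flat cpu_fregs[]
 * layout: the encoding's low bit (which selects the X bank) is moved up to
 * bit 4, where FREG() XORs in ctx->fbank, while bits 3:1 keep the even pair
 * index.
 */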
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext *ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache. Here, the data written by movca.l is
       never written to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content. If we see ocbi,
       we check whether movca.l was previously done for that address. If
       so, the write should not have hit the memory, so we restore the
       previous content. When we see an instruction that is neither
       movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }
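
    /*
     * Minimal file-scope sketch of the backup bookkeeping described above
     * (illustration only, never compiled: the real helpers live in
     * op_helper.c and keep a list of backups; the single slot and all
     * names below are hypothetical).
     */
#if 0
    typedef struct MovcalBackup {
        uint32_t addr;       /* address movca.l wrote to */
        uint32_t old_value;  /* memory content it overwrote */
        bool valid;
    } MovcalBackup;

    static MovcalBackup backup;

    /* movca.l R0,@Rn: remember the old content, then let the store happen. */
    static void movcal_record(uint32_t addr, uint32_t old_value)
    {
        backup = (MovcalBackup){ .addr = addr, .old_value = old_value,
                                 .valid = true };
    }

    /* ocbi @Rn: the movca.l write must not reach memory; undo it. */
    static void ocbi_restore(uint32_t addr, void (*st32)(uint32_t, uint32_t))
    {
        if (backup.valid && backup.addr == addr) {
            st32(addr, backup.old_value);
            backup.valid = false;
        }
    }

    /* any other instruction: the line may be written back, drop backups. */
    static void discard_backups(void)
    {
        backup.valid = false;
    }
#endif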

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019:                /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b:                /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028:                /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048:                /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008:                /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038:                /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b:                /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) -1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058:                /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018:                /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd:                /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd:                /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd:                /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009:                /* nop */
        return;
    case 0x001b:                /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000:                /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x5000:                /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0xe000:                /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
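
    /*
     * A typical gUSA region, as detected by the mov #imm,Rn case above
     * (illustration only; the negative value loaded into r15 is the
     * distance from the end label, so the kernel can restart the whole
     * sequence if it gets interrupted):
     *
     *     mova   1f, r0        ! r0 = end of region
     *     mov    r15, r1       ! save stack pointer
     *     mov    #(0f-1f), r15 ! negative length: region is active
     * 0:  mov.l  @r2, r3
     *     add    #1, r3
     *     mov.l  r3, @r2       ! the "atomic" update
     * 1:  mov    r1, r15       ! r15 restored: region over
     */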
    case 0x9000:                /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | MO_ALIGN);
        }
        return;
    case 0xd000:                /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | MO_ALIGN);
        }
        return;
    case 0x7000:                /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000:                /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000:                /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003:                /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000:                /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001:                /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002:                /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000:                /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001:                /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002:                /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004:                /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
        }
        return;
    case 0x2005:                /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x2006:                /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x6004:                /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005:                /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006:                /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004:                /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x0005:                /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x0006:                /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x000c:                /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x000d:                /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0x000e:                /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0x6008:                /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
        }
        return;
    case 0x6009:                /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d:                /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
        }
        return;
    case 0x300c:                /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e:                /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
        }
        return;
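
    /*
     * What the two add2 ops above compute, as plain C (sketch only, never
     * compiled): a 33-bit sum whose low 32 bits go to Rn and whose
     * carry-out becomes T.  subc and negc below follow the same pattern
     * with borrows.
     */
#if 0
    static void addc_model(uint32_t *rn, uint32_t rm, uint32_t *t)
    {
        uint64_t sum = (uint64_t)*rn + rm + *t;  /* Rn + Rm + T */
        *rn = (uint32_t)sum;
        *t = (uint32_t)(sum >> 32);              /* carry out of bit 31 */
    }
#endif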
    case 0x300f:                /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2009:                /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000:                /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003:                /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007:                /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006:                /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002:                /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c:                /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
        }
        return;
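
    /*
     * The xor/sub/andc sequence above is the classic zero-byte bit trick,
     * shown here as plain C (sketch only, never compiled): a byte of
     * Rm ^ Rn is zero exactly when the corresponding bytes are equal, and
     * (x - 0x01010101) & ~x & 0x80808080 is non-zero exactly when x
     * contains a zero byte.
     */
#if 0
    static uint32_t cmp_str_model(uint32_t rm, uint32_t rn)
    {
        uint32_t x = rm ^ rn;
        return ((x - 0x01010101) & ~x & 0x80808080) != 0;  /* T bit */
    }
#endif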
    case 0x2007:                /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004:                /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_constant_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
        }
        return;
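
    /*
     * One div1 step, transcribed to plain C (sketch only, never compiled;
     * it mirrors the TCG ops above one for one, including the 64-bit add
     * used to recover the carry).
     */
#if 0
    static void div1_model(uint32_t *rn, uint32_t rm,
                           uint32_t *q, uint32_t m, uint32_t *t)
    {
        uint32_t t0 = *rn >> 31;                /* bit shifted out of Rn */
        uint32_t t1, t2;
        uint64_t acc;

        *rn = (*rn << 1) | *t;                  /* shift T in from the right */

        t1 = (*q ^ m) - 1;                      /* 0xffffffff if Q == M, else 0 */
        t2 = (t1 == 0) ? rm : -rm;              /* add Rm if Q != M, else subtract */
        acc = (uint64_t)*rn + t2 + ((uint64_t)t1 << 32);
        *rn = (uint32_t)acc;

        t1 = ((uint32_t)(acc >> 32) & 1) ^ t0;  /* carry/borrow vs. shifted-out bit */
        *t = t1 ^ 1;
        *q = m ^ t1;
    }
#endif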
    case 0x300d:                /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005:                /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e:                /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f:                /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c:                /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d:                /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f:                /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f:                /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007:                /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f:                /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x200e:                /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x600b:                /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a:                /* negc Rm,Rn */
        {
            TCGv t0 = tcg_constant_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x6007:                /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b:                /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c:                /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
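
    /*
     * Plain-C sketch of shad (sketch only, never compiled; shld below is
     * identical with logical shifts): the right shift by 32 - (Rm & 0x1f)
     * is split into two shifts, (sh ^ 0x1f) and then 1, so that an
     * effective shift of 32 still works where single shifts are only
     * defined for counts 0..31.
     */
#if 0
    static uint32_t shad_model(uint32_t rn, uint32_t rm)
    {
        uint32_t sh = rm & 0x1f;
        uint32_t left = rn << sh;
        uint32_t right = (uint32_t)(((int32_t)rn >> (sh ^ 0x1f)) >> 1);
        return ((int32_t)rm >= 0) ? left : right;
    }
#endif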
    case 0x400d:                /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x3008:                /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a:                /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x300b:                /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2008:                /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0x200a:                /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,Rm,Rn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900:                /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00:                /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x8b00:                /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
        return;
    case 0x8f00:                /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8900:                /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
        return;
    case 0x8d00:                /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8800:                /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400:                /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0xc500:                /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
        }
        return;
    case 0xc600:                /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
        }
        return;
    case 0xc000:                /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0xc100:                /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
        }
        return;
    case 0xc200:                /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
        }
        return;
    case 0x8000:                /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x8100:                /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x8400:                /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x8500:                /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0xc700:                /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
                                  4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00:                /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00:                /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    case 0xc300:                /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_constant_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        return;
    case 0xc800:                /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0xcc00:                /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0xca00:                /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00:                /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e:                /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087:                /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
                            MO_TESL | MO_ALIGN);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082:                /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083:                /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
                                MO_TEUL | MO_ALIGN);
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023:                /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0003:                /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x4015:                /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011:                /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010:                /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b:                /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400b:                /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400e:                /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x4007:                /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x0002:                /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003:                /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk)                             \
  case ldnum:                                                   \
    prechk                                                      \
    tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                     \
    return;                                                     \
  case ldpnum:                                                  \
    prechk                                                      \
    tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx,     \
                        MO_TESL | MO_ALIGN);                    \
    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                \
    return;
#define ST(reg,stnum,stpnum,prechk)                             \
  case stnum:                                                   \
    prechk                                                      \
    tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                     \
    return;                                                     \
  case stpnum:                                                  \
    prechk                                                      \
    {                                                           \
        TCGv addr = tcg_temp_new();                             \
        tcg_gen_subi_i32(addr, REG(B11_8), 4);                  \
        tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx,       \
                            MO_TEUL | MO_ALIGN);                \
        tcg_gen_mov_i32(REG(B11_8), addr);                      \
    }                                                           \
    return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)              \
    LD(reg,ldnum,ldpnum,prechk)                                 \
    ST(reg,stnum,stpnum,prechk)
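    /*
     * Each LDST() use below expands to four cases; e.g. LDST(gbr, 0x401e,
     * 0x4017, 0x0012, 0x4013, {}) yields 0x401e "ldc Rm,GBR", 0x4017
     * "ldc.l @Rm+,GBR", 0x0012 "stc GBR,Rn" and 0x4013 "stc.l GBR,@-Rn".
     */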
1427 	LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1428 	LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1429 	LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1430 	LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1431 	ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1432         LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1433 	LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1434 	LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1435 	LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1436 	LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1437 	LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1438     case 0x406a:		/* lds Rm,FPSCR */
1439 	CHECK_FPU_ENABLED
1440         gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1441         ctx->base.is_jmp = DISAS_STOP;
1442 	return;
1443     case 0x4066:                /* lds.l @Rm+,FPSCR */
1444         CHECK_FPU_ENABLED
1445         {
1446             TCGv addr = tcg_temp_new();
1447             tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1448                                 MO_TESL | MO_ALIGN);
1449             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1450             gen_helper_ld_fpscr(cpu_env, addr);
1451             ctx->base.is_jmp = DISAS_STOP;
1452         }
1453         return;
1454     case 0x006a:		/* sts FPSCR,Rn */
1455 	CHECK_FPU_ENABLED
1456 	tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1457 	return;
1458     case 0x4062:                /* sts FPSCR,@-Rn */
1459         CHECK_FPU_ENABLED
1460         {
1461             TCGv addr, val;
1462             val = tcg_temp_new();
1463             tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1464             addr = tcg_temp_new();
1465             tcg_gen_subi_i32(addr, REG(B11_8), 4);
1466             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1467             tcg_gen_mov_i32(REG(B11_8), addr);
1468         }
1469         return;
1470     case 0x00c3:		/* movca.l R0,@Rm */
1471         {
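            /* Load the old memory contents first so that helper_movcal can
               record them; a later ocbi may then undo this store by
               restoring the saved value (see helper_ocbi). */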
1472             TCGv val = tcg_temp_new();
1473             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1474                                 MO_TEUL | MO_ALIGN);
1475             gen_helper_movcal(cpu_env, REG(B11_8), val);
1476             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1477                                 MO_TEUL | MO_ALIGN);
1478         }
1479         ctx->has_movcal = 1;
1480 	return;
1481     case 0x40a9:                /* movua.l @Rm,R0 */
1482         CHECK_SH4A
1483         /* Load non-boundary-aligned data */
1484         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1485                             MO_TEUL | MO_UNALN);
1486         return;
1487     case 0x40e9:                /* movua.l @Rm+,R0 */
1488         CHECK_SH4A
1489         /* Load non-boundary-aligned data */
1490         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1491                             MO_TEUL | MO_UNALN);
1492         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1493         return;
1494     case 0x0029:		/* movt Rn */
1495         tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1496 	return;
1497     case 0x0073:
1498         /* MOVCO.L
1499          *     LDST -> T
1500          *     If (T == 1) R0 -> (Rn)
1501          *     0 -> LDST
1502          *
1503          * The above description doesn't work in a parallel context.
1504          * Since we currently support no smp boards, this implies user-mode.
1505          * But we can still support the official mechanism while user-mode
1506          * is single-threaded.  */
1507         CHECK_SH4A
1508         {
1509             TCGLabel *fail = gen_new_label();
1510             TCGLabel *done = gen_new_label();
1511 
1512             if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1513                 TCGv tmp;
1514 
1515                 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1516                                    cpu_lock_addr, fail);
1517                 tmp = tcg_temp_new();
1518                 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1519                                            REG(0), ctx->memidx,
1520                                            MO_TEUL | MO_ALIGN);
1521                 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1522             } else {
1523                 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1524                 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1525                                     MO_TEUL | MO_ALIGN);
1526                 tcg_gen_movi_i32(cpu_sr_t, 1);
1527             }
1528             tcg_gen_br(done);
1529 
1530             gen_set_label(fail);
1531             tcg_gen_movi_i32(cpu_sr_t, 0);
1532 
1533             gen_set_label(done);
1534             tcg_gen_movi_i32(cpu_lock_addr, -1);
1535         }
1536         return;
1537     case 0x0063:
1538         /* MOVLI.L @Rm,R0
1539          *     1 -> LDST
1540          *     (Rm) -> R0
1541          *     When interrupt/exception
1542          *     occurred 0 -> LDST
1543          *
1544          * In a parallel context, we must also save the loaded value
1545          * for use with the cmpxchg that we'll use with movco.l.  */
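        /*
         * Taken together, movli.l/movco.l form a LL/SC pair.  An
         * illustrative (not from the manual) guest retry loop:
         *
         *    0: movli.l @r1,r0      ! load-linked, 1 -> LDST
         *       add     #1,r0
         *       movco.l r0,@r1      ! store-conditional, T = success
         *       bf      0b
         *
         * Under CF_PARALLEL the link is validated with a cmpxchg against
         * cpu_lock_value, which is weaker than a real LL/SC (it can
         * suffer ABA), but matches how the insns are used in practice.
         */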
1546         CHECK_SH4A
1547         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1548             TCGv tmp = tcg_temp_new();
1549             tcg_gen_mov_i32(tmp, REG(B11_8));
1550             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1551                                 MO_TESL | MO_ALIGN);
1552             tcg_gen_mov_i32(cpu_lock_value, REG(0));
1553             tcg_gen_mov_i32(cpu_lock_addr, tmp);
1554         } else {
1555             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1556                                 MO_TESL | MO_ALIGN);
1557             tcg_gen_movi_i32(cpu_lock_addr, 0);
1558         }
1559         return;
1560     case 0x0093:                /* ocbi @Rn */
1561         {
1562             gen_helper_ocbi(cpu_env, REG(B11_8));
1563         }
1564         return;
1565     case 0x00a3:		/* ocbp @Rn */
1566     case 0x00b3:		/* ocbwb @Rn */
1567         /* These instructions are supposed to do nothing in case of
1568            a cache miss. Given that we only partially emulate caches
1569            it is safe to simply ignore them. */
1570 	return;
1571     case 0x0083:		/* pref @Rn */
1572 	return;
1573     case 0x00d3:		/* prefi @Rn */
1574         CHECK_SH4A
1575         return;
1576     case 0x00e3:		/* icbi @Rn */
1577         CHECK_SH4A
1578         return;
1579     case 0x00ab:		/* synco */
1580         CHECK_SH4A
1581         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1582         return;
1583     case 0x4024:		/* rotcl Rn */
1584 	{
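            /* Rotate left through T: the old T becomes bit 0 and the old
               bit 31 becomes the new T (rotcr below is the mirror image). */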
1585 	    TCGv tmp = tcg_temp_new();
1586             tcg_gen_mov_i32(tmp, cpu_sr_t);
1587             tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1588 	    tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1589             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1590 	}
1591 	return;
1592     case 0x4025:		/* rotcr Rn */
1593 	{
1594 	    TCGv tmp = tcg_temp_new();
1595             tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1596             tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1597 	    tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1598             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1599 	}
1600 	return;
1601     case 0x4004:                /* rotl Rn */
1602         tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1603         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1604         return;
1605     case 0x4005:                /* rotr Rn */
1606         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1607         tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1608         return;
1609     case 0x4000:                /* shll Rn */
1610     case 0x4020:                /* shal Rn */
1611         tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1612         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1613         return;
1614     case 0x4021:                /* shar Rn */
1615         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1616         tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1617         return;
1618     case 0x4001:                /* shlr Rn */
1619         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1620         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1621         return;
1622     case 0x4008:                /* shll2 Rn */
1623         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1624         return;
1625     case 0x4018:                /* shll8 Rn */
1626         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1627         return;
1628     case 0x4028:                /* shll16 Rn */
1629         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1630         return;
1631     case 0x4009:                /* shlr2 Rn */
1632         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1633         return;
1634     case 0x4019:                /* shlr8 Rn */
1635         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1636         return;
1637     case 0x4029:                /* shlr16 Rn */
1638         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1639         return;
1640     case 0x401b:		/* tas.b @Rn */
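        /* tas.b atomically sets bit 7 of the byte at @Rn and sets T iff
           the byte read back was zero; the fetch-or plus setcond below
           implement exactly that read-modify-write. */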
1641         tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1642                                     tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1643         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
1644         return;
1645     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1646 	CHECK_FPU_ENABLED
1647         tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1648 	return;
1649     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1650 	CHECK_FPU_ENABLED
1651         tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1652 	return;
1653     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1654 	CHECK_FPU_ENABLED
1655         if (ctx->tbflags & FPSCR_PR) {
1656 	    TCGv_i64 fp;
1657             if (ctx->opcode & 0x0100) {
1658                 goto do_illegal;
1659             }
1660 	    fp = tcg_temp_new_i64();
1661             gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1662             gen_store_fpr64(ctx, fp, B11_8);
1663 	}
1664 	else {
1665             gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1666 	}
1667 	return;
1668     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1669 	CHECK_FPU_ENABLED
1670         if (ctx->tbflags & FPSCR_PR) {
1671 	    TCGv_i64 fp;
1672             if (ctx->opcode & 0x0100) {
1673                 goto do_illegal;
1674             }
1675 	    fp = tcg_temp_new_i64();
1676             gen_load_fpr64(ctx, fp, B11_8);
1677             gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1678 	}
1679 	else {
1680             gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1681 	}
1682 	return;
1683     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1684 	CHECK_FPU_ENABLED
1685         tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1686 	return;
1687     case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1688 	CHECK_FPU_ENABLED
1689         tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1690 	return;
1691     case 0xf06d: /* fsqrt FRn */
1692 	CHECK_FPU_ENABLED
1693         if (ctx->tbflags & FPSCR_PR) {
1694             if (ctx->opcode & 0x0100) {
1695                 goto do_illegal;
1696             }
1697 	    TCGv_i64 fp = tcg_temp_new_i64();
1698             gen_load_fpr64(ctx, fp, B11_8);
1699             gen_helper_fsqrt_DT(fp, cpu_env, fp);
1700             gen_store_fpr64(ctx, fp, B11_8);
1701 	} else {
1702             gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1703 	}
1704 	return;
1705     case 0xf07d: /* fsrra FRn */
1706 	CHECK_FPU_ENABLED
1707         CHECK_FPSCR_PR_0
1708         gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1709         return;
1710     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1711 	CHECK_FPU_ENABLED
1712         CHECK_FPSCR_PR_0
1713         tcg_gen_movi_i32(FREG(B11_8), 0);
1714         return;
1715     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1716 	CHECK_FPU_ENABLED
1717         CHECK_FPSCR_PR_0
1718         tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1719         return;
1720     case 0xf0ad: /* fcnvsd FPUL,DRn */
1721 	CHECK_FPU_ENABLED
1722 	{
1723 	    TCGv_i64 fp = tcg_temp_new_i64();
1724             gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1725             gen_store_fpr64(ctx, fp, B11_8);
1726 	}
1727 	return;
1728     case 0xf0bd: /* fcnvds DRn,FPUL */
1729 	CHECK_FPU_ENABLED
1730 	{
1731 	    TCGv_i64 fp = tcg_temp_new_i64();
1732             gen_load_fpr64(ctx, fp, B11_8);
1733             gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1734 	}
1735 	return;
1736     case 0xf0ed: /* fipr FVm,FVn */
1737         CHECK_FPU_ENABLED
1738         CHECK_FPSCR_PR_1
1739         {
1740             TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1741             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1742             gen_helper_fipr(cpu_env, m, n);
1743             return;
1744         }
1745         break;
1746     case 0xf0fd: /* ftrv XMTRX,FVn */
1747         CHECK_FPU_ENABLED
1748         CHECK_FPSCR_PR_1
1749         {
1750             if ((ctx->opcode & 0x0300) != 0x0100) {
1751                 goto do_illegal;
1752             }
1753             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1754             gen_helper_ftrv(cpu_env, n);
1755             return;
1756         }
1757         break;
1758     }
1759 #if 0
1760     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1761             ctx->opcode, ctx->base.pc_next);
1762     fflush(stderr);
1763 #endif
1764  do_illegal:
1765     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1766  do_illegal_slot:
1767         gen_save_cpu_state(ctx, true);
1768         gen_helper_raise_slot_illegal_instruction(cpu_env);
1769     } else {
1770         gen_save_cpu_state(ctx, true);
1771         gen_helper_raise_illegal_instruction(cpu_env);
1772     }
1773     ctx->base.is_jmp = DISAS_NORETURN;
1774     return;
1775 
1776  do_fpu_disabled:
1777     gen_save_cpu_state(ctx, true);
1778     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1779         gen_helper_raise_slot_fpu_disable(cpu_env);
1780     } else {
1781         gen_helper_raise_fpu_disable(cpu_env);
1782     }
1783     ctx->base.is_jmp = DISAS_NORETURN;
1784     return;
1785 }
1786 
1787 static void decode_opc(DisasContext * ctx)
1788 {
1789     uint32_t old_flags = ctx->envflags;
1790 
1791     _decode_opc(ctx);
1792 
1793     if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1794         /* go out of the delay slot */
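        /* E.g. for "bt/s target; add #1,r0" the add from the slot has just
           been translated; now emit the branch that was postponed. */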
1795         ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1796 
1797         /* When in an exclusive region, we must continue to the end
1798            for conditional branches.  */
1799         if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1800             && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1801             gen_delayed_conditional_jump(ctx);
1802             return;
1803         }
1804         /* Otherwise this is probably an invalid gUSA region.
1805            Drop the GUSA bits so the next TB doesn't see them.  */
1806         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1807 
1808         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1809         if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1810 	    gen_delayed_conditional_jump(ctx);
1811         } else {
1812             gen_jump(ctx);
1813 	}
1814     }
1815 }
1816 
1817 #ifdef CONFIG_USER_ONLY
1818 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1819    Upon an interrupt, a real kernel would simply notice magic values in
1820    the registers and reset the PC to the start of the sequence.
1821 
1822    For QEMU, we cannot do this in quite the same way.  Instead, we notice
1823    the normal start of such a sequence (mov #-x,r15).  While we can handle
1824    any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1825    sequences and transform them into atomic operations as seen by the host.
1826 */
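/* A typical gUSA region, as emitted by the SH port of libc (illustrative
   only; the exact registers vary):

       mova   1f,r0             ! r0 = address of the end of the region
       mov    r15,r1            ! save the stack pointer
       mov    #(0f-1f),r15      ! r15 = -(region length) marks it active
    0: mov.l  @r2,r3            ! load
       add    r4,r3             ! operate
       mov.l  r3,@r2            ! store
    1: mov    r1,r15            ! restore r15: region complete

   The state machine below recognizes the load/op/store core of such a
   region and replaces it with a single host atomic operation. */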
1827 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1828 {
1829     uint16_t insns[5];
1830     int ld_adr, ld_dst, ld_mop;
1831     int op_dst, op_src, op_opc;
1832     int mv_src, mt_dst, st_src, st_mop;
1833     TCGv op_arg;
1834     uint32_t pc = ctx->base.pc_next;
1835     uint32_t pc_end = ctx->base.tb->cs_base;
1836     int max_insns = (pc_end - pc) / 2;
1837     int i;
1838 
1839     /* The state machine below will consume only a few insns.
1840        If there are more than that in a region, fail now.  */
1841     if (max_insns > ARRAY_SIZE(insns)) {
1842         goto fail;
1843     }
1844 
1845     /* Read all of the insns for the region.  */
1846     for (i = 0; i < max_insns; ++i) {
1847         insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1848     }
1849 
1850     ld_adr = ld_dst = ld_mop = -1;
1851     mv_src = -1;
1852     op_dst = op_src = op_opc = -1;
1853     mt_dst = -1;
1854     st_src = st_mop = -1;
1855     op_arg = NULL;
1856     i = 0;
1857 
1858 #define NEXT_INSN \
1859     do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1860 
1861     /*
1862      * Expect a load to begin the region.
1863      */
1864     NEXT_INSN;
1865     switch (ctx->opcode & 0xf00f) {
1866     case 0x6000: /* mov.b @Rm,Rn */
1867         ld_mop = MO_SB;
1868         break;
1869     case 0x6001: /* mov.w @Rm,Rn */
1870         ld_mop = MO_TESW;
1871         break;
1872     case 0x6002: /* mov.l @Rm,Rn */
1873         ld_mop = MO_TESL;
1874         break;
1875     default:
1876         goto fail;
1877     }
1878     ld_adr = B7_4;
1879     ld_dst = B11_8;
1880     if (ld_adr == ld_dst) {
1881         goto fail;
1882     }
1883     /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1884     op_dst = ld_dst;
1885 
1886     /*
1887      * Expect an optional register move.
1888      */
1889     NEXT_INSN;
1890     switch (ctx->opcode & 0xf00f) {
1891     case 0x6003: /* mov Rm,Rn */
1892         /*
1893          * Here we want to recognize ld_dst being saved for later consumption,
1894          * or for another input register being copied so that ld_dst need not
1895          * be clobbered during the operation.
1896          */
1897         op_dst = B11_8;
1898         mv_src = B7_4;
1899         if (op_dst == ld_dst) {
1900             /* Overwriting the load output.  */
1901             goto fail;
1902         }
1903         if (mv_src != ld_dst) {
1904             /* Copying a new input; constrain op_src to match the load.  */
1905             op_src = ld_dst;
1906         }
1907         break;
1908 
1909     default:
1910         /* Put back and re-examine as operation.  */
1911         --i;
1912     }
1913 
1914     /*
1915      * Expect the operation.
1916      */
1917     NEXT_INSN;
1918     switch (ctx->opcode & 0xf00f) {
1919     case 0x300c: /* add Rm,Rn */
1920         op_opc = INDEX_op_add_i32;
1921         goto do_reg_op;
1922     case 0x2009: /* and Rm,Rn */
1923         op_opc = INDEX_op_and_i32;
1924         goto do_reg_op;
1925     case 0x200a: /* xor Rm,Rn */
1926         op_opc = INDEX_op_xor_i32;
1927         goto do_reg_op;
1928     case 0x200b: /* or Rm,Rn */
1929         op_opc = INDEX_op_or_i32;
1930     do_reg_op:
1931         /* The operation register should be as expected, and the
1932            other input cannot depend on the load.  */
1933         if (op_dst != B11_8) {
1934             goto fail;
1935         }
1936         if (op_src < 0) {
1937             /* Unconstrained input.  */
1938             op_src = B7_4;
1939         } else if (op_src == B7_4) {
1940             /* Constrained input matched load.  All operations are
1941                commutative; "swap" them by "moving" the load output
1942                to the (implicit) first argument and the move source
1943                to the (explicit) second argument.  */
1944             op_src = mv_src;
1945         } else {
1946             goto fail;
1947         }
1948         op_arg = REG(op_src);
1949         break;
1950 
1951     case 0x6007: /* not Rm,Rn */
1952         if (ld_dst != B7_4 || mv_src >= 0) {
1953             goto fail;
1954         }
1955         op_dst = B11_8;
1956         op_opc = INDEX_op_xor_i32;
1957         op_arg = tcg_constant_i32(-1);
1958         break;
1959 
1960     case 0x7000 ... 0x700f: /* add #imm,Rn */
1961         if (op_dst != B11_8 || mv_src >= 0) {
1962             goto fail;
1963         }
1964         op_opc = INDEX_op_add_i32;
1965         op_arg = tcg_constant_i32(B7_0s);
1966         break;
1967 
1968     case 0x3000: /* cmp/eq Rm,Rn */
1969         /* Looking for the middle of a compare-and-swap sequence,
1970            beginning with the compare.  Operands can be either order,
1971            but with only one overlapping the load.  */
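        /* Illustratively (hypothetical register choice), the shape is:
               mov.l @r1,r0 ; cmp/eq r2,r0 ; bf 1f ; mov.l r3,@r1 ; 1:
           which is turned into a single host cmpxchg further below. */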
1972         if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1973             goto fail;
1974         }
1975         op_opc = INDEX_op_setcond_i32;  /* placeholder */
1976         op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
1977         op_arg = REG(op_src);
1978 
1979         NEXT_INSN;
1980         switch (ctx->opcode & 0xff00) {
1981         case 0x8b00: /* bf label */
1982         case 0x8f00: /* bf/s label */
1983             if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
1984                 goto fail;
1985             }
1986             if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
1987                 break;
1988             }
1989             /* We're looking to unconditionally modify Rn with the
1990                result of the comparison, within the delay slot of
1991                the branch.  This is used by older gcc.  */
1992             NEXT_INSN;
1993             if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
1994                 mt_dst = B11_8;
1995             } else {
1996                 goto fail;
1997             }
1998             break;
1999 
2000         default:
2001             goto fail;
2002         }
2003         break;
2004 
2005     case 0x2008: /* tst Rm,Rn */
2006         /* Looking for a compare-and-swap against zero.  */
2007         if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2008             goto fail;
2009         }
2010         op_opc = INDEX_op_setcond_i32;
2011         op_arg = tcg_constant_i32(0);
2012 
2013         NEXT_INSN;
2014         if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2015             || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2016             goto fail;
2017         }
2018         break;
2019 
2020     default:
2021         /* Put back and re-examine as store.  */
2022         --i;
2023     }
2024 
2025     /*
2026      * Expect the store.
2027      */
2028     /* The store must be the last insn.  */
2029     if (i != max_insns - 1) {
2030         goto fail;
2031     }
2032     NEXT_INSN;
2033     switch (ctx->opcode & 0xf00f) {
2034     case 0x2000: /* mov.b Rm,@Rn */
2035         st_mop = MO_UB;
2036         break;
2037     case 0x2001: /* mov.w Rm,@Rn */
2038         st_mop = MO_UW;
2039         break;
2040     case 0x2002: /* mov.l Rm,@Rn */
2041         st_mop = MO_UL;
2042         break;
2043     default:
2044         goto fail;
2045     }
2046     /* The store must match the load.  */
2047     if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2048         goto fail;
2049     }
2050     st_src = B7_4;
2051 
2052 #undef NEXT_INSN
2053 
2054     /*
2055      * Emit the operation.
2056      */
2057     switch (op_opc) {
2058     case -1:
2059         /* No operation found.  Look for exchange pattern.  */
2060         if (st_src == ld_dst || mv_src >= 0) {
2061             goto fail;
2062         }
2063         tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2064                                 ctx->memidx, ld_mop);
2065         break;
2066 
2067     case INDEX_op_add_i32:
2068         if (op_dst != st_src) {
2069             goto fail;
2070         }
2071         if (op_dst == ld_dst && st_mop == MO_UL) {
2072             tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2073                                          op_arg, ctx->memidx, ld_mop);
2074         } else {
2075             tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2076                                          op_arg, ctx->memidx, ld_mop);
2077             if (op_dst != ld_dst) {
2078                 /* Note that mop sizes < 4 cannot use add_fetch
2079                    because it won't carry into the higher bits.  */
2080                 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2081             }
2082         }
2083         break;
2084 
2085     case INDEX_op_and_i32:
2086         if (op_dst != st_src) {
2087             goto fail;
2088         }
2089         if (op_dst == ld_dst) {
2090             tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2091                                          op_arg, ctx->memidx, ld_mop);
2092         } else {
2093             tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2094                                          op_arg, ctx->memidx, ld_mop);
2095             tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2096         }
2097         break;
2098 
2099     case INDEX_op_or_i32:
2100         if (op_dst != st_src) {
2101             goto fail;
2102         }
2103         if (op_dst == ld_dst) {
2104             tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2105                                         op_arg, ctx->memidx, ld_mop);
2106         } else {
2107             tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2108                                         op_arg, ctx->memidx, ld_mop);
2109             tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2110         }
2111         break;
2112 
2113     case INDEX_op_xor_i32:
2114         if (op_dst != st_src) {
2115             goto fail;
2116         }
2117         if (op_dst == ld_dst) {
2118             tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2119                                          op_arg, ctx->memidx, ld_mop);
2120         } else {
2121             tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2122                                          op_arg, ctx->memidx, ld_mop);
2123             tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2124         }
2125         break;
2126 
2127     case INDEX_op_setcond_i32:
2128         if (st_src == ld_dst) {
2129             goto fail;
2130         }
2131         tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2132                                    REG(st_src), ctx->memidx, ld_mop);
2133         tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2134         if (mt_dst >= 0) {
2135             tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2136         }
2137         break;
2138 
2139     default:
2140         g_assert_not_reached();
2141     }
2142 
2143     /* The entire region has been translated.  */
2144     ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2145     ctx->base.pc_next = pc_end;
2146     ctx->base.num_insns += max_insns - 1;
2147     return;
2148 
2149  fail:
2150     qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2151                   pc, pc_end);
2152 
2153     /* Restart with the EXCLUSIVE bit set, within a TB run via
2154        cpu_exec_step_atomic holding the exclusive lock.  */
2155     ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
2156     gen_save_cpu_state(ctx, false);
2157     gen_helper_exclusive(cpu_env);
2158     ctx->base.is_jmp = DISAS_NORETURN;
2159 
2160     /* We're not executing an instruction, but we must report one for the
2161        purposes of accounting within the TB.  We might as well report the
2162        entire region consumed via ctx->base.pc_next so that it's immediately
2163        available in the disassembly dump.  */
2164     ctx->base.pc_next = pc_end;
2165     ctx->base.num_insns += max_insns - 1;
2166 }
2167 #endif
2168 
2169 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2170 {
2171     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2172     CPUSH4State *env = cs->env_ptr;
2173     uint32_t tbflags;
2174     int bound;
2175 
2176     ctx->tbflags = tbflags = ctx->base.tb->flags;
2177     ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2178     ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2179     /* We don't know if the delayed pc came from a dynamic or static branch,
2180        so assume it is a dynamic branch.  */
2181     ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2182     ctx->features = env->features;
2183     ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2184     ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2185                   (tbflags & (1 << SR_RB))) * 0x10;
2186     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2187 
2188 #ifdef CONFIG_USER_ONLY
2189     if (tbflags & TB_FLAG_GUSA_MASK) {
2190         /* In gUSA exclusive region. */
2191         uint32_t pc = ctx->base.pc_next;
2192         uint32_t pc_end = ctx->base.tb->cs_base;
2193         int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2194         int max_insns = (pc_end - pc) / 2;
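        /* backup is the (negative) immediate from the "mov #-x,r15" that
           opened the region, so a well-formed region has
           pc == pc_end + backup. */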
2195 
2196         if (pc != pc_end + backup || max_insns < 2) {
2197             /* This is a malformed gUSA region.  Don't do anything special,
2198                since the interpreter is likely to get confused.  */
2199             ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2200         } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2201             /* Regardless of single-stepping or the end of the page,
2202                we must complete execution of the gUSA region while
2203                holding the exclusive lock.  */
2204             ctx->base.max_insns = max_insns;
2205             return;
2206         }
2207     }
2208 #endif
2209 
2210     /* Since the ISA is fixed-width, we can bound by the number
2211        of instructions remaining on the page.  */
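    /* (TARGET_PAGE_MASK is negative, so -(pc | TARGET_PAGE_MASK) is the
       number of bytes remaining on the page, and each insn is 2 bytes.) */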
2212     bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2213     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2214 }
2215 
2216 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2217 {
2218 }
2219 
2220 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2221 {
2222     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2223 
2224     tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2225 }
2226 
2227 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2228 {
2229     CPUSH4State *env = cs->env_ptr;
2230     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2231 
2232 #ifdef CONFIG_USER_ONLY
2233     if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2234         && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
2235         /* We're in a gUSA region, and we have not already fallen
2236            back on using an exclusive region.  Attempt to parse the
2237            region into a single supported atomic operation.  Failure
2238            is handled within the parser by raising an exception to
2239            retry using an exclusive region.  */
2240         decode_gusa(ctx, env);
2241         return;
2242     }
2243 #endif
2244 
2245     ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2246     decode_opc(ctx);
2247     ctx->base.pc_next += 2;
2248 }
2249 
2250 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2251 {
2252     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2253 
2254     if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2255         /* Ending the region of exclusivity.  Clear the bits.  */
2256         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2257     }
2258 
2259     switch (ctx->base.is_jmp) {
2260     case DISAS_STOP:
2261         gen_save_cpu_state(ctx, true);
2262         tcg_gen_exit_tb(NULL, 0);
2263         break;
2264     case DISAS_NEXT:
2265     case DISAS_TOO_MANY:
2266         gen_save_cpu_state(ctx, false);
2267         gen_goto_tb(ctx, 0, ctx->base.pc_next);
2268         break;
2269     case DISAS_NORETURN:
2270         break;
2271     default:
2272         g_assert_not_reached();
2273     }
2274 }
2275 
2276 static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2277                              CPUState *cs, FILE *logfile)
2278 {
2279     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2280     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2281 }
2282 
2283 static const TranslatorOps sh4_tr_ops = {
2284     .init_disas_context = sh4_tr_init_disas_context,
2285     .tb_start           = sh4_tr_tb_start,
2286     .insn_start         = sh4_tr_insn_start,
2287     .translate_insn     = sh4_tr_translate_insn,
2288     .tb_stop            = sh4_tr_tb_stop,
2289     .disas_log          = sh4_tr_disas_log,
2290 };
2291 
2292 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2293                            target_ulong pc, void *host_pc)
2294 {
2295     DisasContext ctx;
2296 
2297     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2298 }
2299