xref: /openbmc/qemu/target/sh4/translate.c (revision f14eced5)
/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C)   ((C)->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C)   0
#endif

/* Target-specific values for ctx->base.is_jmp.  */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(tcg_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(tcg_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

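/*
 * SR is kept split for speed: cpu_sr holds every bit except Q, M and T,
 * which are zero there and live as 0/1 values in cpu_sr_q, cpu_sr_m and
 * cpu_sr_t.  gen_read_sr reassembles the architectural value and
 * gen_write_sr scatters one back into the four parts.
 */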
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t)-1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

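/*
 * Emit a jump to 'dest': chain TBs directly via goto_tb when the target
 * is acceptable and we are not in a gUSA exclusive region; otherwise set
 * PC and either exit to the main loop or do an indirect lookup-and-goto.
 */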
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jump(DisasContext *ctx)
{
    if (ctx->delayed_pc == -1) {
        /* The target is not statically known; it necessarily comes from a
           delayed jump, since immediate jumps are conditional jumps.  */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext *ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

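/* Opcode field extraction: B<hi>_<lo> yields bits <hi>..<lo> of the
   instruction; the 's' variants are sign-extended (byte immediates and
   the 12-bit branch displacement). */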
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

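/*
 * With FPSCR.SZ set, a 64-bit move names a DRn/XDn pair and bit 0 of the
 * register field selects the XD (bank 1) half.  XHACK relocates that bit
 * to bit 4, which is where the bank bit sits in the cpu_fregs index, and
 * clears it from the pair number.
 */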
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext *ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance;
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache.  Here, the data written by movca.l is
       never committed to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory but also remember the previous content.  If we see ocbi,
       we check whether movca.l was done for that address earlier.  If
       so, the write should not have hit the memory, so we restore the
       previous content.  When we see an instruction that is neither
       movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start
       of the TB, or if we already saw movca.l in this TB and did not
       flush stores yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093   /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(tcg_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019:                /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b:                /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t)-1;
        return;
    case 0x0028:                /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048:                /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008:                /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038:                /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(tcg_env);
        return;
    case 0x002b:                /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t)-1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058:                /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018:                /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd:                /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd:                /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd:                /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009:                /* nop */
        return;
    case 0x001b:                /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(tcg_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000:                /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x5000:                /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0xe000:                /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000:                /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | MO_ALIGN);
        }
        return;
    case 0xd000:                /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | MO_ALIGN);
        }
        return;
    case 0x7000:                /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000:                /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000:                /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003:                /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000:                /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001:                /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002:                /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000:                /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001:                /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002:                /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004:                /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            /* modify register status */
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x2005:                /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x2006:                /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x6004:                /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005:                /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006:                /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004:                /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x0005:                /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x0006:                /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
        }
        return;
    case 0x000c:                /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x000d:                /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0x000e:                /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
        }
        return;
    case 0x6008:                /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
        }
        return;
    case 0x6009:                /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d:                /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
        }
        return;
    case 0x300c:                /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e:                /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
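            /* addc: Rn = Rn + Rm + T, T = carry out.  Two add2 ops build
               the 33-bit sum from 32-bit halves: first t1:T = Rm + T,
               then Rn:T = Rn + t1, with each carry landing in T.  */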
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
        }
        return;
    case 0x300f:                /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            /* T = signed overflow; the sum is written back to Rn.  */
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2009:                /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000:                /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003:                /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007:                /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006:                /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002:                /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c:                /* cmp/str Rm,Rn */
        {
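            /* cmp/str sets T if any of the four byte lanes of Rm and Rn
               are equal: the XOR turns equal bytes into zero, and the
               classic zero-byte test ((x - 0x01010101) & ~x & 0x80808080)
               detects them.  */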
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
        }
        return;
    case 0x2007:                /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004:                /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_constant_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
        }
        return;
    case 0x300d:                /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005:                /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e:                /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f:                /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c:                /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d:                /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f:                /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            gen_helper_macl(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f:                /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            /* mac.w operands are 16-bit words */
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
                                MO_TESW | MO_ALIGN);
            gen_helper_macw(tcg_env, arg0, arg1);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007:                /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f:                /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x200e:                /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
        }
        return;
    case 0x600b:                /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a:                /* negc Rm,Rn */
        {
            TCGv t0 = tcg_constant_i32(0);
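            /* negc: Rn = 0 - Rm - T, T = borrow.  Form Rm + T (with its
               carry) via add2, then subtract that 33-bit value from zero
               with sub2; the resulting borrow is masked into T.  */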
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x6007:                /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b:                /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c:                /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x400d:                /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
        }
        return;
    case 0x3008:                /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a:                /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_constant_tl(0);
            t1 = tcg_temp_new();
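            /* subc: Rn = Rn - Rm - T, T = borrow.  As in addc: first
               t1:T = Rm + T, then Rn:T = Rn - t1 via sub2, with the
               accumulated borrow masked into T.  */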
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
        }
        return;
    case 0x300b:                /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            /* T = signed overflow: ((Rn ^ Rm) & (result ^ Rn)) >> 31 */
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_gen_mov_i32(REG(B11_8), t0);
        }
        return;
    case 0x2008:                /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0x200a:                /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
                                MO_TEUQ | MO_ALIGN);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
                                MO_TEUL | MO_ALIGN);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
                                    MO_TEUQ | MO_ALIGN);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
                                    MO_TEUL | MO_ALIGN);
            }
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), tcg_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900:                /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00:                /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x8b00:                /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
        return;
    case 0x8f00:                /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8900:                /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
        return;
    case 0x8d00:                /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8800:                /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400:                /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0xc500:                /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
        }
        return;
    case 0xc600:                /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
        }
        return;
    case 0xc000:                /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0xc100:                /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
        }
        return;
    case 0xc200:                /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
        }
        return;
    case 0x8000:                /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
        }
        return;
    case 0x8100:                /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
        }
        return;
    case 0x8400:                /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
        }
        return;
    case 0x8500:                /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
        }
        return;
    case 0xc700:                /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
                                  4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00:                /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00:                /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    case 0xc300:                /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_constant_i32(B7_0);
            gen_helper_trapa(tcg_env, imm);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        return;
    case 0xc800:                /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0xcc00:                /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
        }
        return;
    case 0xca00:                /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00:                /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e:                /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087:                /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
                            MO_TESL | MO_ALIGN);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082:                /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083:                /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
                                MO_TEUL | MO_ALIGN);
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023:                /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t)-1;
        return;
    case 0x0003:                /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t)-1;
        return;
    case 0x4015:                /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011:                /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010:                /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b:                /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t)-1;
        return;
    case 0x400b:                /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t)-1;
        return;
    case 0x400e:                /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x4007:                /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
                                MO_TESL | MO_ALIGN);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x0002:                /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003:                /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
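/*
 * LD generates the "ldc/lds Rm,reg" and "ldc.l/lds.l @Rm+,reg" cases for
 * a control or system register; ST generates the matching "stc/sts" and
 * pre-decrement store forms.  'prechk' prepends a privilege or FPU check.
 */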
1399 #define LD(reg,ldnum,ldpnum,prechk)		\
1400   case ldnum:							\
1401     prechk    							\
1402     tcg_gen_mov_i32 (cpu_##reg, REG(B11_8));			\
1403     return;							\
1404   case ldpnum:							\
1405     prechk    							\
1406     tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx,     \
1407                         MO_TESL | MO_ALIGN);                    \
1408     tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);		\
1409     return;
1410 #define ST(reg,stnum,stpnum,prechk)		\
1411   case stnum:							\
1412     prechk    							\
1413     tcg_gen_mov_i32 (REG(B11_8), cpu_##reg);			\
1414     return;							\
1415   case stpnum:							\
1416     prechk    							\
1417     {								\
1418 	TCGv addr = tcg_temp_new();				\
1419 	tcg_gen_subi_i32(addr, REG(B11_8), 4);			\
1420         tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx,       \
1421                             MO_TEUL | MO_ALIGN);                \
1422 	tcg_gen_mov_i32(REG(B11_8), addr);			\
1423     }								\
1424     return;
1425 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)		\
1426 	LD(reg,ldnum,ldpnum,prechk)				\
1427 	ST(reg,stnum,stpnum,prechk)
1428 	LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1429 	LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1430 	LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1431 	LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1432 	ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1433         LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1434 	LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1435 	LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1436 	LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1437 	LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1438 	LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1439     case 0x406a:		/* lds Rm,FPSCR */
1440 	CHECK_FPU_ENABLED
1441         gen_helper_ld_fpscr(tcg_env, REG(B11_8));
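        /* FPSCR feeds the TB flags (PR, FR, ...), so stop translation
           and resume with the new mode bits in effect.  */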
1442         ctx->base.is_jmp = DISAS_STOP;
1443 	return;
1444     case 0x4066:		/* lds.l @Rm+,FPSCR */
1445 	CHECK_FPU_ENABLED
1446 	{
1447 	    TCGv addr = tcg_temp_new();
1448             tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1449                                 MO_TESL | MO_ALIGN);
1450 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1451             gen_helper_ld_fpscr(tcg_env, addr);
1452             ctx->base.is_jmp = DISAS_STOP;
1453 	}
1454 	return;
1455     case 0x006a:		/* sts FPSCR,Rn */
1456 	CHECK_FPU_ENABLED
1457 	tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1458 	return;
1459     case 0x4062:		/* sts FPSCR,@-Rn */
1460 	CHECK_FPU_ENABLED
1461 	{
1462 	    TCGv addr, val;
1463 	    val = tcg_temp_new();
1464 	    tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1465 	    addr = tcg_temp_new();
1466 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
1467             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
1468 	    tcg_gen_mov_i32(REG(B11_8), addr);
1469 	}
1470 	return;
1471     case 0x00c3:		/* movca.l R0,@Rm */
1472         {
1473             TCGv val = tcg_temp_new();
1474             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1475                                 MO_TEUL | MO_ALIGN);
1476             gen_helper_movcal(tcg_env, REG(B11_8), val);
1477             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1478                                 MO_TEUL | MO_ALIGN);
1479         }
1480         ctx->has_movcal = 1;
1481 	return;
1482     case 0x40a9:                /* movua.l @Rm,R0 */
1483         CHECK_SH4A
1484         /* Load non-boundary-aligned data */
1485         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1486                             MO_TEUL | MO_UNALN);
1487         return;
1488     case 0x40e9:                /* movua.l @Rm+,R0 */
1489         CHECK_SH4A
1490         /* Load non-boundary-aligned data */
1491         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1492                             MO_TEUL | MO_UNALN);
1493         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1494         return;
1495     case 0x0029:		/* movt Rn */
1496         tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1497 	return;
1498     case 0x0073:
1499         /* MOVCO.L
1500          *     LDST -> T
1501          *     If (T == 1) R0 -> (Rn)
1502          *     0 -> LDST
1503          *
1504          * The above description doesn't work in a parallel context.
1505          * Since we currently support no SMP boards, this implies user-mode.
1506          * But we can still support the official mechanism while user-mode
1507          * is single-threaded.  */
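        /* A typical guest retry loop built on this pair (sketch, with
         * hypothetical registers):
         *     movli.l @r1, r0     ! 1 -> LDST, (r1) -> r0
         *     add     #1, r0
         *     movco.l r0, @r1     ! T = 1 iff the store succeeded
         *     bf      retry
         */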
1508         CHECK_SH4A
1509         {
1510             TCGLabel *fail = gen_new_label();
1511             TCGLabel *done = gen_new_label();
1512 
1513             if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1514                 TCGv tmp;
1515 
1516                 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1517                                    cpu_lock_addr, fail);
1518                 tmp = tcg_temp_new();
1519                 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1520                                            REG(0), ctx->memidx,
1521                                            MO_TEUL | MO_ALIGN);
1522                 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1523             } else {
1524                 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1525                 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1526                                     MO_TEUL | MO_ALIGN);
1527                 tcg_gen_movi_i32(cpu_sr_t, 1);
1528             }
1529             tcg_gen_br(done);
1530 
1531             gen_set_label(fail);
1532             tcg_gen_movi_i32(cpu_sr_t, 0);
1533 
1534             gen_set_label(done);
1535             tcg_gen_movi_i32(cpu_lock_addr, -1);
1536         }
1537         return;
1538     case 0x0063:
1539         /* MOVLI.L @Rm,R0
1540          *     1 -> LDST
1541          *     (Rm) -> R0
1542          *     When an interrupt/exception
1543          *     occurs, 0 -> LDST
1544          *
1545          * In a parallel context, we must also save the loaded value
1546          * for use with the cmpxchg that we'll use with movco.l.  */
1547         CHECK_SH4A
1548         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1549             TCGv tmp = tcg_temp_new();
1550             tcg_gen_mov_i32(tmp, REG(B11_8));
1551             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1552                                 MO_TESL | MO_ALIGN);
1553             tcg_gen_mov_i32(cpu_lock_value, REG(0));
1554             tcg_gen_mov_i32(cpu_lock_addr, tmp);
1555         } else {
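            /* Serially, movco.l above only tests cpu_lock_addr against -1,
               so any other value (0 here) marks the lock as held.  */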
1556             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1557                                 MO_TESL | MO_ALIGN);
1558             tcg_gen_movi_i32(cpu_lock_addr, 0);
1559         }
1560         return;
1561     case 0x0093:		/* ocbi @Rn */
1562 	{
1563             gen_helper_ocbi(tcg_env, REG(B11_8));
1564 	}
1565 	return;
1566     case 0x00a3:		/* ocbp @Rn */
1567     case 0x00b3:		/* ocbwb @Rn */
1568         /* These instructions are supposed to do nothing in case of
1569            a cache miss. Given that we only partially emulate caches
1570            it is safe to simply ignore them. */
1571 	return;
1572     case 0x0083:		/* pref @Rn */
1573 	return;
1574     case 0x00d3:		/* prefi @Rn */
1575         CHECK_SH4A
1576         return;
1577     case 0x00e3:		/* icbi @Rn */
1578         CHECK_SH4A
1579         return;
1580     case 0x00ab:		/* synco */
1581         CHECK_SH4A
1582         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1583         return;
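    /* rotcl/rotcr are 33-bit rotates through T: the old T supplies the
       incoming bit and the displaced bit becomes the new T, e.g. rotcl
       with Rn = 0x80000000, T = 0 gives Rn = 0, T = 1.  */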
1584     case 0x4024:		/* rotcl Rn */
1585 	{
1586 	    TCGv tmp = tcg_temp_new();
1587             tcg_gen_mov_i32(tmp, cpu_sr_t);
1588             tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1589 	    tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1590             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1591 	}
1592 	return;
1593     case 0x4025:		/* rotcr Rn */
1594 	{
1595 	    TCGv tmp = tcg_temp_new();
1596             tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1597             tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1598 	    tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1599             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1600 	}
1601 	return;
1602     case 0x4004:		/* rotl Rn */
1603 	tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1604         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1605 	return;
1606     case 0x4005:		/* rotr Rn */
1607         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1608 	tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1609 	return;
1610     case 0x4000:		/* shll Rn */
1611     case 0x4020:		/* shal Rn */
1612         tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1613 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1614 	return;
1615     case 0x4021:		/* shar Rn */
1616         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1617 	tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1618 	return;
1619     case 0x4001:		/* shlr Rn */
1620         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1621 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1622 	return;
1623     case 0x4008:		/* shll2 Rn */
1624 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1625 	return;
1626     case 0x4018:		/* shll8 Rn */
1627 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1628 	return;
1629     case 0x4028:		/* shll16 Rn */
1630 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1631 	return;
1632     case 0x4009:		/* shlr2 Rn */
1633 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1634 	return;
1635     case 0x4019:		/* shlr8 Rn */
1636 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1637 	return;
1638     case 0x4029:		/* shlr16 Rn */
1639 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1640 	return;
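    /* tas.b: atomically OR 0x80 into the byte at Rn; T = 1 iff the
       byte was zero beforehand.  */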
1641     case 0x401b:		/* tas.b @Rn */
1642         tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1643                                     tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1644         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
1645         return;
1646     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1647 	CHECK_FPU_ENABLED
1648         tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1649 	return;
1650     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1651 	CHECK_FPU_ENABLED
1652         tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1653 	return;
1654     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1655 	CHECK_FPU_ENABLED
1656         if (ctx->tbflags & FPSCR_PR) {
1657 	    TCGv_i64 fp;
1658             if (ctx->opcode & 0x0100) {
1659                 goto do_illegal;
1660             }
1661 	    fp = tcg_temp_new_i64();
1662             gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1663             gen_store_fpr64(ctx, fp, B11_8);
1664 	}
1665 	else {
1666             gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
1667 	}
1668 	return;
1669     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1670 	CHECK_FPU_ENABLED
1671         if (ctx->tbflags & FPSCR_PR) {
1672 	    TCGv_i64 fp;
1673             if (ctx->opcode & 0x0100) {
1674                 goto do_illegal;
1675             }
1676 	    fp = tcg_temp_new_i64();
1677             gen_load_fpr64(ctx, fp, B11_8);
1678             gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
1679 	}
1680 	else {
1681             gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
1682 	}
1683 	return;
1684     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1685 	CHECK_FPU_ENABLED
1686         tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1687 	return;
1688     case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
1689 	CHECK_FPU_ENABLED
1690         tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1691 	return;
1692     case 0xf06d: /* fsqrt FRn */
1693 	CHECK_FPU_ENABLED
1694         if (ctx->tbflags & FPSCR_PR) {
1695             if (ctx->opcode & 0x0100) {
1696                 goto do_illegal;
1697             }
1698 	    TCGv_i64 fp = tcg_temp_new_i64();
1699             gen_load_fpr64(ctx, fp, B11_8);
1700             gen_helper_fsqrt_DT(fp, tcg_env, fp);
1701             gen_store_fpr64(ctx, fp, B11_8);
1702 	} else {
1703             gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1704 	}
1705 	return;
1706     case 0xf07d: /* fsrra FRn */
1707 	CHECK_FPU_ENABLED
1708         CHECK_FPSCR_PR_0
1709         gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
1710 	return;
1711     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1712 	CHECK_FPU_ENABLED
1713         CHECK_FPSCR_PR_0
1714         tcg_gen_movi_i32(FREG(B11_8), 0);
1715         return;
1716     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1717 	CHECK_FPU_ENABLED
1718         CHECK_FPSCR_PR_0
1719         tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1720         return;
1721     case 0xf0ad: /* fcnvsd FPUL,DRn */
1722 	CHECK_FPU_ENABLED
1723 	{
1724 	    TCGv_i64 fp = tcg_temp_new_i64();
1725             gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1726             gen_store_fpr64(ctx, fp, B11_8);
1727 	}
1728 	return;
1729     case 0xf0bd: /* fcnvds DRn,FPUL */
1730 	CHECK_FPU_ENABLED
1731 	{
1732 	    TCGv_i64 fp = tcg_temp_new_i64();
1733             gen_load_fpr64(ctx, fp, B11_8);
1734             gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
1735 	}
1736 	return;
1737     case 0xf0ed: /* fipr FVm,FVn */
1738         CHECK_FPU_ENABLED
1739         CHECK_FPSCR_PR_1
1740         {
1741             TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1742             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1743             gen_helper_fipr(tcg_env, m, n);
1744             return;
1745         }
1746         break;
1747     case 0xf0fd: /* ftrv XMTRX,FVn */
1748         CHECK_FPU_ENABLED
1749         CHECK_FPSCR_PR_1
1750         {
1751             if ((ctx->opcode & 0x0300) != 0x0100) {
1752                 goto do_illegal;
1753             }
1754             TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
1755             gen_helper_ftrv(tcg_env, n);
1756             return;
1757         }
1758         break;
1759     }
1760 #if 0
1761     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1762             ctx->opcode, ctx->base.pc_next);
1763     fflush(stderr);
1764 #endif
1765  do_illegal:
1766     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1767  do_illegal_slot:
1768         gen_save_cpu_state(ctx, true);
1769         gen_helper_raise_slot_illegal_instruction(tcg_env);
1770     } else {
1771         gen_save_cpu_state(ctx, true);
1772         gen_helper_raise_illegal_instruction(tcg_env);
1773     }
1774     ctx->base.is_jmp = DISAS_NORETURN;
1775     return;
1776 
1777  do_fpu_disabled:
1778     gen_save_cpu_state(ctx, true);
1779     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1780         gen_helper_raise_slot_fpu_disable(tcg_env);
1781     } else {
1782         gen_helper_raise_fpu_disable(tcg_env);
1783     }
1784     ctx->base.is_jmp = DISAS_NORETURN;
1785     return;
1786 }
1787 
1788 static void decode_opc(DisasContext *ctx)
1789 {
1790     uint32_t old_flags = ctx->envflags;
1791 
1792     _decode_opc(ctx);
1793 
1794     if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1795         /* go out of the delay slot */
1796         ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1797 
1798         /* When in an exclusive region, we must continue to the end
1799            for conditional branches.  */
1800         if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1801             && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1802             gen_delayed_conditional_jump(ctx);
1803             return;
1804         }
1805         /* Otherwise this is probably an invalid gUSA region.
1806            Drop the GUSA bits so the next TB doesn't see them.  */
1807         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1808 
1809         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1810         if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1811 	    gen_delayed_conditional_jump(ctx);
1812         } else {
1813             gen_jump(ctx);
1814 	}
1815     }
1816 }
1817 
1818 #ifdef CONFIG_USER_ONLY
1819 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1820    Upon an interrupt, a real kernel would simply notice magic values in
1821    the registers and reset the PC to the start of the sequence.
1822 
1823    For QEMU, we cannot do this in quite the same way.  Instead, we notice
1824    the normal start of such a sequence (mov #-x,r15).  While we can handle
1825    any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1826    sequences and transform them into atomic operations as seen by the host.
1827 */
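/* A "normal" sequence looks like this hypothetical atomic increment:
 *     mov     #-6, r15       ! begin gUSA region, 6 bytes to the end
 *     mov.l   @r4, r0        ! load
 *     add     #1, r0         ! operation
 *     mov.l   r0, @r4        ! store, last insn of the region
 * which the state machine below converts into a host atomic add.  */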
1828 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1829 {
1830     uint16_t insns[5];
1831     int ld_adr, ld_dst, ld_mop;
1832     int op_dst, op_src, op_opc;
1833     int mv_src, mt_dst, st_src, st_mop;
1834     TCGv op_arg;
1835     uint32_t pc = ctx->base.pc_next;
1836     uint32_t pc_end = ctx->base.tb->cs_base;
1837     int max_insns = (pc_end - pc) / 2;
1838     int i;
1839 
1840     /* The state machine below will consume only a few insns.
1841        If there are more than that in a region, fail now.  */
1842     if (max_insns > ARRAY_SIZE(insns)) {
1843         goto fail;
1844     }
1845 
1846     /* Read all of the insns for the region.  */
1847     for (i = 0; i < max_insns; ++i) {
1848         insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1849     }
1850 
1851     ld_adr = ld_dst = ld_mop = -1;
1852     mv_src = -1;
1853     op_dst = op_src = op_opc = -1;
1854     mt_dst = -1;
1855     st_src = st_mop = -1;
1856     op_arg = NULL;
1857     i = 0;
1858 
1859 #define NEXT_INSN \
1860     do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1861 
1862     /*
1863      * Expect a load to begin the region.
1864      */
1865     NEXT_INSN;
1866     switch (ctx->opcode & 0xf00f) {
1867     case 0x6000: /* mov.b @Rm,Rn */
1868         ld_mop = MO_SB;
1869         break;
1870     case 0x6001: /* mov.w @Rm,Rn */
1871         ld_mop = MO_TESW;
1872         break;
1873     case 0x6002: /* mov.l @Rm,Rn */
1874         ld_mop = MO_TESL;
1875         break;
1876     default:
1877         goto fail;
1878     }
1879     ld_adr = B7_4;
1880     ld_dst = B11_8;
1881     if (ld_adr == ld_dst) {
1882         goto fail;
1883     }
1884     /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1885     op_dst = ld_dst;
1886 
1887     /*
1888      * Expect an optional register move.
1889      */
1890     NEXT_INSN;
1891     switch (ctx->opcode & 0xf00f) {
1892     case 0x6003: /* mov Rm,Rn */
1893         /*
1894          * Here we want to recognize ld_dst being saved for later consumption,
1895          * or for another input register being copied so that ld_dst need not
1896          * be clobbered during the operation.
1897          */
1898         op_dst = B11_8;
1899         mv_src = B7_4;
1900         if (op_dst == ld_dst) {
1901             /* Overwriting the load output.  */
1902             goto fail;
1903         }
1904         if (mv_src != ld_dst) {
1905             /* Copying a new input; constrain op_src to match the load.  */
1906             op_src = ld_dst;
1907         }
1908         break;
1909 
1910     default:
1911         /* Put back and re-examine as operation.  */
1912         --i;
1913     }
1914 
1915     /*
1916      * Expect the operation.
1917      */
1918     NEXT_INSN;
1919     switch (ctx->opcode & 0xf00f) {
1920     case 0x300c: /* add Rm,Rn */
1921         op_opc = INDEX_op_add_i32;
1922         goto do_reg_op;
1923     case 0x2009: /* and Rm,Rn */
1924         op_opc = INDEX_op_and_i32;
1925         goto do_reg_op;
1926     case 0x200a: /* xor Rm,Rn */
1927         op_opc = INDEX_op_xor_i32;
1928         goto do_reg_op;
1929     case 0x200b: /* or Rm,Rn */
1930         op_opc = INDEX_op_or_i32;
1931     do_reg_op:
1932         /* The operation register should be as expected, and the
1933            other input cannot depend on the load.  */
1934         if (op_dst != B11_8) {
1935             goto fail;
1936         }
1937         if (op_src < 0) {
1938             /* Unconstrained input.  */
1939             op_src = B7_4;
1940         } else if (op_src == B7_4) {
1941             /* Constrained input matched load.  All operations are
1942                commutative; "swap" them by "moving" the load output
1943                to the (implicit) first argument and the move source
1944                to the (explicit) second argument.  */
1945             op_src = mv_src;
1946         } else {
1947             goto fail;
1948         }
1949         op_arg = REG(op_src);
1950         break;
1951 
1952     case 0x6007: /* not Rm,Rn */
1953         if (ld_dst != B7_4 || mv_src >= 0) {
1954             goto fail;
1955         }
1956         op_dst = B11_8;
1957         op_opc = INDEX_op_xor_i32;
1958         op_arg = tcg_constant_i32(-1);
1959         break;
1960 
1961     case 0x7000 ... 0x700f: /* add #imm,Rn */
1962         if (op_dst != B11_8 || mv_src >= 0) {
1963             goto fail;
1964         }
1965         op_opc = INDEX_op_add_i32;
1966         op_arg = tcg_constant_i32(B7_0s);
1967         break;
1968 
1969     case 0x3000: /* cmp/eq Rm,Rn */
1970         /* Looking for the middle of a compare-and-swap sequence,
1971            beginning with the compare.  Operands can be in either order,
1972            but with only one overlapping the load.  */
1973         if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1974             goto fail;
1975         }
1976         op_opc = INDEX_op_setcond_i32;  /* placeholder */
1977         op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
1978         op_arg = REG(op_src);
1979 
1980         NEXT_INSN;
1981         switch (ctx->opcode & 0xff00) {
1982         case 0x8b00: /* bf label */
1983         case 0x8f00: /* bf/s label */
1984             if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
1985                 goto fail;
1986             }
1987             if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
1988                 break;
1989             }
1990             /* We're looking to unconditionally modify Rn with the
1991                result of the comparison, within the delay slot of
1992                the branch.  This is used by older gcc.  */
1993             NEXT_INSN;
1994             if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
1995                 mt_dst = B11_8;
1996             } else {
1997                 goto fail;
1998             }
1999             break;
2000 
2001         default:
2002             goto fail;
2003         }
2004         break;
2005 
2006     case 0x2008: /* tst Rm,Rn */
2007         /* Looking for a compare-and-swap against zero.  */
2008         if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2009             goto fail;
2010         }
2011         op_opc = INDEX_op_setcond_i32;
2012         op_arg = tcg_constant_i32(0);
2013 
2014         NEXT_INSN;
2015         if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2016             || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2017             goto fail;
2018         }
2019         break;
2020 
2021     default:
2022         /* Put back and re-examine as store.  */
2023         --i;
2024     }
2025 
2026     /*
2027      * Expect the store.
2028      */
2029     /* The store must be the last insn.  */
2030     if (i != max_insns - 1) {
2031         goto fail;
2032     }
2033     NEXT_INSN;
2034     switch (ctx->opcode & 0xf00f) {
2035     case 0x2000: /* mov.b Rm,@Rn */
2036         st_mop = MO_UB;
2037         break;
2038     case 0x2001: /* mov.w Rm,@Rn */
2039         st_mop = MO_UW;
2040         break;
2041     case 0x2002: /* mov.l Rm,@Rn */
2042         st_mop = MO_UL;
2043         break;
2044     default:
2045         goto fail;
2046     }
2047     /* The store must match the load.  */
2048     if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2049         goto fail;
2050     }
2051     st_src = B7_4;
2052 
2053 #undef NEXT_INSN
2054 
2055     /*
2056      * Emit the operation.
2057      */
2058     switch (op_opc) {
2059     case -1:
2060         /* No operation found.  Look for an exchange pattern.  */
2061         if (st_src == ld_dst || mv_src >= 0) {
2062             goto fail;
2063         }
2064         tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2065                                 ctx->memidx, ld_mop);
2066         break;
2067 
2068     case INDEX_op_add_i32:
2069         if (op_dst != st_src) {
2070             goto fail;
2071         }
2072         if (op_dst == ld_dst && st_mop == MO_UL) {
2073             tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2074                                          op_arg, ctx->memidx, ld_mop);
2075         } else {
2076             tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2077                                          op_arg, ctx->memidx, ld_mop);
2078             if (op_dst != ld_dst) {
2079                 /* Note that mop sizes < 4 cannot use add_fetch
2080                    because it won't carry into the higher bits.  */
2081                 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2082             }
2083         }
2084         break;
2085 
2086     case INDEX_op_and_i32:
2087         if (op_dst != st_src) {
2088             goto fail;
2089         }
2090         if (op_dst == ld_dst) {
2091             tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2092                                          op_arg, ctx->memidx, ld_mop);
2093         } else {
2094             tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2095                                          op_arg, ctx->memidx, ld_mop);
2096             tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2097         }
2098         break;
2099 
2100     case INDEX_op_or_i32:
2101         if (op_dst != st_src) {
2102             goto fail;
2103         }
2104         if (op_dst == ld_dst) {
2105             tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2106                                         op_arg, ctx->memidx, ld_mop);
2107         } else {
2108             tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2109                                         op_arg, ctx->memidx, ld_mop);
2110             tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2111         }
2112         break;
2113 
2114     case INDEX_op_xor_i32:
2115         if (op_dst != st_src) {
2116             goto fail;
2117         }
2118         if (op_dst == ld_dst) {
2119             tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2120                                          op_arg, ctx->memidx, ld_mop);
2121         } else {
2122             tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2123                                          op_arg, ctx->memidx, ld_mop);
2124             tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2125         }
2126         break;
2127 
2128     case INDEX_op_setcond_i32:
2129         if (st_src == ld_dst) {
2130             goto fail;
2131         }
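        /* REG(ld_dst) receives the old memory value, and T is set iff
           the compare succeeded, matching the guest's cmp/eq or tst
           result.  */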
2132         tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2133                                    REG(st_src), ctx->memidx, ld_mop);
2134         tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2135         if (mt_dst >= 0) {
2136             tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2137         }
2138         break;
2139 
2140     default:
2141         g_assert_not_reached();
2142     }
2143 
2144     /* The entire region has been translated.  */
2145     ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2146     goto done;
2147 
2148  fail:
2149     qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2150                   pc, pc_end);
2151 
2152     /* Restart with the EXCLUSIVE bit set, within a TB run via
2153        cpu_exec_step_atomic holding the exclusive lock.  */
2154     ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
2155     gen_save_cpu_state(ctx, false);
2156     gen_helper_exclusive(tcg_env);
2157     ctx->base.is_jmp = DISAS_NORETURN;
2158 
2159     /* We're not executing an instruction, but we must report one for the
2160        purposes of accounting within the TB.  We might as well report the
2161        entire region consumed via ctx->base.pc_next so that it's immediately
2162        available in the disassembly dump.  */
2163 
2164  done:
2165     ctx->base.pc_next = pc_end;
2166     ctx->base.num_insns += max_insns - 1;
2167 
2168     /*
2169      * Emit insn_start to cover each of the insns in the region.
2170      * This matches an assert in tcg.c making sure that we have
2171      * tb->icount * insn_start.
2172      */
2173     for (i = 1; i < max_insns; ++i) {
2174         tcg_gen_insn_start(pc + i * 2, ctx->envflags);
2175     }
2176 }
2177 #endif
2178 
2179 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2180 {
2181     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2182     CPUSH4State *env = cpu_env(cs);
2183     uint32_t tbflags;
2184     int bound;
2185 
2186     ctx->tbflags = tbflags = ctx->base.tb->flags;
2187     ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2188     ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2189     /* We don't know if the delayed pc came from a dynamic or static branch,
2190        so assume it is a dynamic branch.  */
2191     ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2192     ctx->features = env->features;
2193     ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2194     ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2195                   (tbflags & (1 << SR_RB))) * 0x10;
2196     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2197 
2198 #ifdef CONFIG_USER_ONLY
2199     if (tbflags & TB_FLAG_GUSA_MASK) {
2200         /* In gUSA exclusive region. */
2201         uint32_t pc = ctx->base.pc_next;
2202         uint32_t pc_end = ctx->base.tb->cs_base;
2203         int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2204         int max_insns = (pc_end - pc) / 2;
2205 
2206         if (pc != pc_end + backup || max_insns < 2) {
2207             /* This is a malformed gUSA region.  Don't do anything special,
2208                since the interpreter is likely to get confused.  */
2209             ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2210         } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2211             /* Regardless of single-stepping or the end of the page,
2212                we must complete execution of the gUSA region while
2213                holding the exclusive lock.  */
2214             ctx->base.max_insns = max_insns;
2215             return;
2216         }
2217     }
2218 #endif
2219 
2220     /* Since the ISA is fixed-width, we can bound the TB by the number
2221        of instructions remaining on the page.  */
2222     bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
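    /* E.g. with 4 KiB pages, pc_next == 0x1ffa leaves the three insns
       at 0x1ffa/0x1ffc/0x1ffe on the page, so bound == 3.  */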
2223     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2224 }
2225 
2226 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2227 {
2228 }
2229 
2230 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2231 {
2232     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2233 
2234     tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2235 }
2236 
2237 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2238 {
2239     CPUSH4State *env = cpu_env(cs);
2240     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2241 
2242 #ifdef CONFIG_USER_ONLY
2243     if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2244         && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
2245         /* We're in a gUSA region, and we have not already fallen
2246            back on using an exclusive region.  Attempt to parse the
2247            region into a single supported atomic operation.  Failure
2248            is handled within the parser by raising an exception to
2249            retry using an exclusive region.  */
2250         decode_gusa(ctx, env);
2251         return;
2252     }
2253 #endif
2254 
2255     ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2256     decode_opc(ctx);
2257     ctx->base.pc_next += 2;
2258 }
2259 
2260 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2261 {
2262     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2263 
2264     if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2265         /* Ending the region of exclusivity.  Clear the bits.  */
2266         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2267     }
2268 
2269     switch (ctx->base.is_jmp) {
2270     case DISAS_STOP:
2271         gen_save_cpu_state(ctx, true);
2272         tcg_gen_exit_tb(NULL, 0);
2273         break;
2274     case DISAS_NEXT:
2275     case DISAS_TOO_MANY:
2276         gen_save_cpu_state(ctx, false);
2277         gen_goto_tb(ctx, 0, ctx->base.pc_next);
2278         break;
2279     case DISAS_NORETURN:
2280         break;
2281     default:
2282         g_assert_not_reached();
2283     }
2284 }
2285 
2286 static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2287                              CPUState *cs, FILE *logfile)
2288 {
2289     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2290     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2291 }
2292 
2293 static const TranslatorOps sh4_tr_ops = {
2294     .init_disas_context = sh4_tr_init_disas_context,
2295     .tb_start           = sh4_tr_tb_start,
2296     .insn_start         = sh4_tr_insn_start,
2297     .translate_insn     = sh4_tr_translate_insn,
2298     .tb_stop            = sh4_tr_tb_stop,
2299     .disas_log          = sh4_tr_disas_log,
2300 };
2301 
2302 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2303                            target_ulong pc, void *host_pc)
2304 {
2305     DisasContext ctx;
2306 
2307     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2308 }
2309