/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
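    /*
     * Target of a pending delayed branch.  (uint32_t)-1 means the
     * destination is not known at translation time and is held in
     * cpu_delayed_pc instead (see gen_jump).
     */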
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;
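
/*
 * In the user-only configuration everything executes unprivileged, and
 * loads/stores enforce natural alignment unless the TB was translated
 * with TB_FLAG_UNALIGN.  In the system configuration, privilege comes
 * from SR.MD and the MemOp carries no alignment constraint.
 */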

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C)   (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C)   0
#endif

/* Target-specific values for ctx->base.is_jmp.  */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

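/*
 * SR.Q, SR.M and SR.T are held in dedicated variables (cpu_sr_q,
 * cpu_sr_m, cpu_sr_t) so that flag-producing instructions can set
 * them directly; the remaining bits live in cpu_sr.  The two helpers
 * below assemble and split the architectural SR value.
 */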
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
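    /*
     * Chain directly to the destination TB when possible; otherwise
     * set PC and either exit to the main loop or look the next TB up
     * dynamically.
     */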
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jump(DisasContext *ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, since immediate jumps are conditional jumps. */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext *ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);
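    /* The condition is consumed exactly once; discarding the global
       tells TCG that its value is dead until the next conditional
       delay slot writes it. */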

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

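/* DR<n> is the register pair (FPR<n>, FPR<n+1>), with the even
   register holding the most significant 32 bits. */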
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

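/* Fields of the 16-bit opcode: B<hi>_<lo> extracts bits hi..lo; an
   's' suffix marks a sign-extended immediate or displacement. */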
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
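/*
 * When FPSCR.SZ is set, bit 0 of an fmov register field selects the
 * XD (other-bank) register pair.  XHACK relocates that bit to bit 4,
 * the bank-select bit of the cpu_fregs index, leaving an even DR
 * register number.
 */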

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext *ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache.  Here, the data written by movca.l is
       never written to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content.  If we see
       ocbi, we check whether movca.l for that address was done
       previously.  If so, the write should not have hit the memory,
       so we restore the previous content.  When we see an instruction
       that is neither movca.l nor ocbi, the previous content is
       discarded.

       To optimize, we only try to flush stores when we're at the start
       of the TB, or if we already saw movca.l in this TB and did not
       flush stores yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019:                /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b:                /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028:                /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048:                /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008:                /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038:                /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b:                /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) -1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058:                /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018:                /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd:                /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd:                /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd:                /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009:                /* nop */
        return;
    case 0x001b:                /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000:                /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x5000:                /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0xe000:                /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000:                /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000:                /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000:                /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000:                /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000:                /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003:                /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000:                /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001:                /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002:                /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000:                /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001:                /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002:                /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004:                /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            /* modify register status */
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2005:                /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006:                /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004:                /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005:                /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006:                /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004:                /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005:                /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x0006:                /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x000c:                /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d:                /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x000e:                /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x6008:                /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009:                /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d:                /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c:                /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e:                /* addc Rm,Rn */
        {
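            /* addc: Rn = Rn + Rm + T, T = carry.  The two add2 steps
               propagate the carry through the (value, carry) pairs. */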
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f:                /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
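            /* T = ((result ^ Rn) & ~(Rm ^ Rn)) >> 31: overflow iff the
               operands have the same sign and the result's sign
               differs from it. */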
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009:                /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000:                /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003:                /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007:                /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006:                /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002:                /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c:                /* cmp/str Rm,Rn */
        {
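            /* T is set if any byte of Rm equals the corresponding byte
               of Rn: a zero byte in Rm ^ Rn is detected with the
               classic (x - 0x01010101) & ~x & 0x80808080 trick. */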
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007:                /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004:                /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d:                /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005:                /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e:                /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f:                /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c:                /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d:                /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f:                /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f:                /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007:                /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f:                /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e:                /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b:                /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a:                /* negc Rm,Rn */
        {
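            /* negc: Rn = 0 - Rm - T, T = borrow.  Computed as the
               two-word subtraction 0 - (Rm + T), keeping the borrow in
               the high word. */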
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007:                /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b:                /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c:                /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d:                /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008:                /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a:                /* subc Rm,Rn */
        {
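            /* subc: Rn = Rn - Rm - T, T = borrow, tracked through the
               carry word of the add2/sub2 pairs. */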
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b:                /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
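            /* T = ((result ^ Rn) & (Rn ^ Rm)) >> 31: overflow iff the
               operands' signs differ and the result's sign differs
               from Rn. */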
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008:                /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a:                /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900:                /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00:                /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00:                /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
        return;
    case 0x8f00:                /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8900:                /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
        return;
    case 0x8d00:                /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8800:                /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400:                /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500:                /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600:                /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000:                /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100:                /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200:                /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000:                /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100:                /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x8400:                /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500:                /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0xc700:                /* mova @(disp,PC),R0 */
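        /* R0 = (PC & ~3) + 4 + disp * 4: the longword-aligned
           PC-relative address, where PC is this instruction's address. */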
        tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
                                  4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00:                /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00:                /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300:                /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        return;
    case 0xc800:                /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00:                /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00:                /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00:                /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e:                /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087:                /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082:                /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083:                /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023:                /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0003:                /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x4015:                /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011:                /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010:                /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b:                /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400b:                /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400e:                /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x4007:                /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x0002:                /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003:                /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
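/* Generate the ldc/lds (from register and post-increment memory) and
   stc/sts (to register and pre-decrement memory) handlers for a
   control or system register, guarded by the given privilege or
   feature check. */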
1466 #define LD(reg,ldnum,ldpnum,prechk)		\
1467   case ldnum:							\
1468     prechk    							\
1469     tcg_gen_mov_i32 (cpu_##reg, REG(B11_8));			\
1470     return;							\
1471   case ldpnum:							\
1472     prechk    							\
1473     tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1474     tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);		\
1475     return;
1476 #define ST(reg,stnum,stpnum,prechk)		\
1477   case stnum:							\
1478     prechk    							\
1479     tcg_gen_mov_i32 (REG(B11_8), cpu_##reg);			\
1480     return;							\
1481   case stpnum:							\
1482     prechk    							\
1483     {								\
1484 	TCGv addr = tcg_temp_new();				\
1485 	tcg_gen_subi_i32(addr, REG(B11_8), 4);			\
1486         tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1487 	tcg_gen_mov_i32(REG(B11_8), addr);			\
1488 	tcg_temp_free(addr);					\
1489     }								\
1490     return;
1491 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)		\
1492 	LD(reg,ldnum,ldpnum,prechk)				\
1493 	ST(reg,stnum,stpnum,prechk)
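    /* For example, LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) expands
       to the four cases "lds Rm,GBR", "lds.l @Rm+,GBR", "sts GBR,Rn" and
       "sts.l GBR,@-Rn" using the load/store templates above.  */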
1494 	LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1495 	LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1496 	LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1497 	LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1498 	ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1499         LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1500 	LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1501 	LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1502 	LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1503 	LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1504 	LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1505     case 0x406a:		/* lds Rm,FPSCR */
1506 	CHECK_FPU_ENABLED
1507         gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1508         ctx->base.is_jmp = DISAS_STOP;
1509 	return;
1510     case 0x4066:		/* lds.l @Rm+,FPSCR */
1511 	CHECK_FPU_ENABLED
1512 	{
1513 	    TCGv addr = tcg_temp_new();
1514             tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1515 	    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1516             gen_helper_ld_fpscr(cpu_env, addr);
1517 	    tcg_temp_free(addr);
1518             ctx->base.is_jmp = DISAS_STOP;
1519 	}
1520 	return;
1521     case 0x006a:		/* sts FPSCR,Rn */
1522 	CHECK_FPU_ENABLED
1523 	tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1524 	return;
1525     case 0x4062:		/* sts FPSCR,@-Rn */
1526 	CHECK_FPU_ENABLED
1527 	{
1528 	    TCGv addr, val;
1529 	    val = tcg_temp_new();
1530 	    tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1531 	    addr = tcg_temp_new();
1532 	    tcg_gen_subi_i32(addr, REG(B11_8), 4);
1533             tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1534 	    tcg_gen_mov_i32(REG(B11_8), addr);
1535 	    tcg_temp_free(addr);
1536 	    tcg_temp_free(val);
1537 	}
1538 	return;
    case 0x00c3:                /* movca.l R0,@Rn */
1540         {
1541             TCGv val = tcg_temp_new();
1542             tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1543             gen_helper_movcal(cpu_env, REG(B11_8), val);
1544             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1545             tcg_temp_free(val);
1546         }
1547         ctx->has_movcal = 1;
1548 	return;
1549     case 0x40a9:                /* movua.l @Rm,R0 */
1550         CHECK_SH4A
1551         /* Load non-boundary-aligned data */
1552         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1553                             MO_TEUL | MO_UNALN);
1554         return;
1555     case 0x40e9:                /* movua.l @Rm+,R0 */
1556         CHECK_SH4A
1557         /* Load non-boundary-aligned data */
1558         tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1559                             MO_TEUL | MO_UNALN);
1560         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1561         return;
1562     case 0x0029:		/* movt Rn */
1563         tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1564 	return;
1565     case 0x0073:
1566         /* MOVCO.L
1567          *     LDST -> T
1568          *     If (T == 1) R0 -> (Rn)
1569          *     0 -> LDST
1570          *
1571          * The above description doesn't work in a parallel context.
         * Since we currently support no SMP boards, this implies user-mode.
1573          * But we can still support the official mechanism while user-mode
1574          * is single-threaded.  */
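        /*
         * Illustrative guest-side usage only (not something QEMU emits):
         * the pair is normally wrapped in a retry loop, e.g.
         *    0: movli.l @r1,r0    ! load-linked, 1 -> LDST
         *       add     #1,r0
         *       movco.l r0,@r1    ! store-conditional, success -> T
         *       bf      0b        ! retry until the store succeeds
         */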
1575         CHECK_SH4A
1576         {
1577             TCGLabel *fail = gen_new_label();
1578             TCGLabel *done = gen_new_label();
1579 
1580             if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1581                 TCGv tmp;
1582 
1583                 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1584                                    cpu_lock_addr, fail);
1585                 tmp = tcg_temp_new();
1586                 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1587                                            REG(0), ctx->memidx, MO_TEUL);
1588                 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1589                 tcg_temp_free(tmp);
1590             } else {
1591                 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1592                 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1593                 tcg_gen_movi_i32(cpu_sr_t, 1);
1594             }
1595             tcg_gen_br(done);
1596 
1597             gen_set_label(fail);
1598             tcg_gen_movi_i32(cpu_sr_t, 0);
1599 
1600             gen_set_label(done);
1601             tcg_gen_movi_i32(cpu_lock_addr, -1);
1602         }
1603         return;
1604     case 0x0063:
1605         /* MOVLI.L @Rm,R0
1606          *     1 -> LDST
1607          *     (Rm) -> R0
         *     When an interrupt/exception
         *     occurs, 0 -> LDST
1610          *
1611          * In a parallel context, we must also save the loaded value
1612          * for use with the cmpxchg that we'll use with movco.l.  */
1613         CHECK_SH4A
1614         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1615             TCGv tmp = tcg_temp_new();
1616             tcg_gen_mov_i32(tmp, REG(B11_8));
1617             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1618             tcg_gen_mov_i32(cpu_lock_value, REG(0));
1619             tcg_gen_mov_i32(cpu_lock_addr, tmp);
1620             tcg_temp_free(tmp);
1621         } else {
1622             tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1623             tcg_gen_movi_i32(cpu_lock_addr, 0);
1624         }
1625         return;
1626     case 0x0093:		/* ocbi @Rn */
        gen_helper_ocbi(cpu_env, REG(B11_8));
1630 	return;
1631     case 0x00a3:		/* ocbp @Rn */
1632     case 0x00b3:		/* ocbwb @Rn */
1633         /* These instructions are supposed to do nothing in case of
1634            a cache miss. Given that we only partially emulate caches
1635            it is safe to simply ignore them. */
1636 	return;
1637     case 0x0083:		/* pref @Rn */
1638 	return;
1639     case 0x00d3:		/* prefi @Rn */
1640         CHECK_SH4A
1641         return;
1642     case 0x00e3:		/* icbi @Rn */
1643         CHECK_SH4A
1644         return;
1645     case 0x00ab:		/* synco */
1646         CHECK_SH4A
1647         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1648         return;
1649     case 0x4024:		/* rotcl Rn */
1650 	{
1651 	    TCGv tmp = tcg_temp_new();
1652             tcg_gen_mov_i32(tmp, cpu_sr_t);
1653             tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1654 	    tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1655             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1656 	    tcg_temp_free(tmp);
1657 	}
1658 	return;
1659     case 0x4025:		/* rotcr Rn */
1660 	{
1661 	    TCGv tmp = tcg_temp_new();
1662             tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1663             tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1664 	    tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1665             tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1666 	    tcg_temp_free(tmp);
1667 	}
1668 	return;
1669     case 0x4004:		/* rotl Rn */
1670 	tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = old MSB, now bit 0 */
1672 	return;
1673     case 0x4005:		/* rotr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = LSB rotated out */
1675 	tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1676 	return;
1677     case 0x4000:		/* shll Rn */
1678     case 0x4020:		/* shal Rn */
1679         tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1680 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1681 	return;
1682     case 0x4021:		/* shar Rn */
1683         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1684 	tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1685 	return;
1686     case 0x4001:		/* shlr Rn */
1687         tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1688 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1689 	return;
1690     case 0x4008:		/* shll2 Rn */
1691 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1692 	return;
1693     case 0x4018:		/* shll8 Rn */
1694 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1695 	return;
1696     case 0x4028:		/* shll16 Rn */
1697 	tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1698 	return;
1699     case 0x4009:		/* shlr2 Rn */
1700 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1701 	return;
1702     case 0x4019:		/* shlr8 Rn */
1703 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1704 	return;
1705     case 0x4029:		/* shlr16 Rn */
1706 	tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1707 	return;
1708     case 0x401b:		/* tas.b @Rn */
1709         {
1710             TCGv val = tcg_const_i32(0x80);
1711             tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1712                                         ctx->memidx, MO_UB);
1713             tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1714             tcg_temp_free(val);
1715         }
1716         return;
1717     case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1718 	CHECK_FPU_ENABLED
1719         tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1720 	return;
1721     case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1722 	CHECK_FPU_ENABLED
1723         tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1724 	return;
1725     case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1726 	CHECK_FPU_ENABLED
1727         if (ctx->tbflags & FPSCR_PR) {
1728 	    TCGv_i64 fp;
1729             if (ctx->opcode & 0x0100) {
1730                 goto do_illegal;
1731             }
1732 	    fp = tcg_temp_new_i64();
1733             gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1734             gen_store_fpr64(ctx, fp, B11_8);
1735 	    tcg_temp_free_i64(fp);
1736 	}
1737 	else {
1738             gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1739 	}
1740 	return;
1741     case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1742 	CHECK_FPU_ENABLED
1743         if (ctx->tbflags & FPSCR_PR) {
1744 	    TCGv_i64 fp;
1745             if (ctx->opcode & 0x0100) {
1746                 goto do_illegal;
1747             }
1748 	    fp = tcg_temp_new_i64();
1749             gen_load_fpr64(ctx, fp, B11_8);
1750             gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1751 	    tcg_temp_free_i64(fp);
1752 	}
1753 	else {
1754             gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1755 	}
1756 	return;
1757     case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1758 	CHECK_FPU_ENABLED
1759         tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1760 	return;
    case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
1762 	CHECK_FPU_ENABLED
1763         tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1764 	return;
1765     case 0xf06d: /* fsqrt FRn */
1766 	CHECK_FPU_ENABLED
1767         if (ctx->tbflags & FPSCR_PR) {
1768             if (ctx->opcode & 0x0100) {
1769                 goto do_illegal;
1770             }
1771 	    TCGv_i64 fp = tcg_temp_new_i64();
1772             gen_load_fpr64(ctx, fp, B11_8);
1773             gen_helper_fsqrt_DT(fp, cpu_env, fp);
1774             gen_store_fpr64(ctx, fp, B11_8);
1775 	    tcg_temp_free_i64(fp);
1776 	} else {
1777             gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1778 	}
1779 	return;
1780     case 0xf07d: /* fsrra FRn */
1781 	CHECK_FPU_ENABLED
1782         CHECK_FPSCR_PR_0
1783         gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
        return;
1785     case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1786 	CHECK_FPU_ENABLED
1787         CHECK_FPSCR_PR_0
1788         tcg_gen_movi_i32(FREG(B11_8), 0);
1789         return;
1790     case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1791 	CHECK_FPU_ENABLED
1792         CHECK_FPSCR_PR_0
1793         tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1794         return;
1795     case 0xf0ad: /* fcnvsd FPUL,DRn */
1796 	CHECK_FPU_ENABLED
1797 	{
1798 	    TCGv_i64 fp = tcg_temp_new_i64();
1799             gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1800             gen_store_fpr64(ctx, fp, B11_8);
1801 	    tcg_temp_free_i64(fp);
1802 	}
1803 	return;
1804     case 0xf0bd: /* fcnvds DRn,FPUL */
1805 	CHECK_FPU_ENABLED
1806 	{
1807 	    TCGv_i64 fp = tcg_temp_new_i64();
1808             gen_load_fpr64(ctx, fp, B11_8);
1809             gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1810 	    tcg_temp_free_i64(fp);
1811 	}
1812 	return;
1813     case 0xf0ed: /* fipr FVm,FVn */
1814         CHECK_FPU_ENABLED
1815         CHECK_FPSCR_PR_1
1816         {
1817             TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1818             TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1819             gen_helper_fipr(cpu_env, m, n);
1820             tcg_temp_free(m);
1821             tcg_temp_free(n);
1822             return;
1823         }
1825     case 0xf0fd: /* ftrv XMTRX,FVn */
1826         CHECK_FPU_ENABLED
1827         CHECK_FPSCR_PR_1
1828         {
1829             if ((ctx->opcode & 0x0300) != 0x0100) {
1830                 goto do_illegal;
1831             }
1832             TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1833             gen_helper_ftrv(cpu_env, n);
1834             tcg_temp_free(n);
1835             return;
1836         }
1838     }
1839 #if 0
1840     fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1841             ctx->opcode, ctx->base.pc_next);
1842     fflush(stderr);
1843 #endif
1844  do_illegal:
1845     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1846  do_illegal_slot:
1847         gen_save_cpu_state(ctx, true);
1848         gen_helper_raise_slot_illegal_instruction(cpu_env);
1849     } else {
1850         gen_save_cpu_state(ctx, true);
1851         gen_helper_raise_illegal_instruction(cpu_env);
1852     }
1853     ctx->base.is_jmp = DISAS_NORETURN;
1854     return;
1855 
1856  do_fpu_disabled:
1857     gen_save_cpu_state(ctx, true);
1858     if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
1859         gen_helper_raise_slot_fpu_disable(cpu_env);
1860     } else {
1861         gen_helper_raise_fpu_disable(cpu_env);
1862     }
1863     ctx->base.is_jmp = DISAS_NORETURN;
1864     return;
1865 }
1866 
1867 static void decode_opc(DisasContext * ctx)
1868 {
1869     uint32_t old_flags = ctx->envflags;
1870 
1871     _decode_opc(ctx);
1872 
1873     if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
1874         /* go out of the delay slot */
1875         ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
1876 
1877         /* When in an exclusive region, we must continue to the end
1878            for conditional branches.  */
1879         if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1880             && old_flags & TB_FLAG_DELAY_SLOT_COND) {
1881             gen_delayed_conditional_jump(ctx);
1882             return;
1883         }
1884         /* Otherwise this is probably an invalid gUSA region.
1885            Drop the GUSA bits so the next TB doesn't see them.  */
1886         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
1887 
1888         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1889         if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
1890 	    gen_delayed_conditional_jump(ctx);
1891         } else {
1892             gen_jump(ctx);
1893 	}
1894     }
1895 }
1896 
1897 #ifdef CONFIG_USER_ONLY
/* For uniprocessors, SH4 Linux uses optimistic restartable atomic
   sequences, known as gUSA.  Upon an interrupt, a real kernel would
   simply notice magic values in the registers and reset the PC to the
   start of the sequence.
1901 
1902    For QEMU, we cannot do this in quite the same way.  Instead, we notice
1903    the normal start of such a sequence (mov #-x,r15).  While we can handle
1904    any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1905    sequences and transform them into atomic operations as seen by the host.
1906 */
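/* A representative region, following the Linux gUSA convention (an
   atomic increment, shown here for illustration only):

       mova   1f,r0      ! r0 = address of the end of the region
       mov    r15,r1     ! save the stack pointer
       mov    #-6,r15    ! enter the region: r15 = -(length in bytes)
       mov.l  @r2,r3     ! load the old value
       add    #1,r3      ! operate on it
       mov.l  r3,@r2     ! store the new value
    1: mov    r1,r15     ! leave the region: restore the stack pointer

   decode_gusa() matches the load/op/store body of such a region and
   replaces it with a single host atomic operation.  */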
1907 static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
1908 {
1909     uint16_t insns[5];
1910     int ld_adr, ld_dst, ld_mop;
1911     int op_dst, op_src, op_opc;
1912     int mv_src, mt_dst, st_src, st_mop;
1913     TCGv op_arg;
1914     uint32_t pc = ctx->base.pc_next;
1915     uint32_t pc_end = ctx->base.tb->cs_base;
1916     int max_insns = (pc_end - pc) / 2;
1917     int i;
1918 
1919     /* The state machine below will consume only a few insns.
1920        If there are more than that in a region, fail now.  */
1921     if (max_insns > ARRAY_SIZE(insns)) {
1922         goto fail;
1923     }
1924 
1925     /* Read all of the insns for the region.  */
1926     for (i = 0; i < max_insns; ++i) {
1927         insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
1928     }
1929 
1930     ld_adr = ld_dst = ld_mop = -1;
1931     mv_src = -1;
1932     op_dst = op_src = op_opc = -1;
1933     mt_dst = -1;
1934     st_src = st_mop = -1;
1935     op_arg = NULL;
1936     i = 0;
1937 
1938 #define NEXT_INSN \
1939     do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1940 
1941     /*
1942      * Expect a load to begin the region.
1943      */
1944     NEXT_INSN;
1945     switch (ctx->opcode & 0xf00f) {
1946     case 0x6000: /* mov.b @Rm,Rn */
1947         ld_mop = MO_SB;
1948         break;
1949     case 0x6001: /* mov.w @Rm,Rn */
1950         ld_mop = MO_TESW;
1951         break;
1952     case 0x6002: /* mov.l @Rm,Rn */
1953         ld_mop = MO_TESL;
1954         break;
1955     default:
1956         goto fail;
1957     }
1958     ld_adr = B7_4;
1959     ld_dst = B11_8;
1960     if (ld_adr == ld_dst) {
1961         goto fail;
1962     }
1963     /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1964     op_dst = ld_dst;
1965 
1966     /*
1967      * Expect an optional register move.
1968      */
1969     NEXT_INSN;
1970     switch (ctx->opcode & 0xf00f) {
1971     case 0x6003: /* mov Rm,Rn */
1972         /*
1973          * Here we want to recognize ld_dst being saved for later consumption,
1974          * or for another input register being copied so that ld_dst need not
1975          * be clobbered during the operation.
1976          */
1977         op_dst = B11_8;
1978         mv_src = B7_4;
1979         if (op_dst == ld_dst) {
1980             /* Overwriting the load output.  */
1981             goto fail;
1982         }
1983         if (mv_src != ld_dst) {
1984             /* Copying a new input; constrain op_src to match the load.  */
1985             op_src = ld_dst;
1986         }
1987         break;
1988 
1989     default:
1990         /* Put back and re-examine as operation.  */
1991         --i;
1992     }
1993 
1994     /*
1995      * Expect the operation.
1996      */
1997     NEXT_INSN;
1998     switch (ctx->opcode & 0xf00f) {
1999     case 0x300c: /* add Rm,Rn */
2000         op_opc = INDEX_op_add_i32;
2001         goto do_reg_op;
2002     case 0x2009: /* and Rm,Rn */
2003         op_opc = INDEX_op_and_i32;
2004         goto do_reg_op;
2005     case 0x200a: /* xor Rm,Rn */
2006         op_opc = INDEX_op_xor_i32;
2007         goto do_reg_op;
2008     case 0x200b: /* or Rm,Rn */
2009         op_opc = INDEX_op_or_i32;
2010     do_reg_op:
2011         /* The operation register should be as expected, and the
2012            other input cannot depend on the load.  */
2013         if (op_dst != B11_8) {
2014             goto fail;
2015         }
2016         if (op_src < 0) {
            /* Unconstrained input.  */
2018             op_src = B7_4;
2019         } else if (op_src == B7_4) {
2020             /* Constrained input matched load.  All operations are
2021                commutative; "swap" them by "moving" the load output
2022                to the (implicit) first argument and the move source
2023                to the (explicit) second argument.  */
2024             op_src = mv_src;
2025         } else {
2026             goto fail;
2027         }
2028         op_arg = REG(op_src);
2029         break;
2030 
2031     case 0x6007: /* not Rm,Rn */
2032         if (ld_dst != B7_4 || mv_src >= 0) {
2033             goto fail;
2034         }
2035         op_dst = B11_8;
2036         op_opc = INDEX_op_xor_i32;
2037         op_arg = tcg_const_i32(-1);
2038         break;
2039 
2040     case 0x7000 ... 0x700f: /* add #imm,Rn */
2041         if (op_dst != B11_8 || mv_src >= 0) {
2042             goto fail;
2043         }
2044         op_opc = INDEX_op_add_i32;
2045         op_arg = tcg_const_i32(B7_0s);
2046         break;
2047 
2048     case 0x3000: /* cmp/eq Rm,Rn */
2049         /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be in either order,
2051            but with only one overlapping the load.  */
2052         if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2053             goto fail;
2054         }
2055         op_opc = INDEX_op_setcond_i32;  /* placeholder */
2056         op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2057         op_arg = REG(op_src);
2058 
2059         NEXT_INSN;
2060         switch (ctx->opcode & 0xff00) {
2061         case 0x8b00: /* bf label */
2062         case 0x8f00: /* bf/s label */
2063             if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2064                 goto fail;
2065             }
2066             if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2067                 break;
2068             }
2069             /* We're looking to unconditionally modify Rn with the
2070                result of the comparison, within the delay slot of
2071                the branch.  This is used by older gcc.  */
2072             NEXT_INSN;
2073             if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2074                 mt_dst = B11_8;
2075             } else {
2076                 goto fail;
2077             }
2078             break;
2079 
2080         default:
2081             goto fail;
2082         }
2083         break;
2084 
2085     case 0x2008: /* tst Rm,Rn */
2086         /* Looking for a compare-and-swap against zero.  */
2087         if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2088             goto fail;
2089         }
2090         op_opc = INDEX_op_setcond_i32;
2091         op_arg = tcg_const_i32(0);
2092 
2093         NEXT_INSN;
2094         if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2095             || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2096             goto fail;
2097         }
2098         break;
2099 
2100     default:
2101         /* Put back and re-examine as store.  */
2102         --i;
2103     }
2104 
2105     /*
2106      * Expect the store.
2107      */
2108     /* The store must be the last insn.  */
2109     if (i != max_insns - 1) {
2110         goto fail;
2111     }
2112     NEXT_INSN;
2113     switch (ctx->opcode & 0xf00f) {
2114     case 0x2000: /* mov.b Rm,@Rn */
2115         st_mop = MO_UB;
2116         break;
2117     case 0x2001: /* mov.w Rm,@Rn */
2118         st_mop = MO_UW;
2119         break;
2120     case 0x2002: /* mov.l Rm,@Rn */
2121         st_mop = MO_UL;
2122         break;
2123     default:
2124         goto fail;
2125     }
2126     /* The store must match the load.  */
2127     if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2128         goto fail;
2129     }
2130     st_src = B7_4;
2131 
2132 #undef NEXT_INSN
2133 
2134     /*
2135      * Emit the operation.
2136      */
2137     switch (op_opc) {
2138     case -1:
2139         /* No operation found.  Look for exchange pattern.  */
2140         if (st_src == ld_dst || mv_src >= 0) {
2141             goto fail;
2142         }
2143         tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2144                                 ctx->memidx, ld_mop);
2145         break;
2146 
2147     case INDEX_op_add_i32:
2148         if (op_dst != st_src) {
2149             goto fail;
2150         }
2151         if (op_dst == ld_dst && st_mop == MO_UL) {
2152             tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2153                                          op_arg, ctx->memidx, ld_mop);
2154         } else {
2155             tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2156                                          op_arg, ctx->memidx, ld_mop);
2157             if (op_dst != ld_dst) {
2158                 /* Note that mop sizes < 4 cannot use add_fetch
2159                    because it won't carry into the higher bits.  */
2160                 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2161             }
2162         }
2163         break;
2164 
2165     case INDEX_op_and_i32:
2166         if (op_dst != st_src) {
2167             goto fail;
2168         }
2169         if (op_dst == ld_dst) {
2170             tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2171                                          op_arg, ctx->memidx, ld_mop);
2172         } else {
2173             tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2174                                          op_arg, ctx->memidx, ld_mop);
2175             tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2176         }
2177         break;
2178 
2179     case INDEX_op_or_i32:
2180         if (op_dst != st_src) {
2181             goto fail;
2182         }
2183         if (op_dst == ld_dst) {
2184             tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2185                                         op_arg, ctx->memidx, ld_mop);
2186         } else {
2187             tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2188                                         op_arg, ctx->memidx, ld_mop);
2189             tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2190         }
2191         break;
2192 
2193     case INDEX_op_xor_i32:
2194         if (op_dst != st_src) {
2195             goto fail;
2196         }
2197         if (op_dst == ld_dst) {
2198             tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2199                                          op_arg, ctx->memidx, ld_mop);
2200         } else {
2201             tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2202                                          op_arg, ctx->memidx, ld_mop);
2203             tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2204         }
2205         break;
2206 
2207     case INDEX_op_setcond_i32:
2208         if (st_src == ld_dst) {
2209             goto fail;
2210         }
2211         tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2212                                    REG(st_src), ctx->memidx, ld_mop);
2213         tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2214         if (mt_dst >= 0) {
2215             tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2216         }
2217         break;
2218 
2219     default:
2220         g_assert_not_reached();
2221     }
2222 
2223     /* If op_src is not a valid register, then op_arg was a constant.  */
2224     if (op_src < 0 && op_arg) {
2225         tcg_temp_free_i32(op_arg);
2226     }
2227 
2228     /* The entire region has been translated.  */
2229     ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2230     ctx->base.pc_next = pc_end;
2231     ctx->base.num_insns += max_insns - 1;
2232     return;
2233 
2234  fail:
2235     qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2236                   pc, pc_end);
2237 
2238     /* Restart with the EXCLUSIVE bit set, within a TB run via
2239        cpu_exec_step_atomic holding the exclusive lock.  */
2240     ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
2241     gen_save_cpu_state(ctx, false);
2242     gen_helper_exclusive(cpu_env);
2243     ctx->base.is_jmp = DISAS_NORETURN;
2244 
2245     /* We're not executing an instruction, but we must report one for the
2246        purposes of accounting within the TB.  We might as well report the
2247        entire region consumed via ctx->base.pc_next so that it's immediately
2248        available in the disassembly dump.  */
2249     ctx->base.pc_next = pc_end;
2250     ctx->base.num_insns += max_insns - 1;
2251 }
2252 #endif
2253 
2254 static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2255 {
2256     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2257     CPUSH4State *env = cs->env_ptr;
2258     uint32_t tbflags;
2259     int bound;
2260 
2261     ctx->tbflags = tbflags = ctx->base.tb->flags;
2262     ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
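    /* MMU index 1 is user mode; index 0 is privileged mode (SR.MD set).  */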
2263     ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2264     /* We don't know if the delayed pc came from a dynamic or static branch,
2265        so assume it is a dynamic branch.  */
2266     ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2267     ctx->features = env->features;
2268     ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
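    /* Bank 1 of r0-r7 is addressable only when SR.MD and SR.RB are both set.  */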
2269     ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2270                   (tbflags & (1 << SR_RB))) * 0x10;
2271     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2272 
2273 #ifdef CONFIG_USER_ONLY
2274     if (tbflags & TB_FLAG_GUSA_MASK) {
2275         /* In gUSA exclusive region. */
2276         uint32_t pc = ctx->base.pc_next;
2277         uint32_t pc_end = ctx->base.tb->cs_base;
2278         int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
2279         int max_insns = (pc_end - pc) / 2;
2280 
2281         if (pc != pc_end + backup || max_insns < 2) {
2282             /* This is a malformed gUSA region.  Don't do anything special,
2283                since the interpreter is likely to get confused.  */
2284             ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2285         } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2286             /* Regardless of single-stepping or the end of the page,
2287                we must complete execution of the gUSA region while
2288                holding the exclusive lock.  */
2289             ctx->base.max_insns = max_insns;
2290             return;
2291         }
2292     }
2293 #endif
2294 
2295     /* Since the ISA is fixed-width, we can bound by the number
2296        of instructions remaining on the page.  */
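    /* For example, assuming 4KiB pages, pc_next == 0x8c000ffc gives
       bound == -(0xfffffffc) / 2 == 2 remaining instructions.  */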
2297     bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2298     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2299 }
2300 
2301 static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2302 {
2303 }
2304 
2305 static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2306 {
2307     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2308 
2309     tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2310 }
2311 
2312 static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2313 {
2314     CPUSH4State *env = cs->env_ptr;
2315     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2316 
2317 #ifdef CONFIG_USER_ONLY
2318     if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2319         && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
        /* We're in a gUSA region, and we have not already fallen
2321            back on using an exclusive region.  Attempt to parse the
2322            region into a single supported atomic operation.  Failure
2323            is handled within the parser by raising an exception to
2324            retry using an exclusive region.  */
2325         decode_gusa(ctx, env);
2326         return;
2327     }
2328 #endif
2329 
2330     ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
2331     decode_opc(ctx);
2332     ctx->base.pc_next += 2;
2333 }
2334 
2335 static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2336 {
2337     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2338 
2339     if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
2340         /* Ending the region of exclusivity.  Clear the bits.  */
2341         ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2342     }
2343 
2344     switch (ctx->base.is_jmp) {
2345     case DISAS_STOP:
2346         gen_save_cpu_state(ctx, true);
2347         tcg_gen_exit_tb(NULL, 0);
2348         break;
2349     case DISAS_NEXT:
2350     case DISAS_TOO_MANY:
2351         gen_save_cpu_state(ctx, false);
2352         gen_goto_tb(ctx, 0, ctx->base.pc_next);
2353         break;
2354     case DISAS_NORETURN:
2355         break;
2356     default:
2357         g_assert_not_reached();
2358     }
2359 }
2360 
2361 static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2362                              CPUState *cs, FILE *logfile)
2363 {
2364     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2365     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2366 }
2367 
2368 static const TranslatorOps sh4_tr_ops = {
2369     .init_disas_context = sh4_tr_init_disas_context,
2370     .tb_start           = sh4_tr_tb_start,
2371     .insn_start         = sh4_tr_insn_start,
2372     .translate_insn     = sh4_tr_translate_insn,
2373     .tb_stop            = sh4_tr_tb_stop,
2374     .disas_log          = sh4_tr_disas_log,
2375 };
2376 
2377 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
2378                            target_ulong pc, void *host_pc)
2379 {
2380     DisasContext ctx;
2381 
2382     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
2383 }
2384