xref: /openbmc/qemu/target/m68k/translate.c (revision a1264259)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 //#define DEBUG_DISPATCH 1
42 
/*
 * Declare one static TCG global per QEMU register listed in qregs.h.inc
 * (PC, CC fields, ...); they are instantiated in m68k_tcg_init() below.
 */
43 #define DEFO32(name, offset) static TCGv QREG_##name;
44 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
45 #include "qregs.h.inc"
46 #undef DEFO32
47 #undef DEFO64
48 
/* TCG views of fields that live in CPUState rather than CPUM68KState. */
49 static TCGv_i32 cpu_halted;
50 static TCGv_i32 cpu_exception_index;
51 
/*
 * Register-name storage: 8 x "Dn" plus 8 x "An" at 3 bytes each (incl. NUL)
 * plus 4 x "ACCn" at 5 bytes each -- filled in by m68k_tcg_init().
 */
52 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
53 static TCGv cpu_dregs[8];
54 static TCGv cpu_aregs[8];
55 static TCGv_i64 cpu_macc[4];
56 
/* Extract the 3-bit register field of INSN at bit position POS. */
57 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
58 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() so pending writebacks are seen. */
59 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
60 #define MACREG(acc)     cpu_macc[acc]
61 #define QREG_SP         get_areg(s, 7)
62 
/* Sentinel returned by the EA helpers to signal an invalid addressing mode. */
63 static TCGv NULL_QREG
64 #define IS_NULL_QREG(t) (t == NULL_QREG)
65 /* Used to distinguish stores from bad addressing modes.  */
66 static TCGv store_dummy;
67 
/*
 * One-time initialization of all TCG globals used by this translator:
 * the qregs.h.inc register list, halted/exception_index from CPUState,
 * the D/A register files, the EMAC accumulators, and the two sentinels.
 */
68 void m68k_tcg_init(void)
69 {
70     char *p;
71     int i;
72 
/* Instantiate the QREG_* globals declared above, aliasing env fields. */
73 #define DEFO32(name, offset) \
74     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
75         offsetof(CPUM68KState, offset), #name);
76 #define DEFO64(name, offset) \
77     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
78         offsetof(CPUM68KState, offset), #name);
79 #include "qregs.h.inc"
80 #undef DEFO32
81 #undef DEFO64
82 
/*
 * halted/exception_index live in the parent CPUState, so their offsets
 * are computed relative to env by backing up from CPUM68KState to M68kCPU.
 */
83     cpu_halted = tcg_global_mem_new_i32(cpu_env,
84                                         -offsetof(M68kCPU, env) +
85                                         offsetof(CPUState, halted), "HALTED");
86     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
87                                                  -offsetof(M68kCPU, env) +
88                                                  offsetof(CPUState, exception_index),
89                                                  "EXCEPTION");
90 
/* Build "Dn"/"An"/"ACCn" names into cpu_reg_names; the buffer is sized
 * exactly for 16 three-byte and 4 five-byte strings, so the sprintf
 * calls below cannot overflow. */
91     p = cpu_reg_names;
92     for (i = 0; i < 8; i++) {
93         sprintf(p, "D%d", i);
94         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
95                                           offsetof(CPUM68KState, dregs[i]), p);
96         p += 3;
97         sprintf(p, "A%d", i);
98         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
99                                           offsetof(CPUM68KState, aregs[i]), p);
100         p += 3;
101     }
102     for (i = 0; i < 4; i++) {
103         sprintf(p, "ACC%d", i);
104         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
105                                          offsetof(CPUM68KState, macc[i]), p);
106         p += 5;
107     }
108 
/*
 * Sentinels at (bogus) negative env offsets; in this file they are only
 * compared against (IS_NULL_QREG) or returned, never loaded or stored.
 * NOTE(review): store_dummy reuses the debug name "NULL" -- presumably
 * intentional since neither sentinel should appear in generated code.
 */
109     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
110     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
111 }
112 
113 /* internal defines */
/* Per-translation-block disassembly state. */
114 typedef struct DisasContext {
115     DisasContextBase base;
116     CPUM68KState *env;
117     target_ulong pc;            /* address of the next insn word to fetch */
118     target_ulong pc_prev;       /* NOTE(review): presumably the previous insn's pc; not used in the code visible here */
119     CCOp cc_op; /* Current CC operation */
120     int cc_op_synced;           /* nonzero when env's CC_OP matches cc_op */
121     TCGv_i64 mactmp;            /* MAC scratch; allocation not visible here */
122     int done_mac;
123     int writeback_mask;         /* bit N set: An has a pending writeback */
124     TCGv writeback[8];          /* deferred values for address registers */
125     bool ss_active;             /* NOTE(review): single-step flag -- confirm against tb_flags setup */
126 } DisasContext;
127 
128 static TCGv get_areg(DisasContext *s, unsigned regno)
129 {
130     if (s->writeback_mask & (1 << regno)) {
131         return s->writeback[regno];
132     } else {
133         return cpu_aregs[regno];
134     }
135 }
136 
137 static void delay_set_areg(DisasContext *s, unsigned regno,
138                            TCGv val, bool give_temp)
139 {
140     if (s->writeback_mask & (1 << regno)) {
141         if (give_temp) {
142             s->writeback[regno] = val;
143         } else {
144             tcg_gen_mov_i32(s->writeback[regno], val);
145         }
146     } else {
147         s->writeback_mask |= 1 << regno;
148         if (give_temp) {
149             s->writeback[regno] = val;
150         } else {
151             TCGv tmp = tcg_temp_new();
152             s->writeback[regno] = tmp;
153             tcg_gen_mov_i32(tmp, val);
154         }
155     }
156 }
157 
158 static void do_writebacks(DisasContext *s)
159 {
160     unsigned mask = s->writeback_mask;
161     if (mask) {
162         s->writeback_mask = 0;
163         do {
164             unsigned regno = ctz32(mask);
165             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
166             mask &= mask - 1;
167         } while (mask);
168     }
169 }
170 
171 /* is_jmp field values */
172 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
173 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
174 
175 #if defined(CONFIG_USER_ONLY)
176 #define IS_USER(s) 1
177 #else
/* Privilege level and source/destination function codes, from the TB flags. */
178 #define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
179 #define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
180                       MMU_KERNEL_IDX : MMU_USER_IDX)
181 #define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
182                       MMU_KERNEL_IDX : MMU_USER_IDX)
183 #endif
184 
/* Signature of the per-instruction translation functions. */
185 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
186 
/*
 * DISAS_INSN(name) opens the definition of disas_<name>.  With
 * DEBUG_DISPATCH enabled, the real body is renamed and wrapped in a stub
 * that logs every dispatch before delegating.
 */
187 #ifdef DEBUG_DISPATCH
188 #define DISAS_INSN(name)                                                \
189     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
190                                   uint16_t insn);                       \
191     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
192                              uint16_t insn)                             \
193     {                                                                   \
194         qemu_log("Dispatch " #name "\n");                               \
195         real_disas_##name(env, s, insn);                                \
196     }                                                                   \
197     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
198                                   uint16_t insn)
199 #else
200 #define DISAS_INSN(name)                                                \
201     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
202                              uint16_t insn)
203 #endif
204 
/*
 * Which condition-code flags are live (still needed to materialize the
 * full flag set) in each CC_OP state; set_cc_op() discards the rest.
 */
205 static const uint8_t cc_op_live[CC_OP_NB] = {
206     [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
207     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
208     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
209     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
210     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
211     [CC_OP_LOGIC] = CCF_X | CCF_N
212 };
213 
/*
 * Switch the translator's CC state to OP, marking env's copy stale and
 * discarding any flag registers the new state no longer needs.
 */
214 static void set_cc_op(DisasContext *s, CCOp op)
215 {
216     CCOp old_op = s->cc_op;
217     int dead;
218 
219     if (old_op == op) {
220         return;
221     }
222     s->cc_op = op;
223     s->cc_op_synced = 0;
224 
225     /*
226      * Discard CC computation that will no longer be used.
227      * Note that X and N are never dead.
228      */
229     dead = cc_op_live[old_op] & ~cc_op_live[op];
230     if (dead & CCF_C) {
231         tcg_gen_discard_i32(QREG_CC_C);
232     }
233     if (dead & CCF_Z) {
234         tcg_gen_discard_i32(QREG_CC_Z);
235     }
236     if (dead & CCF_V) {
237         tcg_gen_discard_i32(QREG_CC_V);
238     }
239 }
240 
241 /* Update the CPU env CC_OP state to match the translator's, if stale.  */
242 static void update_cc_op(DisasContext *s)
243 {
244     if (!s->cc_op_synced) {
245         s->cc_op_synced = 1;
246         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
247     }
248 }
249 
250 /* Generate a jump to an immediate address.  */
251 static void gen_jmp_im(DisasContext *s, uint32_t dest)
252 {
253     update_cc_op(s);            /* sync CC_OP before leaving the TB */
254     tcg_gen_movi_i32(QREG_PC, dest);
255     s->base.is_jmp = DISAS_JUMP;
256 }
257 
258 /* Generate a jump to the address in qreg DEST.  */
259 static void gen_jmp(DisasContext *s, TCGv dest)
260 {
261     update_cc_op(s);            /* sync CC_OP before leaving the TB */
262     tcg_gen_mov_i32(QREG_PC, dest);
263     s->base.is_jmp = DISAS_JUMP;
264 }
265 
/* Emit a call to the exception-raising helper for exception number NR. */
266 static void gen_raise_exception(int nr)
267 {
268     gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
269 }
270 
/*
 * Raise exception NR using the Format $2 stack frame, which additionally
 * records the address of the faulting instruction (THIS_PC).
 */
271 static void gen_raise_exception_format2(DisasContext *s, int nr,
272                                         target_ulong this_pc)
273 {
274     /*
275      * Pass the address of the insn to the exception handler,
276      * for recording in the Format $2 (6-word) stack frame.
277      * Re-use mmu.ar for the purpose, since that's only valid
278      * after tlb_fill.
279      */
280     tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
281                    offsetof(CPUM68KState, mmu.ar));
282     gen_raise_exception(nr);
283     s->base.is_jmp = DISAS_NORETURN;
284 }
285 
/* Set PC to DEST, raise exception NR, and end the TB (no fallthrough). */
286 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
287 {
288     update_cc_op(s);
289     tcg_gen_movi_i32(QREG_PC, dest);
290 
291     gen_raise_exception(nr);
292 
293     s->base.is_jmp = DISAS_NORETURN;
294 }
295 
/* Raise an address-error exception at the current instruction. */
296 static inline void gen_addr_fault(DisasContext *s)
297 {
298     gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
299 }
300 
301 /*
302  * Generate a load from the specified address.  Narrow values are
303  *  sign extended to full register width.
304  */
305 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
306                             int sign, int index)
307 {
308     TCGv tmp = tcg_temp_new_i32();
309 
310     switch (opsize) {
311     case OS_BYTE:
312     case OS_WORD:
313     case OS_LONG:
        /*
         * Relies on OS_BYTE/OS_WORD/OS_LONG sharing the MemOp size
         * encoding (MO_8/MO_16/MO_32), so opsize can be OR'ed in directly.
         */
314         tcg_gen_qemu_ld_tl(tmp, addr, index,
315                            opsize | (sign ? MO_SIGN : 0) | MO_TE);
316         break;
317     default:
318         g_assert_not_reached();
319     }
320     return tmp;
321 }
322 
323 /* Generate a store of VAL to ADDR using memory index INDEX.  */
324 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
325                              int index)
326 {
327     switch (opsize) {
328     case OS_BYTE:
329     case OS_WORD:
330     case OS_LONG:
        /* As in gen_load: OS_* doubles as the MemOp size encoding. */
331         tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
332         break;
333     default:
334         g_assert_not_reached();
335     }
336 }
337 
/* Direction/extension selector for gen_ldst()/gen_ea_mode(). */
338 typedef enum {
339     EA_STORE,
340     EA_LOADU,   /* load, zero-extended */
341     EA_LOADS    /* load, sign-extended */
342 } ea_what;
343 
344 /*
345  * Generate an unsigned load if VAL is 0 a signed load if val is -1,
346  * otherwise generate a store.
347  */
348 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
349                      ea_what what, int index)
350 {
351     if (what == EA_STORE) {
352         gen_store(s, opsize, addr, val, index);
353         return store_dummy;
354     } else {
355         return gen_load(s, opsize, addr, what == EA_LOADS, index);
356     }
357 }
358 
359 /* Read a 16-bit immediate constant from the instruction stream,
   advancing the fetch pointer s->pc past it. */
360 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
361 {
362     uint16_t im;
363     im = translator_lduw(env, &s->base, s->pc);
364     s->pc += 2;
365     return im;
366 }
367 
368 /* Read an 8-bit immediate constant.  m68k immediates occupy a full
   extension word, so a 16-bit word is consumed and truncated. */
369 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
370 {
371     return read_im16(env, s);
372 }
373 
374 /* Read a 32-bit immediate constant.  */
375 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
376 {
377     uint32_t im;
378     im = read_im16(env, s) << 16;
379     im |= 0xffff & read_im16(env, s);
380     return im;
381 }
382 
383 /* Read a 64-bit immediate constant.  */
384 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
385 {
386     uint64_t im;
387     im = (uint64_t)read_im32(env, s) << 32;
388     im |= (uint64_t)read_im32(env, s);
389     return im;
390 }
391 
392 /* Calculate an address index from extension word EXT: the selected
   D or A register, optionally sign-extended from 16 bits and scaled.
   TMP is scratch; the result may alias it. */
393 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
394 {
395     TCGv add;
396     int scale;
397 
    /* Bit 15 selects A vs D register; bits 14-12 the register number. */
398     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
399     if ((ext & 0x800) == 0) {
        /* Bit 11 clear: word-sized index, sign-extend it. */
400         tcg_gen_ext16s_i32(tmp, add);
401         add = tmp;
402     }
403     scale = (ext >> 9) & 3;
404     if (scale != 0) {
405         tcg_gen_shli_i32(tmp, add, scale);
406         add = tmp;
407     }
408     return add;
409 }
410 
411 /*
412  * Handle a base + index + displacement effective address.
413  * A NULL_QREG base means pc-relative.
    * Returns NULL_QREG if the extension word encodes a mode the current
    * CPU feature set does not support.
414  */
415 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
416 {
417     uint32_t offset;
418     uint16_t ext;
419     TCGv add;
420     TCGv tmp;
421     uint32_t bd, od;
422 
423     offset = s->pc;    /* address of the extension word, for pc-relative */
424     ext = read_im16(env, s);
425 
    /* Bit 11 clear means a word-sized index register. */
426     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
427         return NULL_QREG;
428 
429     if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
430         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* No scaled index: force the scale field (bits 10-9) to zero. */
431         ext &= ~(3 << 9);
432     }
433 
434     if (ext & 0x100) {
435         /* full extension word format */
436         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
437             return NULL_QREG;
438 
        /* Bits 5-4: base displacement size (0x20 = word, 0x30 = long). */
439         if ((ext & 0x30) > 0x10) {
440             /* base displacement */
441             if ((ext & 0x30) == 0x20) {
442                 bd = (int16_t)read_im16(env, s);
443             } else {
444                 bd = read_im32(env, s);
445             }
446         } else {
447             bd = 0;
448         }
449         tmp = tcg_temp_new();
        /* Bit 6: index suppress; bit 2: post-indexed selector. */
450         if ((ext & 0x44) == 0) {
451             /* pre-index */
452             add = gen_addr_index(s, ext, tmp);
453         } else {
454             add = NULL_QREG;
455         }
456         if ((ext & 0x80) == 0) {
457             /* base not suppressed */
458             if (IS_NULL_QREG(base)) {
                /* pc-relative: fold bd into the constant base. */
459                 base = tcg_constant_i32(offset + bd);
460                 bd = 0;
461             }
462             if (!IS_NULL_QREG(add)) {
463                 tcg_gen_add_i32(tmp, add, base);
464                 add = tmp;
465             } else {
466                 add = base;
467             }
468         }
469         if (!IS_NULL_QREG(add)) {
470             if (bd != 0) {
471                 tcg_gen_addi_i32(tmp, add, bd);
472                 add = tmp;
473             }
474         } else {
475             add = tcg_constant_i32(bd);
476         }
        /* Bits 1-0: memory-indirect action; nonzero means indirect. */
477         if ((ext & 3) != 0) {
478             /* memory indirect */
479             base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
480             if ((ext & 0x44) == 4) {
                /* post-indexed: add the index after the indirection. */
481                 add = gen_addr_index(s, ext, tmp);
482                 tcg_gen_add_i32(tmp, add, base);
483                 add = tmp;
484             } else {
485                 add = base;
486             }
487             if ((ext & 3) > 1) {
488                 /* outer displacement */
489                 if ((ext & 3) == 2) {
490                     od = (int16_t)read_im16(env, s);
491                 } else {
492                     od = read_im32(env, s);
493                 }
494             } else {
495                 od = 0;
496             }
497             if (od != 0) {
498                 tcg_gen_addi_i32(tmp, add, od);
499                 add = tmp;
500             }
501         }
502     } else {
503         /* brief extension word format */
504         tmp = tcg_temp_new();
505         add = gen_addr_index(s, ext, tmp);
506         if (!IS_NULL_QREG(base)) {
507             tcg_gen_add_i32(tmp, add, base);
            /* Low byte of ext is an 8-bit signed displacement. */
508             if ((int8_t)ext)
509                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
510         } else {
511             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
512         }
513         add = tmp;
514     }
515     return add;
516 }
517 
518 /* Sign or zero extend a value.  */
519 
/* Extend VAL to 32 bits into RES according to OPSIZE; SIGN selects
   sign vs zero extension.  OS_LONG is a plain move. */
520 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
521 {
522     switch (opsize) {
523     case OS_BYTE:
524         if (sign) {
525             tcg_gen_ext8s_i32(res, val);
526         } else {
527             tcg_gen_ext8u_i32(res, val);
528         }
529         break;
530     case OS_WORD:
531         if (sign) {
532             tcg_gen_ext16s_i32(res, val);
533         } else {
534             tcg_gen_ext16u_i32(res, val);
535         }
536         break;
537     case OS_LONG:
538         tcg_gen_mov_i32(res, val);
539         break;
540     default:
541         g_assert_not_reached();
542     }
543 }
544 
545 /* Evaluate all the CC flags.  */
546 
/*
 * Materialize the full C/V/Z/N/X flag set from the current lazy CC_OP
 * state, leaving s->cc_op = CC_OP_FLAGS.
 */
547 static void gen_flush_flags(DisasContext *s)
548 {
549     TCGv t0, t1;
550 
551     switch (s->cc_op) {
552     case CC_OP_FLAGS:
553         return;
554 
555     case CC_OP_ADDB:
556     case CC_OP_ADDW:
557     case CC_OP_ADDL:
        /*
         * CC_N holds the result and CC_V the second operand
         * (see gen_update_cc_add); t0 = N - V recovers the first operand.
         */
558         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
559         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
560         /* Compute signed overflow for addition.  */
561         t0 = tcg_temp_new();
562         t1 = tcg_temp_new();
563         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
564         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
565         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
566         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
567         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
568         break;
569 
570     case CC_OP_SUBB:
571     case CC_OP_SUBW:
572     case CC_OP_SUBL:
        /* As with CMP: CC_N holds the result, CC_V the subtrahend. */
573         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
574         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
575         /* Compute signed overflow for subtraction.  */
576         t0 = tcg_temp_new();
577         t1 = tcg_temp_new();
578         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
579         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
580         tcg_gen_xor_i32(t1, QREG_CC_N, t0);
581         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
582         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
583         break;
584 
585     case CC_OP_CMPB:
586     case CC_OP_CMPW:
587     case CC_OP_CMPL:
        /*
         * CC_N holds dest and CC_V holds src (see gen_update_cc_cmp);
         * the difference is computed here rather than at the compare.
         */
588         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
589         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
590         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
591         /* Compute signed overflow for subtraction.  */
592         t0 = tcg_temp_new();
593         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
594         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
595         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
596         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
597         break;
598 
599     case CC_OP_LOGIC:
600         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
601         tcg_gen_movi_i32(QREG_CC_C, 0);
602         tcg_gen_movi_i32(QREG_CC_V, 0);
603         break;
604 
605     case CC_OP_DYNAMIC:
606         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
607         s->cc_op_synced = 1;
608         break;
609 
610     default:
611         gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
612         s->cc_op_synced = 1;
613         break;
614     }
615 
616     /* Note that flush_flags also assigned to env->cc_op.  */
617     s->cc_op = CC_OP_FLAGS;
618 }
619 
620 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
621 {
622     TCGv tmp;
623 
624     if (opsize == OS_LONG) {
625         tmp = val;
626     } else {
627         tmp = tcg_temp_new();
628         gen_ext(tmp, val, opsize, sign);
629     }
630 
631     return tmp;
632 }
633 
/* Set the flags for a logical result: CC_N holds the sign-extended value. */
634 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
635 {
636     gen_ext(QREG_CC_N, val, opsize, 1);
637     set_cc_op(s, CC_OP_LOGIC);
638 }
639 
/* Record a compare lazily: CC_N = dest, CC_V = src; gen_flush_flags()
   derives the actual flags from them later. */
640 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
641 {
642     tcg_gen_mov_i32(QREG_CC_N, dest);
643     tcg_gen_mov_i32(QREG_CC_V, src);
644     set_cc_op(s, CC_OP_CMPB + opsize);
645 }
646 
/* Record an add/sub lazily: CC_N = sign-extended result, CC_V = operand.
   Caller also sets the matching CC_OP_ADD*/CC_OP_SUB* state. */
647 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
648 {
649     gen_ext(QREG_CC_N, dest, opsize, 1);
650     tcg_gen_mov_i32(QREG_CC_V, src);
651 }
652 
653 static inline int opsize_bytes(int opsize)
654 {
655     switch (opsize) {
656     case OS_BYTE: return 1;
657     case OS_WORD: return 2;
658     case OS_LONG: return 4;
659     case OS_SINGLE: return 4;
660     case OS_DOUBLE: return 8;
661     case OS_EXTENDED: return 12;
662     case OS_PACKED: return 12;
663     default:
664         g_assert_not_reached();
665     }
666 }
667 
668 static inline int insn_opsize(int insn)
669 {
670     switch ((insn >> 6) & 3) {
671     case 0: return OS_BYTE;
672     case 1: return OS_WORD;
673     case 2: return OS_LONG;
674     default:
675         g_assert_not_reached();
676     }
677 }
678 
679 static inline int ext_opsize(int ext, int pos)
680 {
681     switch ((ext >> pos) & 7) {
682     case 0: return OS_LONG;
683     case 1: return OS_SINGLE;
684     case 2: return OS_EXTENDED;
685     case 3: return OS_PACKED;
686     case 4: return OS_WORD;
687     case 5: return OS_DOUBLE;
688     case 6: return OS_BYTE;
689     default:
690         g_assert_not_reached();
691     }
692 }
693 
694 /*
695  * Assign value to a register.  If the width is less than the register width
696  * only the low part of the register is set.
697  */
698 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
699 {
700     switch (opsize) {
701     case OS_BYTE:
702         tcg_gen_deposit_i32(reg, reg, val, 0, 8);
703         break;
704     case OS_WORD:
705         tcg_gen_deposit_i32(reg, reg, val, 0, 16);
706         break;
707     case OS_LONG:
708     case OS_SINGLE:
709         tcg_gen_mov_i32(reg, val);
710         break;
711     default:
712         g_assert_not_reached();
713     }
714 }
715 
716 /*
717  * Generate code for an "effective address".  Does not adjust the base
718  * register for autoincrement addressing modes.
    * Returns NULL_QREG for modes that have no memory address (register
    * direct, immediate) or that are invalid here.
719  */
720 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
721                          int mode, int reg0, int opsize)
722 {
723     TCGv reg;
724     TCGv tmp;
725     uint16_t ext;
726     uint32_t offset;
727 
728     switch (mode) {
729     case 0: /* Data register direct.  */
730     case 1: /* Address register direct.  */
731         return NULL_QREG;
732     case 3: /* Indirect postincrement.  */
733         if (opsize == OS_UNSIZED) {
734             return NULL_QREG;
735         }
736         /* fallthru */
737     case 2: /* Indirect register */
738         return get_areg(s, reg0);
739     case 4: /* Indirect predecrememnt.  */
740         if (opsize == OS_UNSIZED) {
741             return NULL_QREG;
742         }
743         reg = get_areg(s, reg0);
744         tmp = tcg_temp_new();
        /* Byte accesses through SP keep the stack word-aligned on 680x0. */
745         if (reg0 == 7 && opsize == OS_BYTE &&
746             m68k_feature(s->env, M68K_FEATURE_M68K)) {
747             tcg_gen_subi_i32(tmp, reg, 2);
748         } else {
749             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
750         }
751         return tmp;
752     case 5: /* Indirect displacement.  */
753         reg = get_areg(s, reg0);
754         tmp = tcg_temp_new();
755         ext = read_im16(env, s);
756         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
757         return tmp;
758     case 6: /* Indirect index + displacement.  */
759         reg = get_areg(s, reg0);
760         return gen_lea_indexed(env, s, reg);
761     case 7: /* Other */
762         switch (reg0) {
763         case 0: /* Absolute short.  */
764             offset = (int16_t)read_im16(env, s);
765             return tcg_constant_i32(offset);
766         case 1: /* Absolute long.  */
767             offset = read_im32(env, s);
768             return tcg_constant_i32(offset);
769         case 2: /* pc displacement  */
            /* Displacement is relative to the extension word's address. */
770             offset = s->pc;
771             offset += (int16_t)read_im16(env, s);
772             return tcg_constant_i32(offset);
773         case 3: /* pc index+displacement.  */
774             return gen_lea_indexed(env, s, NULL_QREG);
775         case 4: /* Immediate.  */
776         default:
777             return NULL_QREG;
778         }
779     }
780     /* Should never happen.  */
781     return NULL_QREG;
782 }
783 
784 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
785                     int opsize)
786 {
787     int mode = extract32(insn, 3, 3);
788     int reg0 = REG(insn, 0);
789     return gen_lea_mode(env, s, mode, reg0, opsize);
790 }
791 
792 /*
793  * Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
794  * a write otherwise it is a read (0 == sign extend, -1 == zero extend).
795  * ADDRP is non-null for readwrite operands.
    * NOTE(review): the numeric WHAT description above predates the ea_what
    * enum actually used (EA_STORE/EA_LOADU/EA_LOADS) -- the enum governs.
796  */
797 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
798                         int opsize, TCGv val, TCGv *addrp, ea_what what,
799                         int index)
800 {
801     TCGv reg, tmp, result;
802     int32_t offset;
803 
804     switch (mode) {
805     case 0: /* Data register direct.  */
806         reg = cpu_dregs[reg0];
807         if (what == EA_STORE) {
808             gen_partset_reg(opsize, reg, val);
809             return store_dummy;
810         } else {
811             return gen_extend(s, reg, opsize, what == EA_LOADS);
812         }
813     case 1: /* Address register direct.  */
814         reg = get_areg(s, reg0);
815         if (what == EA_STORE) {
816             tcg_gen_mov_i32(reg, val);
817             return store_dummy;
818         } else {
819             return gen_extend(s, reg, opsize, what == EA_LOADS);
820         }
821     case 2: /* Indirect register */
822         reg = get_areg(s, reg0);
823         return gen_ldst(s, opsize, reg, val, what, index);
824     case 3: /* Indirect postincrement.  */
825         reg = get_areg(s, reg0);
826         result = gen_ldst(s, opsize, reg, val, what, index);
        /* Defer the increment; for readwrite loads it happens on the
           store half, not here. */
827         if (what == EA_STORE || !addrp) {
828             TCGv tmp = tcg_temp_new();
            /* Byte pushes through SP move by 2 to keep it word-aligned. */
829             if (reg0 == 7 && opsize == OS_BYTE &&
830                 m68k_feature(s->env, M68K_FEATURE_M68K)) {
831                 tcg_gen_addi_i32(tmp, reg, 2);
832             } else {
833                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
834             }
835             delay_set_areg(s, reg0, tmp, true);
836         }
837         return result;
838     case 4: /* Indirect predecrememnt.  */
        /* For the store half of a readwrite operand, reuse the address
           computed by the load half via *addrp. */
839         if (addrp && what == EA_STORE) {
840             tmp = *addrp;
841         } else {
842             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
843             if (IS_NULL_QREG(tmp)) {
844                 return tmp;
845             }
846             if (addrp) {
847                 *addrp = tmp;
848             }
849         }
850         result = gen_ldst(s, opsize, tmp, val, what, index);
851         if (what == EA_STORE || !addrp) {
852             delay_set_areg(s, reg0, tmp, false);
853         }
854         return result;
855     case 5: /* Indirect displacement.  */
856     case 6: /* Indirect index + displacement.  */
857     do_indirect:
858         if (addrp && what == EA_STORE) {
859             tmp = *addrp;
860         } else {
861             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
862             if (IS_NULL_QREG(tmp)) {
863                 return tmp;
864             }
865             if (addrp) {
866                 *addrp = tmp;
867             }
868         }
869         return gen_ldst(s, opsize, tmp, val, what, index);
870     case 7: /* Other */
871         switch (reg0) {
872         case 0: /* Absolute short.  */
873         case 1: /* Absolute long.  */
874         case 2: /* pc displacement  */
875         case 3: /* pc index+displacement.  */
876             goto do_indirect;
877         case 4: /* Immediate.  */
878             /* Sign extend values for consistency.  */
879             switch (opsize) {
880             case OS_BYTE:
881                 if (what == EA_LOADS) {
882                     offset = (int8_t)read_im8(env, s);
883                 } else {
884                     offset = read_im8(env, s);
885                 }
886                 break;
887             case OS_WORD:
888                 if (what == EA_LOADS) {
889                     offset = (int16_t)read_im16(env, s);
890                 } else {
891                     offset = read_im16(env, s);
892                 }
893                 break;
894             case OS_LONG:
895                 offset = read_im32(env, s);
896                 break;
897             default:
898                 g_assert_not_reached();
899             }
900             return tcg_constant_i32(offset);
901         default:
902             return NULL_QREG;
903         }
904     }
905     /* Should never happen.  */
906     return NULL_QREG;
907 }
908 
909 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
910                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
911 {
912     int mode = extract32(insn, 3, 3);
913     int reg0 = REG(insn, 0);
914     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
915 }
916 
/* Return a host pointer to FP register FREG within env. */
917 static TCGv_ptr gen_fp_ptr(int freg)
918 {
919     TCGv_ptr fp = tcg_temp_new_ptr();
920     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
921     return fp;
922 }
923 
/* Return a host pointer to env's fp_result scratch register. */
924 static TCGv_ptr gen_fp_result_ptr(void)
925 {
926     TCGv_ptr fp = tcg_temp_new_ptr();
927     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
928     return fp;
929 }
930 
/* Copy one 80-bit FP register: the 16-bit exponent/sign half (l.upper)
   and the 64-bit mantissa (l.lower) are moved separately. */
931 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
932 {
933     TCGv t32;
934     TCGv_i64 t64;
935 
936     t32 = tcg_temp_new();
937     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
938     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
939 
940     t64 = tcg_temp_new_i64();
941     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
942     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
943 }
944 
/*
 * Load an FP value of the given OPSIZE from ADDR into register FP,
 * converting to the internal extended format via the ext*/exts helpers.
 * OS_EXTENDED and OS_PACKED raise FP_UNIMP where unsupported.
 */
945 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
946                         int index)
947 {
948     TCGv tmp;
949     TCGv_i64 t64;
950 
951     t64 = tcg_temp_new_i64();
952     tmp = tcg_temp_new();
953     switch (opsize) {
954     case OS_BYTE:
955     case OS_WORD:
956     case OS_LONG:
        /* Integer source: sign-extended load, then int-to-FP conversion. */
957         tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
958         gen_helper_exts32(cpu_env, fp, tmp);
959         break;
960     case OS_SINGLE:
961         tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
962         gen_helper_extf32(cpu_env, fp, tmp);
963         break;
964     case OS_DOUBLE:
965         tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
966         gen_helper_extf64(cpu_env, fp, t64);
967         break;
968     case OS_EXTENDED:
969         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
970             gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
971             break;
972         }
        /* 96-bit layout: sign/exponent in the top half of the first
           longword, 64-bit mantissa in the following quadword. */
973         tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
974         tcg_gen_shri_i32(tmp, tmp, 16);
975         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
976         tcg_gen_addi_i32(tmp, addr, 4);
977         tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
978         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
979         break;
980     case OS_PACKED:
981         /*
982          * unimplemented data type on 68040/ColdFire
983          * FIXME if needed for another FPU
984          */
985         gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
986         break;
987     default:
988         g_assert_not_reached();
989     }
990 }
991 
/*
 * Convert the internal FP value at *fp to format OPSIZE (via the red*
 * helpers) and store it at ADDR (memory index INDEX).  OS_EXTENDED is
 * written as the l.upper field in the high half of the first longword
 * followed by the 64-bit l.lower field at ADDR + 4.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        /* Relies on OS_BYTE/WORD/LONG matching the MemOp size encoding. */
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs do not implement the extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1038 
1039 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1040                         TCGv_ptr fp, ea_what what, int index)
1041 {
1042     if (what == EA_STORE) {
1043         gen_store_fp(s, opsize, addr, fp, index);
1044     } else {
1045         gen_load_fp(s, opsize, addr, fp, index);
1046     }
1047 }
1048 
/*
 * Generate code to access an FPU operand given addressing MODE and
 * register field REG0.  For EA_STORE the value at *fp is written out
 * in format OPSIZE; otherwise the operand is loaded and converted
 * into *fp.  Returns 0 on success, -1 for an invalid mode/what
 * combination (the caller raises the address fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                /* A 32-bit data register cannot hold the larger formats. */
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address only after the access. */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be sources.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    /* ColdFire FPUs do not implement the extended format. */
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1181 
1182 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1183                        int opsize, TCGv_ptr fp, ea_what what, int index)
1184 {
1185     int mode = extract32(insn, 3, 3);
1186     int reg0 = REG(insn, 0);
1187     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1188 }
1189 
/*
 * A decoded condition code: the condition holds when
 * "v1 tcond v2" evaluates true.  Filled in by gen_cc_cond().
 */
typedef struct {
    TCGCond tcond;
    TCGv v1;
    TCGv v2;
} DisasCompare;
1195 
/*
 * Decompose m68k condition code COND (0..15) into a TCG comparison in
 * *c.  Where the pending cc_op allows, the condition is derived
 * directly from the lazy flag inputs without a full flags flush;
 * otherwise the flags are flushed to CC_OP_FLAGS and the individual
 * flag registers are tested.  Each even condition is the inverse of
 * the following odd one, handled by inverting tcond at the end.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Recompute the size-extended subtraction result and test
               its sign against zero.  */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the negation of the odd ones above. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1368 
1369 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1370 {
1371   DisasCompare c;
1372 
1373   gen_cc_cond(&c, s, cond);
1374   update_cc_op(s);
1375   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1376 }
1377 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    /* s->pc is the address of the instruction following this one. */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1385 
/*
 * Evaluate INSN's effective address as a source operand of size
 * OPSIZE into RESULT (signed load when OP_SIGN is set); ADDRP is
 * passed through to gen_ea so the address can be reused.  On an
 * invalid addressing mode this raises an address fault and returns
 * from the enclosing DISAS_INSN function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1394 
/*
 * Store VAL of size OPSIZE to INSN's effective address; ADDRP, when
 * non-NULL, lets a previously computed address be reused.  On an
 * invalid addressing mode this raises an address fault and returns
 * from the enclosing DISAS_INSN function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1403 
/*
 * Generate a jump to an immediate address.  N is the goto_tb slot,
 * DEST the target PC, SRC the address reported in a trace exception.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Single-stepping: set PC, then raise the trace exception. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct chaining to the destination TB via slot N. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Otherwise exit to the main loop with PC set to DEST. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1422 
/*
 * Scc <ea>: set the destination byte to 0xff when the condition
 * holds, 0 otherwise (setcond yields 0/1, negated to 0/-1).
 */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);

    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
}
1438 
/*
 * DBcc Dn,<disp>: if the condition holds, fall through to the next
 * instruction; otherwise decrement the low word of Dn and branch back
 * to base + disp unless the counter reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;   /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low word of Dn is written back. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1462 
/* Unimplemented opcode in the MAC space: raise a line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1467 
/* Unimplemented opcode in the FPU space: raise a line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1472 
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    /* Log the opcode and faulting PC, then raise EXCP_ILLEGAL. */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1484 
1485 DISAS_INSN(mulw)
1486 {
1487     TCGv reg;
1488     TCGv tmp;
1489     TCGv src;
1490     int sign;
1491 
1492     sign = (insn & 0x100) != 0;
1493     reg = DREG(insn, 9);
1494     tmp = tcg_temp_new();
1495     if (sign)
1496         tcg_gen_ext16s_i32(tmp, reg);
1497     else
1498         tcg_gen_ext16u_i32(tmp, reg);
1499     SRC_EA(env, src, OS_WORD, sign, NULL);
1500     tcg_gen_mul_i32(tmp, tmp, src);
1501     tcg_gen_mov_i32(reg, tmp);
1502     gen_logic_cc(s, tmp, OS_LONG);
1503 }
1504 
/*
 * DIVU.W / DIVS.W <ea>,Dn: 32/16 -> 16r:16q.  Insn bit 8 selects the
 * signed form.  The division itself is done in a helper; flags are
 * taken from CC_OP_FLAGS afterwards.
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    /* Pass the register number and instruction length to the helper. */
    destr = tcg_constant_i32(REG(insn, 9));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src, ilen);
    } else {
        gen_helper_divuw(cpu_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1529 
/*
 * DIVU.L / DIVS.L with an extension word.  Ext bit 11 selects the
 * signed form; ext bit 10 selects the 64/32 quad form (requires the
 * QUAD_MULDIV feature).  Division and flags are done in helpers.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        /* Register numbers and insn length are passed as constants. */
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1576 
/*
 * Emit code for DEST = DEST + SRC + X in packed BCD (one byte, two
 * decimal digits), using the pre-bias/correct scheme outlined below.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0, i.e. 0x6 per digit that did not carry */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1641 
/*
 * Emit code for DEST = DEST - SRC - X in packed BCD, implemented as a
 * ten's-complement addition (see the identity below).
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1693 
/*
 * Update flags after a BCD operation on VAL: Z is sticky (the low
 * result byte is OR-ed into CC_Z, so Z can only remain set if every
 * result so far was zero); C and X both take the carry out of bit 8.
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1703 
/* ABCD Dy,Dx: BCD add of the low bytes of two data registers. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1718 
/* ABCD -(Ay),-(Ax): BCD add of two bytes loaded with pre-decrement. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Store the result back to the destination address saved above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1739 
/* SBCD Dy,Dx: BCD subtract of the low bytes of two data registers. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1755 
/* SBCD -(Ay),-(Ax): BCD subtract of two bytes loaded with pre-decrement. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Store the result back to the destination address saved above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1776 
/* NBCD <ea>: BCD negate, computed as 0 - operand - X. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1794 
/*
 * ADD/SUB in both directions: <ea>,Dn (insn bit 8 clear) and Dn,<ea>
 * (bit 8 set); bit 14 selects ADD vs SUB.  X is computed here as the
 * unsigned carry/borrow; N/Z/V are left to the CC_OP_ADDx/CC_OP_SUBx
 * lazy flag evaluation via gen_update_cc_add.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn,<ea>: the memory operand is the destination. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: the unsigned sum wrapped below the addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow out: minuend unsigned-less-than subtrahend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1832 
/* Reverse the order of the bits in REG (done entirely in a helper).  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1840 
1841 DISAS_INSN(bitop_reg)
1842 {
1843     int opsize;
1844     int op;
1845     TCGv src1;
1846     TCGv src2;
1847     TCGv tmp;
1848     TCGv addr;
1849     TCGv dest;
1850 
1851     if ((insn & 0x38) != 0)
1852         opsize = OS_BYTE;
1853     else
1854         opsize = OS_LONG;
1855     op = (insn >> 6) & 3;
1856     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1857 
1858     gen_flush_flags(s);
1859     src2 = tcg_temp_new();
1860     if (opsize == OS_BYTE)
1861         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1862     else
1863         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1864 
1865     tmp = tcg_temp_new();
1866     tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1867 
1868     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1869 
1870     dest = tcg_temp_new();
1871     switch (op) {
1872     case 1: /* bchg */
1873         tcg_gen_xor_i32(dest, src1, tmp);
1874         break;
1875     case 2: /* bclr */
1876         tcg_gen_andc_i32(dest, src1, tmp);
1877         break;
1878     case 3: /* bset */
1879         tcg_gen_or_i32(dest, src1, tmp);
1880         break;
1881     default: /* btst */
1882         break;
1883     }
1884     if (op) {
1885         DEST_EA(env, insn, opsize, dest, &addr);
1886     }
1887 }
1888 
/*
 * SATS Dn: saturate Dn, with the helper consuming the V flag to
 * decide whether to clamp (see gen_helper_sats); N/Z are then
 * recomputed from the result.
 */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1897 
/* Push VAL onto the stack: SP -= 4, then store a long at the new SP. */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    /* SP is committed only after the store, so a faulting store
       leaves SP unchanged. */
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    tcg_gen_mov_i32(QREG_SP, tmp);
}
1907 
1908 static TCGv mreg(int reg)
1909 {
1910     if (reg < 8) {
1911         /* Dx */
1912         return cpu_dregs[reg];
1913     }
1914     /* Ax */
1915     return cpu_aregs[reg & 7];
1916 }
1917 
/*
 * MOVEM.W/.L: move multiple registers to/from memory.  Insn bit 10
 * selects load (memory -> registers), bit 6 selects long size, and
 * the following extension word is the register mask.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a copy so the address register is not clobbered early. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* All loads are performed before any register is written. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2023 
/*
 * movep: transfer two or four bytes between a data register and
 * alternate memory bytes starting at (An) + 16-bit displacement.
 */
DISAS_INSN(movep)
{
    uint8_t i;          /* bytes remaining to transfer */
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;          /* running memory address */
    TCGv dbuf;          /* one-byte staging value */

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    /* Bit 6 selects the size: long (4 bytes) vs word (2 bytes). */
    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    /* Bit 7 selects the direction: set = register to memory. */
    if (insn & 0x80) {
        for ( ; i > 0 ; i--) {
            /* Store bytes most-significant first, two bytes apart. */
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        for ( ; i > 0 ; i--) {
            /* Load bytes and deposit into reg, high byte first. */
            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
}
2066 
2067 DISAS_INSN(bitop_im)
2068 {
2069     int opsize;
2070     int op;
2071     TCGv src1;
2072     uint32_t mask;
2073     int bitnum;
2074     TCGv tmp;
2075     TCGv addr;
2076 
2077     if ((insn & 0x38) != 0)
2078         opsize = OS_BYTE;
2079     else
2080         opsize = OS_LONG;
2081     op = (insn >> 6) & 3;
2082 
2083     bitnum = read_im16(env, s);
2084     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2085         if (bitnum & 0xfe00) {
2086             disas_undef(env, s, insn);
2087             return;
2088         }
2089     } else {
2090         if (bitnum & 0xff00) {
2091             disas_undef(env, s, insn);
2092             return;
2093         }
2094     }
2095 
2096     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2097 
2098     gen_flush_flags(s);
2099     if (opsize == OS_BYTE)
2100         bitnum &= 7;
2101     else
2102         bitnum &= 31;
2103     mask = 1 << bitnum;
2104 
2105    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2106 
2107     if (op) {
2108         tmp = tcg_temp_new();
2109         switch (op) {
2110         case 1: /* bchg */
2111             tcg_gen_xori_i32(tmp, src1, mask);
2112             break;
2113         case 2: /* bclr */
2114             tcg_gen_andi_i32(tmp, src1, ~mask);
2115             break;
2116         case 3: /* bset */
2117             tcg_gen_ori_i32(tmp, src1, mask);
2118             break;
2119         default: /* btst */
2120             break;
2121         }
2122         DEST_EA(env, insn, opsize, tmp, &addr);
2123     }
2124 }
2125 
/*
 * Return a new temporary holding the current CCR value, computed by
 * helper after the lazy cc state has been synced.
 */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}
2135 
/*
 * Return a new temporary holding the full SR: the system bits from
 * QREG_SR (low bits masked off) combined with the live CCR.
 */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
2147 
/*
 * Load SR (or only CCR when ccr_only) from an immediate value.
 * Afterwards the flags are in the CC_OP_FLAGS representation.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        /*
         * Decompose the immediate into the individual flag registers.
         * Note that N and V use -1 for "set", while Z is inverted
         * (0 means the Z flag is set).
         */
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2163 
/*
 * Load SR (or only CCR when ccr_only) from a TCG value, via helpers.
 * Afterwards the flags are in the CC_OP_FLAGS representation.
 */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2175 
/*
 * Common code for "move <ea>,SR" and "move <ea>,CCR".  The source is
 * either an immediate word (EA field 0x3c encodes immediate mode) or
 * a word-sized effective address.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
2189 
2190 DISAS_INSN(arith_im)
2191 {
2192     int op;
2193     TCGv im;
2194     TCGv src1;
2195     TCGv dest;
2196     TCGv addr;
2197     int opsize;
2198     bool with_SR = ((insn & 0x3f) == 0x3c);
2199 
2200     op = (insn >> 9) & 7;
2201     opsize = insn_opsize(insn);
2202     switch (opsize) {
2203     case OS_BYTE:
2204         im = tcg_constant_i32((int8_t)read_im8(env, s));
2205         break;
2206     case OS_WORD:
2207         im = tcg_constant_i32((int16_t)read_im16(env, s));
2208         break;
2209     case OS_LONG:
2210         im = tcg_constant_i32(read_im32(env, s));
2211         break;
2212     default:
2213         g_assert_not_reached();
2214     }
2215 
2216     if (with_SR) {
2217         /* SR/CCR can only be used with andi/eori/ori */
2218         if (op == 2 || op == 3 || op == 6) {
2219             disas_undef(env, s, insn);
2220             return;
2221         }
2222         switch (opsize) {
2223         case OS_BYTE:
2224             src1 = gen_get_ccr(s);
2225             break;
2226         case OS_WORD:
2227             if (IS_USER(s)) {
2228                 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2229                 return;
2230             }
2231             src1 = gen_get_sr(s);
2232             break;
2233         default:
2234             /* OS_LONG; others already g_assert_not_reached.  */
2235             disas_undef(env, s, insn);
2236             return;
2237         }
2238     } else {
2239         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2240     }
2241     dest = tcg_temp_new();
2242     switch (op) {
2243     case 0: /* ori */
2244         tcg_gen_or_i32(dest, src1, im);
2245         if (with_SR) {
2246             gen_set_sr(s, dest, opsize == OS_BYTE);
2247             gen_exit_tb(s);
2248         } else {
2249             DEST_EA(env, insn, opsize, dest, &addr);
2250             gen_logic_cc(s, dest, opsize);
2251         }
2252         break;
2253     case 1: /* andi */
2254         tcg_gen_and_i32(dest, src1, im);
2255         if (with_SR) {
2256             gen_set_sr(s, dest, opsize == OS_BYTE);
2257             gen_exit_tb(s);
2258         } else {
2259             DEST_EA(env, insn, opsize, dest, &addr);
2260             gen_logic_cc(s, dest, opsize);
2261         }
2262         break;
2263     case 2: /* subi */
2264         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2265         tcg_gen_sub_i32(dest, src1, im);
2266         gen_update_cc_add(dest, im, opsize);
2267         set_cc_op(s, CC_OP_SUBB + opsize);
2268         DEST_EA(env, insn, opsize, dest, &addr);
2269         break;
2270     case 3: /* addi */
2271         tcg_gen_add_i32(dest, src1, im);
2272         gen_update_cc_add(dest, im, opsize);
2273         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2274         set_cc_op(s, CC_OP_ADDB + opsize);
2275         DEST_EA(env, insn, opsize, dest, &addr);
2276         break;
2277     case 5: /* eori */
2278         tcg_gen_xor_i32(dest, src1, im);
2279         if (with_SR) {
2280             gen_set_sr(s, dest, opsize == OS_BYTE);
2281             gen_exit_tb(s);
2282         } else {
2283             DEST_EA(env, insn, opsize, dest, &addr);
2284             gen_logic_cc(s, dest, opsize);
2285         }
2286         break;
2287     case 6: /* cmpi */
2288         gen_update_cc_cmp(s, src1, im, opsize);
2289         break;
2290     default:
2291         abort();
2292     }
2293 }
2294 
/*
 * cas Dc,Du,<ea>: single compare-and-swap, implemented with a real
 * atomic cmpxchg.  Dc holds the compare value, Du the update value.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Sign-extend Dc to the operand size for the comparison. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Complete any postincrement/predecrement of the address register. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2357 
/*
 * cas2 (word): double compare-and-swap on two addresses.  Always done
 * by helper; under parallel execution it exits to a serialized context
 * since the two-location operation cannot be performed atomically here.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        /* Pack the four register numbers into one immediate. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2407 
/*
 * cas2 (long): double compare-and-swap on two addresses.  Unlike the
 * word form, a dedicated parallel helper exists for CF_PARALLEL.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2457 
2458 DISAS_INSN(byterev)
2459 {
2460     TCGv reg;
2461 
2462     reg = DREG(insn, 0);
2463     tcg_gen_bswap32_i32(reg, reg);
2464 }
2465 
2466 DISAS_INSN(move)
2467 {
2468     TCGv src;
2469     TCGv dest;
2470     int op;
2471     int opsize;
2472 
2473     switch (insn >> 12) {
2474     case 1: /* move.b */
2475         opsize = OS_BYTE;
2476         break;
2477     case 2: /* move.l */
2478         opsize = OS_LONG;
2479         break;
2480     case 3: /* move.w */
2481         opsize = OS_WORD;
2482         break;
2483     default:
2484         abort();
2485     }
2486     SRC_EA(env, src, opsize, 1, NULL);
2487     op = (insn >> 6) & 7;
2488     if (op == 1) {
2489         /* movea */
2490         /* The value will already have been sign extended.  */
2491         dest = AREG(insn, 9);
2492         tcg_gen_mov_i32(dest, src);
2493     } else {
2494         /* normal move */
2495         uint16_t dest_ea;
2496         dest_ea = ((insn >> 9) & 7) | (op << 3);
2497         DEST_EA(env, dest_ea, opsize, src, NULL);
2498         /* This will be correct because loads sign extend.  */
2499         gen_logic_cc(s, src, opsize);
2500     }
2501 }
2502 
/*
 * negx <ea>: negate with extend, i.e. result = 0 - (src + X).
 * Z is sticky: it is only cleared, never set, by this instruction.
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Keep only the borrow bit of the double-width subtract. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2545 
2546 DISAS_INSN(lea)
2547 {
2548     TCGv reg;
2549     TCGv tmp;
2550 
2551     reg = AREG(insn, 9);
2552     tmp = gen_lea(env, s, insn, OS_LONG);
2553     if (IS_NULL_QREG(tmp)) {
2554         gen_addr_fault(s);
2555         return;
2556     }
2557     tcg_gen_mov_i32(reg, tmp);
2558 }
2559 
2560 DISAS_INSN(clr)
2561 {
2562     int opsize;
2563     TCGv zero;
2564 
2565     zero = tcg_constant_i32(0);
2566     opsize = insn_opsize(insn);
2567     DEST_EA(env, insn, opsize, zero, NULL);
2568     gen_logic_cc(s, zero, opsize);
2569 }
2570 
2571 DISAS_INSN(move_from_ccr)
2572 {
2573     TCGv ccr;
2574 
2575     ccr = gen_get_ccr(s);
2576     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2577 }
2578 
/*
 * neg <ea>: arithmetic negation.  X is set iff the result is non-zero
 * (a borrow occurred in 0 - src).
 */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
}
2595 
/* move <ea>,CCR: shares the generic move-to-SR path with ccr_only set. */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
2600 
2601 DISAS_INSN(not)
2602 {
2603     TCGv src1;
2604     TCGv dest;
2605     TCGv addr;
2606     int opsize;
2607 
2608     opsize = insn_opsize(insn);
2609     SRC_EA(env, src1, opsize, 1, &addr);
2610     dest = tcg_temp_new();
2611     tcg_gen_not_i32(dest, src1);
2612     DEST_EA(env, insn, opsize, dest, &addr);
2613     gen_logic_cc(s, dest, opsize);
2614 }
2615 
2616 DISAS_INSN(swap)
2617 {
2618     TCGv src1;
2619     TCGv src2;
2620     TCGv reg;
2621 
2622     src1 = tcg_temp_new();
2623     src2 = tcg_temp_new();
2624     reg = DREG(insn, 0);
2625     tcg_gen_shli_i32(src1, reg, 16);
2626     tcg_gen_shri_i32(src2, reg, 16);
2627     tcg_gen_or_i32(reg, src1, src2);
2628     gen_logic_cc(s, reg, OS_LONG);
2629 }
2630 
/*
 * bkpt: under user emulation raise a debug exception; under system
 * emulation raise an illegal-instruction exception.
 */
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2639 
2640 DISAS_INSN(pea)
2641 {
2642     TCGv tmp;
2643 
2644     tmp = gen_lea(env, s, insn, OS_LONG);
2645     if (IS_NULL_QREG(tmp)) {
2646         gen_addr_fault(s);
2647         return;
2648     }
2649     gen_push(s, tmp);
2650 }
2651 
2652 DISAS_INSN(ext)
2653 {
2654     int op;
2655     TCGv reg;
2656     TCGv tmp;
2657 
2658     reg = DREG(insn, 0);
2659     op = (insn >> 6) & 7;
2660     tmp = tcg_temp_new();
2661     if (op == 3)
2662         tcg_gen_ext16s_i32(tmp, reg);
2663     else
2664         tcg_gen_ext8s_i32(tmp, reg);
2665     if (op == 2)
2666         gen_partset_reg(OS_WORD, reg, tmp);
2667     else
2668         tcg_gen_mov_i32(reg, tmp);
2669     gen_logic_cc(s, tmp, OS_LONG);
2670 }
2671 
2672 DISAS_INSN(tst)
2673 {
2674     int opsize;
2675     TCGv tmp;
2676 
2677     opsize = insn_opsize(insn);
2678     SRC_EA(env, tmp, opsize, 1, NULL);
2679     gen_logic_cc(s, tmp, opsize);
2680 }
2681 
DISAS_INSN(pulse)
{
    /* No architecturally visible effect; implemented as a NOP.  */
}
2686 
/* Raise an illegal-instruction exception at the current PC. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2691 
/*
 * tas <ea>: test and set.  Sets NZ from the byte operand, then sets
 * bit 7.  The memory form uses an atomic fetch-or so the read-modify-
 * write is safe under parallel execution.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Complete any postincrement/predecrement of the address reg. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2725 
/*
 * mulu.l / muls.l: the extension word supplies the sign (bit 11),
 * whether a 64-bit Dh:Dl result is wanted (bit 10), and the registers.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form, Dh:Dl. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the full 64-bit result being zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0: V is set when the result overflows 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2788 
/*
 * Common code for link/linkl: push An, make An point at the saved
 * value, then offset SP to allocate the frame.  When An is SP itself
 * ((insn & 7) == 7) the separate An update is skipped, since the
 * final SP assignment already covers it.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
2803 
2804 DISAS_INSN(link)
2805 {
2806     int16_t offset;
2807 
2808     offset = read_im16(env, s);
2809     gen_link(s, insn, offset);
2810 }
2811 
2812 DISAS_INSN(linkl)
2813 {
2814     int32_t offset;
2815 
2816     offset = read_im32(env, s);
2817     gen_link(s, insn, offset);
2818 }
2819 
/*
 * unlk An: reload An from the long word it points at, then set SP just
 * above that word.  An is copied to a temporary first so the load and
 * the final SP update both use the original value even when An is A7.
 */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
2833 
#if !defined(CONFIG_USER_ONLY)
/* reset: privileged; the actual work is done by a helper. */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        /* Privileged instruction: raise a privilege violation. */
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
2845 
DISAS_INSN(nop)
{
    /* nop: no operation. */
}
2849 
2850 DISAS_INSN(rtd)
2851 {
2852     TCGv tmp;
2853     int16_t offset = read_im16(env, s);
2854 
2855     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2856     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2857     gen_jmp(s, tmp);
2858 }
2859 
/*
 * rtr: pop a word into the CCR, then pop the return address and jump.
 * SP advances by 2 + 4 bytes in total.
 */
DISAS_INSN(rtr)
{
    TCGv tmp;
    TCGv ccr;
    TCGv sp;

    sp = tcg_temp_new();
    ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(sp, QREG_SP, 2);
    tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, sp, 4);

    gen_set_sr(s, ccr, true);

    gen_jmp(s, tmp);
}
2876 
2877 DISAS_INSN(rts)
2878 {
2879     TCGv tmp;
2880 
2881     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2882     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2883     gen_jmp(s, tmp);
2884 }
2885 
/* jmp/jsr <ea>: bit 6 clear selects jsr, which pushes the return PC. */
DISAS_INSN(jump)
{
    TCGv tmp;

    /*
     * Load the target address first to ensure correct exception
     * behavior.
     */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
2905 
/*
 * addq/subq: add or subtract a "quick" immediate 1..8 (the encoding 0
 * means 8).  Bit 8 selects subtract.  When the destination is an
 * address register the operation is always long and the condition
 * codes are left untouched.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X is the borrow out of dest - val. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X is the carry out: result < addend (unsigned). */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2953 
/*
 * bra/bsr/Bcc: the 8-bit displacement is in the opcode; 0 selects a
 * following 16-bit displacement and -1 (0xff) a 32-bit displacement.
 * The target is relative to the PC after the opcode word (base).
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        /* Branch around the taken path when the condition is false. */
        TCGLabel *l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
2985 
2986 DISAS_INSN(moveq)
2987 {
2988     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2989     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2990 }
2991 
2992 DISAS_INSN(mvzs)
2993 {
2994     int opsize;
2995     TCGv src;
2996     TCGv reg;
2997 
2998     if (insn & 0x40)
2999         opsize = OS_WORD;
3000     else
3001         opsize = OS_BYTE;
3002     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3003     reg = DREG(insn, 9);
3004     tcg_gen_mov_i32(reg, src);
3005     gen_logic_cc(s, src, opsize);
3006 }
3007 
/*
 * or: bit 8 selects the direction — set: Dn | <ea> -> <ea>;
 * clear: <ea> | Dn -> Dn (only the opsize part of Dn is written).
 */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3030 
3031 DISAS_INSN(suba)
3032 {
3033     TCGv src;
3034     TCGv reg;
3035 
3036     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3037     reg = AREG(insn, 9);
3038     tcg_gen_sub_i32(reg, reg, src);
3039 }
3040 
/*
 * Generate a subtract-with-extend: QREG_CC_N = dest - (src + X),
 * updating X, N, Z (sticky), V and C.  The caller is responsible for
 * writing QREG_CC_N back to the destination.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Keep only the borrow bit of the double-width subtract. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3073 
3074 DISAS_INSN(subx_reg)
3075 {
3076     TCGv dest;
3077     TCGv src;
3078     int opsize;
3079 
3080     opsize = insn_opsize(insn);
3081 
3082     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3083     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3084 
3085     gen_subx(s, src, dest, opsize);
3086 
3087     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3088 }
3089 
/*
 * subx -(Ay),-(Ax): memory form of subtract-with-extend with
 * predecrement on both operands; the source is decremented and
 * loaded first.
 */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3112 
/*
 * MOV3Q: move a 3-bit quick immediate to a longword EA.
 * The immediate field encodes 1..7 directly; the value 0 encodes -1.
 * Sets N and Z from the value; C and V are cleared by gen_logic_cc.
 */
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0) {
        /* The encoding 0 stands for the immediate -1.  */
        val = -1;
    }
    src = tcg_constant_i32(val);
    gen_logic_cc(s, src, OS_LONG);
    DEST_EA(env, insn, OS_LONG, src, NULL);
}
3126 
/*
 * CMP: compare a (sign-extended) EA operand against Dn.
 * Only the condition codes are updated; no register is written.
 */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3138 
/*
 * CMPA: compare an EA operand against an address register.
 * A word-sized source is sign-extended to 32 bits by SRC_EA; the
 * comparison itself is always done on the full 32-bit address
 * register, hence OS_LONG is passed to gen_update_cc_cmp.
 */
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
3154 
/*
 * CMPM: compare memory to memory, (Ay)+ with (Ax)+.
 * Both operands use post-increment addressing; only the condition
 * codes are updated.
 */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3169 
/*
 * EOR: exclusive-or Dn into an EA operand.
 * The EA address is captured by SRC_EA and reused by DEST_EA so it
 * is only evaluated once.  Sets N and Z; C and V are cleared by
 * gen_logic_cc.
 */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
}
3185 
3186 static void do_exg(TCGv reg1, TCGv reg2)
3187 {
3188     TCGv temp = tcg_temp_new();
3189     tcg_gen_mov_i32(temp, reg1);
3190     tcg_gen_mov_i32(reg1, reg2);
3191     tcg_gen_mov_i32(reg2, temp);
3192 }
3193 
/* EXG: exchange two data registers.  */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3199 
/* EXG: exchange two address registers.  */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3205 
/* EXG: exchange a data register with an address register.  */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3211 
/*
 * AND: bitwise-and Dn with an EA operand.
 * Bit 8 of the opcode selects the direction: set means the result
 * goes to the EA, clear means it goes to Dn.  Sets N and Z; C and V
 * are cleared by gen_logic_cc.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* Dn & <ea> -> <ea>; the EA address is evaluated only once.  */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* <ea> & Dn -> Dn, writing only the sized part of Dn.  */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3235 
/*
 * ADDA: add an EA operand to an address register.
 * A word source is sign-extended; the full 32-bit register is
 * updated and the condition codes are not affected.
 */
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
3245 
/*
 * Common code for ADDX: compute src + dest + X and the full flag set.
 * Both operands must already be sign-extended to 32 bits for the given
 * opsize.  The result is left in QREG_CC_N for the caller to write
 * back, and the cc_op is set to CC_OP_FLAGS.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    /* (N,X) = dest + X, then (N,X) += src, capturing the carry in X.  */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    /* V = (N ^ src) & ~(dest ^ src): overflow iff the operands have
       the same sign and the result's sign differs.  */
    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3277 
/*
 * ADDX, register form: Dx = Dx + Dy + X.
 * Both operands are sign-extended to 32 bits; gen_addx leaves the
 * result in QREG_CC_N, which is written back to the size-appropriate
 * part of the destination register.
 */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    /* Destination Dx is in bits 11:9, source Dy in bits 2:0.  */
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    /* gen_addx left the result in QREG_CC_N.  */
    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3293 
/*
 * ADDX, memory form: -(Ax) = -(Ax) + -(Ay) + X.
 * Both address registers are predecremented in place before the
 * sign-extended loads; the result (left in QREG_CC_N by gen_addx)
 * is stored back to the destination address.
 */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay, then load the (sign-extended) source.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax, then load the (sign-extended) destination.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3316 
/*
 * Immediate-count register shift (arithmetic if !(insn & 8), logical
 * otherwise; left if insn & 0x100).  The 3-bit count field encodes
 * 1..8, with 0 meaning 8.  Computes C, X, N, Z and, for M68000-family
 * arithmetic left shifts, V; writes the result back to Dn.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    /* Arithmetic shifts see a sign-extended value, logical ones
       a zero-extended value.  */
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        /* Count field 0 encodes a shift by 8.  */
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the last bit shifted out of the top of the field.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V is set iff any of the top count+1 bits differ
                   from the sign bit.  */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* C is the last bit shifted out of the bottom.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3368 
/*
 * Register-count register shift (arithmetic if !(insn & 8), logical
 * otherwise; left if insn & 0x100).  The count comes from Dx (bits
 * 11:9) modulo 64.  The shift is performed in 64 bits so that the
 * last bit shifted out (C) falls out naturally; X is only updated
 * for a non-zero count.  Writes the result back to Dn.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* For a long shift the carry is simply the high half.  */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* For byte/word, extract C from just above the field, and
               force C=0 when the count is 0.  */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: place the value in the high half so the last
           bit shifted out lands in the top of the low half.  */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3460 
/* Byte-sized immediate-count shift.  */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3465 
/* Word-sized immediate-count shift.  */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3470 
/* Long-sized immediate-count shift.  */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3475 
/* Byte-sized register-count shift.  */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3480 
/* Word-sized register-count shift.  */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3485 
/* Long-sized register-count shift.  */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3490 
/*
 * Memory shift: shift a word-sized EA operand by exactly one bit
 * (arithmetic if !(insn & 8), logical otherwise; left if
 * insn & 0x100).  Computes C, X, N, Z and, for M68000-family
 * arithmetic left shifts, V; writes the result back to the EA.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the bit shifted out of the top of the word.  */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* With a shift of 1, V is just (old sign != new sign).  */
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit shifted out of the bottom (masked below).  */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3530 
/*
 * Rotate (without extend) a value of the given bit size in place and
 * compute N, Z, C, V.  Byte and word inputs are replicated across
 * the 32-bit register so a plain 32-bit rotate gives the correct
 * result.  X is not affected; V is always cleared.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    /* Sign-extend the rotated field back so N/Z are correct.  */
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the bit rotated into the other end of the field.  */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3578 
/*
 * Set the flags after a rotate-through-X: sign-extend the result to
 * the given size, set N and Z from it, copy the new extend bit into
 * both X and C, and clear V.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3597 
/*
 * Rotate through the X bit for byte/word sizes, composing the result
 * from two shifts of the value plus a shifted-in X.  Updates reg in
 * place and returns a new temporary holding the new X bit.
 * Result of rotate_x() is valid if 0 <= shift <= size.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3638 
/*
 * Rotate a 32-bit value through the X bit, implemented as a 64-bit
 * rotate of a [reg:X] (or [X:reg]) composite.  Updates reg in place
 * and returns a new temporary holding the new X bit; if the shift
 * count is zero, both reg and X are left unchanged.
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    /* Recombine the two halves of the rotated 33-bit value.  */
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3699 
/*
 * Long-sized immediate-count rotate.  The 3-bit count field encodes
 * 1..8 (0 means 8); insn & 8 selects plain rotate vs rotate through
 * the X bit.
 */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        /* Count field 0 encodes a rotate by 8.  */
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
3721 
/*
 * Byte-sized immediate-count rotate.  The 3-bit count field encodes
 * 1..8 (0 means 8); insn & 8 selects plain rotate vs rotate through
 * the X bit.  Only the low byte of Dn is written back.
 */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    /* Work on a zero-extended copy of the low byte.  */
    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        /* Count field 0 encodes a rotate by 8.  */
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3746 
/*
 * Word-sized immediate-count rotate.  The 3-bit count field encodes
 * 1..8 (0 means 8); insn & 8 selects plain rotate vs rotate through
 * the X bit.  Only the low word of Dn is written back.
 */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    /* Work on a zero-extended copy of the low word.  */
    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        /* Count field 0 encodes a rotate by 8.  */
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3770 
3771 DISAS_INSN(rotate_reg)
3772 {
3773     TCGv reg;
3774     TCGv src;
3775     TCGv t0, t1;
3776     int left = (insn & 0x100);
3777 
3778     reg = DREG(insn, 0);
3779     src = DREG(insn, 9);
3780     /* shift in [0..63] */
3781     t0 = tcg_temp_new();
3782     tcg_gen_andi_i32(t0, src, 63);
3783     t1 = tcg_temp_new_i32();
3784     if (insn & 8) {
3785         tcg_gen_andi_i32(t1, src, 31);
3786         rotate(reg, t1, left, 32);
3787         /* if shift == 0, clear C */
3788         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3789                             t0, QREG_CC_V /* 0 */,
3790                             QREG_CC_V /* 0 */, QREG_CC_C);
3791     } else {
3792         TCGv X;
3793         /* modulo 33 */
3794         tcg_gen_movi_i32(t1, 33);
3795         tcg_gen_remu_i32(t1, t0, t1);
3796         X = rotate32_x(DREG(insn, 0), t1, left);
3797         rotate_x_flags(DREG(insn, 0), X, 32);
3798     }
3799     set_cc_op(s, CC_OP_FLAGS);
3800 }
3801 
/*
 * Byte-sized register-count rotate.  The raw count from Dx is taken
 * modulo 64; a plain rotate (insn & 8) then uses the count modulo 8,
 * while a rotate through X uses the count modulo 9.  Only the low
 * byte of Dn is written back.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    /* Work on a zero-extended copy of the low byte.  */
    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3833 
/*
 * Word-sized register-count rotate.  The raw count from Dx is taken
 * modulo 64; a plain rotate (insn & 8) then uses the count modulo 16,
 * while a rotate through X uses the count modulo 17.  Only the low
 * word of Dn is written back.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    /* Work on a zero-extended copy of the low word.  */
    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3865 
/*
 * Memory rotate: rotate a word-sized EA operand by exactly one bit.
 * Bit 9 of the opcode selects plain rotate vs rotate through the X
 * bit; the result is written back to the same EA.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory rotates always use a count of 1.  */
    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3885 
/*
 * Bitfield extract from a data register (signed if insn & 0x200,
 * unsigned otherwise).  Width and offset each come either from the
 * extension word as immediates or from data registers, selected by
 * ext bits 5 and 11.  The sign-extended field is always left in
 * QREG_CC_N for the flags; the (signed or unsigned) extraction is
 * written to the register selected by ext bits 15:12.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    /* Width field 0 encodes 32.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = (32 - width) mod 32; a zero width register means 32.  */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                /* Field wraps: rotate it to the top first.  */
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3952 
/*
 * Bitfield extract from memory (signed if insn & 0x200, unsigned
 * otherwise), implemented via helpers.  Width and offset each come
 * either from the extension word as immediates or from data
 * registers.  The unsigned helper returns a 64-bit value packing the
 * result and the sign-extended field (for the flags) in one call.
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* The helper returns (field_for_flags : result) in 64 bits.  */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3987 
/*
 * Bitfield operations on a data register: bfchg, bfclr, bfffo,
 * bfset, bftst (selected by opcode bits 11:8).  The field is rotated
 * to the top of the word and its bits placed in QREG_CC_N for the
 * flags; "mask" holds the complement of the field mask rotated into
 * position, so the field bits of src are where mask is CLEAR.  For
 * bfffo, tofs/tlen carry the offset and length to the helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    /* Width field 0 encodes 32.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around the end of the word.  */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            /* mask = 0x7fffffff >> ((width - 1) mod 32).  */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* mask is the INVERSE of the field mask here.  */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4074 
/*
 * Bitfield operations on memory: bfchg, bfclr, bfffo, bfset, bftst
 * (selected by opcode bits 11:8), all implemented via helpers that
 * return the field value for the flags in QREG_CC_N.  The bfffo
 * helper returns a 64-bit value packing the result register and the
 * flag value.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* The helper returns (flag_value : result) in 64 bits.  */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4121 
/*
 * BFINS with a data-register destination: insert the low 'len' bits of
 * the source register into Dn at the given (big bit-endian) offset.
 * CC_N is set from the source field shifted to the top of the word.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* An immediate width of 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* Compute the flags first: left-justify the inserted field in CC_N. */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps past bit 0: rotate the masked field into place. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        /* Variable width and/or offset: build mask and rotation at runtime. */
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and field to the target position, then merge. */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4191 
/*
 * BFINS with a memory operand.  The field insertion is done entirely in
 * the helper; CC_N receives the flag value the helper returns.
 */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Bit 5: width taken from a data register rather than an immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Bit 11: offset taken from a data register rather than an immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);
}
4218 
4219 DISAS_INSN(ff1)
4220 {
4221     TCGv reg;
4222     reg = DREG(insn, 0);
4223     gen_logic_cc(s, reg, OS_LONG);
4224     gen_helper_ff1(reg, reg);
4225 }
4226 
/*
 * CHK: bounds-check a data register against an effective-address
 * operand; the comparison and any trap happen in the helper.
 */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    /* Bits 8:7 encode the size; the long form needs the CHK2 feature. */
    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Flush the flags so the helper observes canonical CC state. */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
4252 
4253 DISAS_INSN(chk2)
4254 {
4255     uint16_t ext;
4256     TCGv addr1, addr2, bound1, bound2, reg;
4257     int opsize;
4258 
4259     switch ((insn >> 9) & 3) {
4260     case 0:
4261         opsize = OS_BYTE;
4262         break;
4263     case 1:
4264         opsize = OS_WORD;
4265         break;
4266     case 2:
4267         opsize = OS_LONG;
4268         break;
4269     default:
4270         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4271         return;
4272     }
4273 
4274     ext = read_im16(env, s);
4275     if ((ext & 0x0800) == 0) {
4276         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4277         return;
4278     }
4279 
4280     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4281     addr2 = tcg_temp_new();
4282     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4283 
4284     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4285     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4286 
4287     reg = tcg_temp_new();
4288     if (ext & 0x8000) {
4289         tcg_gen_mov_i32(reg, AREG(ext, 12));
4290     } else {
4291         gen_ext(reg, DREG(ext, 12), opsize, 1);
4292     }
4293 
4294     gen_flush_flags(s);
4295     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4296 }
4297 
4298 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4299 {
4300     TCGv addr;
4301     TCGv_i64 t0, t1;
4302 
4303     addr = tcg_temp_new();
4304 
4305     t0 = tcg_temp_new_i64();
4306     t1 = tcg_temp_new_i64();
4307 
4308     tcg_gen_andi_i32(addr, src, ~15);
4309     tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4310     tcg_gen_addi_i32(addr, addr, 8);
4311     tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4312 
4313     tcg_gen_andi_i32(addr, dst, ~15);
4314     tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4315     tcg_gen_addi_i32(addr, addr, 8);
4316     tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4317 }
4318 
4319 DISAS_INSN(move16_reg)
4320 {
4321     int index = IS_USER(s);
4322     TCGv tmp;
4323     uint16_t ext;
4324 
4325     ext = read_im16(env, s);
4326     if ((ext & (1 << 15)) == 0) {
4327         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4328     }
4329 
4330     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4331 
4332     /* Ax can be Ay, so save Ay before incrementing Ax */
4333     tmp = tcg_temp_new();
4334     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4335     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4336     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4337 }
4338 
4339 DISAS_INSN(move16_mem)
4340 {
4341     int index = IS_USER(s);
4342     TCGv reg, addr;
4343 
4344     reg = AREG(insn, 0);
4345     addr = tcg_constant_i32(read_im32(env, s));
4346 
4347     if ((insn >> 3) & 1) {
4348         /* MOVE16 (xxx).L, (Ay) */
4349         m68k_copy_line(reg, addr, index);
4350     } else {
4351         /* MOVE16 (Ay), (xxx).L */
4352         m68k_copy_line(addr, reg, index);
4353     }
4354 
4355     if (((insn >> 3) & 2) == 0) {
4356         /* (Ay)+ */
4357         tcg_gen_addi_i32(reg, reg, 16);
4358     }
4359 }
4360 
/*
 * STRLDSR: push the current SR, then load SR from an immediate.  The
 * encoding is a fixed two-word sequence: the second word must be
 * 0x46FC (move-immediate-to-SR), followed by the immediate itself.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* s->pc has advanced past the opcode word; report exceptions there. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    /* Supervisor-only, and the new SR value must keep the S bit set. */
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    gen_exit_tb(s);
}
4381 
4382 DISAS_INSN(move_from_sr)
4383 {
4384     TCGv sr;
4385 
4386     if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
4387         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4388         return;
4389     }
4390     sr = gen_get_sr(s);
4391     DEST_EA(env, insn, OS_WORD, sr, NULL);
4392 }
4393 
4394 #if !defined(CONFIG_USER_ONLY)
/*
 * MOVES: move to/from an alternate address space, selecting the MMU
 * index via the SFC (loads) or DFC (stores) control register.
 * Supervisor-only.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Loads into an address register are sign-extended. */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Loads into a data register only modify the low part. */
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Write back the address register for the (An)+ / -(An) modes. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through A7 advance by 2 to keep SP even. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4452 
4453 DISAS_INSN(move_to_sr)
4454 {
4455     if (IS_USER(s)) {
4456         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4457         return;
4458     }
4459     gen_move_to_sr(env, s, insn, false);
4460     gen_exit_tb(s);
4461 }
4462 
4463 DISAS_INSN(move_from_usp)
4464 {
4465     if (IS_USER(s)) {
4466         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4467         return;
4468     }
4469     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4470                    offsetof(CPUM68KState, sp[M68K_USP]));
4471 }
4472 
4473 DISAS_INSN(move_to_usp)
4474 {
4475     if (IS_USER(s)) {
4476         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4477         return;
4478     }
4479     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4480                    offsetof(CPUM68KState, sp[M68K_USP]));
4481 }
4482 
/*
 * HALT (supervisor-only).  Raised with s->pc (past this insn) so
 * execution can resume at the following instruction.
 */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4492 
/*
 * STOP #imm (supervisor-only): load SR from the immediate, mark the
 * CPU halted, and raise EXCP_HLT with the PC past the immediate.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4508 
/*
 * RTE (supervisor-only).  The frame restore itself is handled in the
 * EXCP_RTE exception path, not inline.
 */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4517 
4518 DISAS_INSN(cf_movec)
4519 {
4520     uint16_t ext;
4521     TCGv reg;
4522 
4523     if (IS_USER(s)) {
4524         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4525         return;
4526     }
4527 
4528     ext = read_im16(env, s);
4529 
4530     if (ext & 0x8000) {
4531         reg = AREG(ext, 12);
4532     } else {
4533         reg = DREG(ext, 12);
4534     }
4535     gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
4536     gen_exit_tb(s);
4537 }
4538 
4539 DISAS_INSN(m68k_movec)
4540 {
4541     uint16_t ext;
4542     TCGv reg, creg;
4543 
4544     if (IS_USER(s)) {
4545         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4546         return;
4547     }
4548 
4549     ext = read_im16(env, s);
4550 
4551     if (ext & 0x8000) {
4552         reg = AREG(ext, 12);
4553     } else {
4554         reg = DREG(ext, 12);
4555     }
4556     creg = tcg_constant_i32(ext & 0xfff);
4557     if (insn & 1) {
4558         gen_helper_m68k_movec_to(cpu_env, creg, reg);
4559     } else {
4560         gen_helper_m68k_movec_from(reg, cpu_env, creg);
4561     }
4562     gen_exit_tb(s);
4563 }
4564 
4565 DISAS_INSN(intouch)
4566 {
4567     if (IS_USER(s)) {
4568         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4569         return;
4570     }
4571     /* ICache fetch.  Implement as no-op.  */
4572 }
4573 
4574 DISAS_INSN(cpushl)
4575 {
4576     if (IS_USER(s)) {
4577         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4578         return;
4579     }
4580     /* Cache push/invalidate.  Implement as no-op.  */
4581 }
4582 
4583 DISAS_INSN(cpush)
4584 {
4585     if (IS_USER(s)) {
4586         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4587         return;
4588     }
4589     /* Cache push/invalidate.  Implement as no-op.  */
4590 }
4591 
4592 DISAS_INSN(cinv)
4593 {
4594     if (IS_USER(s)) {
4595         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4596         return;
4597     }
4598     /* Invalidate cache line.  Implement as no-op.  */
4599 }
4600 
4601 #if !defined(CONFIG_USER_ONLY)
4602 DISAS_INSN(pflush)
4603 {
4604     TCGv opmode;
4605 
4606     if (IS_USER(s)) {
4607         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4608         return;
4609     }
4610 
4611     opmode = tcg_constant_i32((insn >> 3) & 3);
4612     gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4613 }
4614 
4615 DISAS_INSN(ptest)
4616 {
4617     TCGv is_read;
4618 
4619     if (IS_USER(s)) {
4620         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4621         return;
4622     }
4623     is_read = tcg_constant_i32((insn >> 5) & 1);
4624     gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4625 }
4626 #endif
4627 
/* WDDATA: always raises a privilege violation in this implementation. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4632 
/* WDEBUG (supervisor-only): not implemented; aborts emulation. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4642 #endif
4643 
/* TRAP #n: raise EXCP_TRAP0 + n; s->pc (past the insn) is the resume PC. */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4648 
/*
 * Common TRAPcc tail: raise EXCP_TRAPCC when condition 'c' holds.
 * A never-true condition generates nothing; a sometimes-true condition
 * generates a branch that skips the trap on the not-taken path.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        /* Trap taken: PC points at the next insn, format-2 frame. */
        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            /* Resume normal translation on the not-taken path. */
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4671 
/*
 * TRAPcc: conditional trap.  The low three bits give the operand form;
 * the immediate operand is unused and only consumed from the stream.
 */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4694 
4695 DISAS_INSN(trapv)
4696 {
4697     DisasCompare c;
4698 
4699     gen_cc_cond(&c, s, 9); /* V set */
4700     do_trapcc(s, &c);
4701 }
4702 
4703 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4704 {
4705     switch (reg) {
4706     case M68K_FPIAR:
4707         tcg_gen_movi_i32(res, 0);
4708         break;
4709     case M68K_FPSR:
4710         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4711         break;
4712     case M68K_FPCR:
4713         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4714         break;
4715     }
4716 }
4717 
4718 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4719 {
4720     switch (reg) {
4721     case M68K_FPIAR:
4722         break;
4723     case M68K_FPSR:
4724         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4725         break;
4726     case M68K_FPCR:
4727         gen_helper_set_fpcr(cpu_env, val);
4728         break;
4729     }
4730 }
4731 
4732 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4733 {
4734     int index = IS_USER(s);
4735     TCGv tmp;
4736 
4737     tmp = tcg_temp_new();
4738     gen_load_fcr(s, tmp, reg);
4739     tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4740 }
4741 
4742 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4743 {
4744     int index = IS_USER(s);
4745     TCGv tmp;
4746 
4747     tmp = tcg_temp_new();
4748     tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4749     gen_store_fcr(s, tmp, reg);
4750 }
4751 
4752 
/*
 * FMOVE/FMOVEM of the float control registers FPCR/FPSR/FPIAR.
 * 'mask' selects which of the three registers take part; 'is_write'
 * is set when moving from the control register(s) to the EA.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Register forms accept exactly one control register. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediates can only load a single control register. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Remaining modes are memory forms. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An) store: walk the registers highest-first, decrementing. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other modes walk lowest-first, incrementing. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        /* (An)+ : write the final address back. */
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4848 
/*
 * FMOVEM: move multiple float registers to/from memory.  The register
 * list comes either from the low byte of the extension word or from a
 * data register; the transfers happen in helpers, which return the
 * final address in 'tmp' for the writeback modes.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* For the (An)+ and -(An) EA modes, write back the final address. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4908 
4909 /*
4910  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4911  * immediately before the next FP instruction is executed.
4912  */
4913 DISAS_INSN(fpu)
4914 {
4915     uint16_t ext;
4916     int opmode;
4917     int opsize;
4918     TCGv_ptr cpu_src, cpu_dest;
4919 
4920     ext = read_im16(env, s);
4921     opmode = ext & 0x7f;
4922     switch ((ext >> 13) & 7) {
4923     case 0:
4924         break;
4925     case 1:
4926         goto undef;
4927     case 2:
4928         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4929             /* fmovecr */
4930             TCGv rom_offset = tcg_constant_i32(opmode);
4931             cpu_dest = gen_fp_ptr(REG(ext, 7));
4932             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4933             return;
4934         }
4935         break;
4936     case 3: /* fmove out */
4937         cpu_src = gen_fp_ptr(REG(ext, 7));
4938         opsize = ext_opsize(ext, 10);
4939         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4940                       EA_STORE, IS_USER(s)) == -1) {
4941             gen_addr_fault(s);
4942         }
4943         gen_helper_ftst(cpu_env, cpu_src);
4944         return;
4945     case 4: /* fmove to control register.  */
4946     case 5: /* fmove from control register.  */
4947         gen_op_fmove_fcr(env, s, insn, ext);
4948         return;
4949     case 6: /* fmovem */
4950     case 7:
4951         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4952             goto undef;
4953         }
4954         gen_op_fmovem(env, s, insn, ext);
4955         return;
4956     }
4957     if (ext & (1 << 14)) {
4958         /* Source effective address.  */
4959         opsize = ext_opsize(ext, 10);
4960         cpu_src = gen_fp_result_ptr();
4961         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4962                       EA_LOADS, IS_USER(s)) == -1) {
4963             gen_addr_fault(s);
4964             return;
4965         }
4966     } else {
4967         /* Source register.  */
4968         opsize = OS_EXTENDED;
4969         cpu_src = gen_fp_ptr(REG(ext, 10));
4970     }
4971     cpu_dest = gen_fp_ptr(REG(ext, 7));
4972     switch (opmode) {
4973     case 0: /* fmove */
4974         gen_fp_move(cpu_dest, cpu_src);
4975         break;
4976     case 0x40: /* fsmove */
4977         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4978         break;
4979     case 0x44: /* fdmove */
4980         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4981         break;
4982     case 1: /* fint */
4983         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4984         break;
4985     case 2: /* fsinh */
4986         gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
4987         break;
4988     case 3: /* fintrz */
4989         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4990         break;
4991     case 4: /* fsqrt */
4992         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4993         break;
4994     case 0x41: /* fssqrt */
4995         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4996         break;
4997     case 0x45: /* fdsqrt */
4998         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4999         break;
5000     case 0x06: /* flognp1 */
5001         gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
5002         break;
5003     case 0x08: /* fetoxm1 */
5004         gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
5005         break;
5006     case 0x09: /* ftanh */
5007         gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5008         break;
5009     case 0x0a: /* fatan */
5010         gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5011         break;
5012     case 0x0c: /* fasin */
5013         gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5014         break;
5015     case 0x0d: /* fatanh */
5016         gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5017         break;
5018     case 0x0e: /* fsin */
5019         gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5020         break;
5021     case 0x0f: /* ftan */
5022         gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5023         break;
5024     case 0x10: /* fetox */
5025         gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5026         break;
5027     case 0x11: /* ftwotox */
5028         gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5029         break;
5030     case 0x12: /* ftentox */
5031         gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5032         break;
5033     case 0x14: /* flogn */
5034         gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5035         break;
5036     case 0x15: /* flog10 */
5037         gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5038         break;
5039     case 0x16: /* flog2 */
5040         gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5041         break;
5042     case 0x18: /* fabs */
5043         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5044         break;
5045     case 0x58: /* fsabs */
5046         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5047         break;
5048     case 0x5c: /* fdabs */
5049         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5050         break;
5051     case 0x19: /* fcosh */
5052         gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5053         break;
5054     case 0x1a: /* fneg */
5055         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5056         break;
5057     case 0x5a: /* fsneg */
5058         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5059         break;
5060     case 0x5e: /* fdneg */
5061         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5062         break;
5063     case 0x1c: /* facos */
5064         gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5065         break;
5066     case 0x1d: /* fcos */
5067         gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5068         break;
5069     case 0x1e: /* fgetexp */
5070         gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5071         break;
5072     case 0x1f: /* fgetman */
5073         gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5074         break;
5075     case 0x20: /* fdiv */
5076         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5077         break;
5078     case 0x60: /* fsdiv */
5079         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5080         break;
5081     case 0x64: /* fddiv */
5082         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5083         break;
5084     case 0x21: /* fmod */
5085         gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5086         break;
5087     case 0x22: /* fadd */
5088         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5089         break;
5090     case 0x62: /* fsadd */
5091         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5092         break;
5093     case 0x66: /* fdadd */
5094         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5095         break;
5096     case 0x23: /* fmul */
5097         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5098         break;
5099     case 0x63: /* fsmul */
5100         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5101         break;
5102     case 0x67: /* fdmul */
5103         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5104         break;
5105     case 0x24: /* fsgldiv */
5106         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5107         break;
5108     case 0x25: /* frem */
5109         gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5110         break;
5111     case 0x26: /* fscale */
5112         gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5113         break;
5114     case 0x27: /* fsglmul */
5115         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5116         break;
5117     case 0x28: /* fsub */
5118         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5119         break;
5120     case 0x68: /* fssub */
5121         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5122         break;
5123     case 0x6c: /* fdsub */
5124         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5125         break;
5126     case 0x30: case 0x31: case 0x32:
5127     case 0x33: case 0x34: case 0x35:
5128     case 0x36: case 0x37: {
5129             TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5130             gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5131         }
5132         break;
5133     case 0x38: /* fcmp */
5134         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5135         return;
5136     case 0x3a: /* ftst */
5137         gen_helper_ftst(cpu_env, cpu_src);
5138         return;
5139     default:
5140         goto undef;
5141     }
5142     gen_helper_ftst(cpu_env, cpu_dest);
5143     return;
5144 undef:
5145     /* FIXME: Is this right for offset addressing modes?  */
5146     s->pc -= 2;
5147     disas_undef_fpu(env, s, insn);
5148 }
5149 
/*
 * Build a DisasCompare describing FPU condition code COND.
 *
 * Conditions 0-15 are the IEEE-nonaware predicates; 16-31 are the
 * IEEE-aware ("signaling") duplicates, which should additionally raise
 * BSUN on an unordered result (see TODO below).  The resulting test is
 * "v1 <tcond> v2", evaluated against the A (NaN), Z and N bits of the
 * FPSR condition-code byte.
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    /* v2 is always zero; each case builds v1 so that comparing it
       against zero with tcond yields the predicate.  */
    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        /* Shift A up into the N bit position so the two can be
           combined; the final xor inverts the merged A|N bit.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        /* Invert N, then require all of !N, A, Z clear.  */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        /* Shift A into the N position and clear N when A is set,
           implementing the (N && !A) term.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        /* Shift Z into the N position; xor then inverts the merged
           N|Z bit to implement the !(N || Z) term.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        /* Shift Z into the N position and clear N when Z is set,
           implementing the (N && !Z) term.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5272 
5273 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5274 {
5275     DisasCompare c;
5276 
5277     gen_fcc_cond(&c, s, cond);
5278     update_cc_op(s);
5279     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5280 }
5281 
5282 DISAS_INSN(fbcc)
5283 {
5284     uint32_t offset;
5285     uint32_t base;
5286     TCGLabel *l1;
5287 
5288     base = s->pc;
5289     offset = (int16_t)read_im16(env, s);
5290     if (insn & (1 << 6)) {
5291         offset = (offset << 16) | read_im16(env, s);
5292     }
5293 
5294     l1 = gen_new_label();
5295     update_cc_op(s);
5296     gen_fjmpcc(s, insn & 0x3f, l1);
5297     gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5298     gen_set_label(l1);
5299     gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5300 }
5301 
5302 DISAS_INSN(fscc)
5303 {
5304     DisasCompare c;
5305     int cond;
5306     TCGv tmp;
5307     uint16_t ext;
5308 
5309     ext = read_im16(env, s);
5310     cond = ext & 0x3f;
5311     gen_fcc_cond(&c, s, cond);
5312 
5313     tmp = tcg_temp_new();
5314     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5315 
5316     tcg_gen_neg_i32(tmp, tmp);
5317     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5318 }
5319 
5320 DISAS_INSN(ftrapcc)
5321 {
5322     DisasCompare c;
5323     uint16_t ext;
5324     int cond;
5325 
5326     ext = read_im16(env, s);
5327     cond = ext & 0x3f;
5328 
5329     /* Consume and discard the immediate operand. */
5330     switch (extract32(insn, 0, 3)) {
5331     case 2: /* ftrapcc.w */
5332         (void)read_im16(env, s);
5333         break;
5334     case 3: /* ftrapcc.l */
5335         (void)read_im32(env, s);
5336         break;
5337     case 4: /* ftrapcc (no operand) */
5338         break;
5339     default:
5340         /* ftrapcc registered with only valid opmodes */
5341         g_assert_not_reached();
5342     }
5343 
5344     gen_fcc_cond(&c, s, cond);
5345     do_trapcc(s, &c);
5346 }
5347 
5348 #if !defined(CONFIG_USER_ONLY)
5349 DISAS_INSN(frestore)
5350 {
5351     TCGv addr;
5352 
5353     if (IS_USER(s)) {
5354         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5355         return;
5356     }
5357     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5358         SRC_EA(env, addr, OS_LONG, 0, NULL);
5359         /* FIXME: check the state frame */
5360     } else {
5361         disas_undef(env, s, insn);
5362     }
5363 }
5364 
5365 DISAS_INSN(fsave)
5366 {
5367     if (IS_USER(s)) {
5368         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5369         return;
5370     }
5371 
5372     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5373         /* always write IDLE */
5374         TCGv idle = tcg_constant_i32(0x41000000);
5375         DEST_EA(env, insn, OS_LONG, idle, NULL);
5376     } else {
5377         disas_undef(env, s, insn);
5378     }
5379 }
5380 #endif
5381 
5382 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5383 {
5384     TCGv tmp = tcg_temp_new();
5385     if (s->env->macsr & MACSR_FI) {
5386         if (upper)
5387             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5388         else
5389             tcg_gen_shli_i32(tmp, val, 16);
5390     } else if (s->env->macsr & MACSR_SU) {
5391         if (upper)
5392             tcg_gen_sari_i32(tmp, val, 16);
5393         else
5394             tcg_gen_ext16s_i32(tmp, val);
5395     } else {
5396         if (upper)
5397             tcg_gen_shri_i32(tmp, val, 16);
5398         else
5399             tcg_gen_ext16u_i32(tmp, val);
5400     }
5401     return tmp;
5402 }
5403 
/* Clear the MACSR result flags (V, Z, N, EV) ahead of an operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5409 
/*
 * ColdFire MAC/EMAC multiply-accumulate instruction, optionally
 * combined with a parallel memory load ("MAC with load" form when
 * insn bits 4-5 are non-zero).  The dual-accumulate form (second
 * accumulate selected by the extension word) requires CF_EMAC_B.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temporary, once per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        /* The load form flips the low bit of the accumulator number
           and takes its operands from the extension word. */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Apply the scale factor from ext[10:9]: 1 shifts the product
           left one bit, 3 shifts it right one bit. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* First (or only) accumulate: insn bit 8 selects subtract. */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate according to the current operating mode. */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        /* Second accumulate: ext bit 1 selects subtract. */
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: move the loaded value into the
           destination register, then perform address writeback. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5577 
5578 DISAS_INSN(from_mac)
5579 {
5580     TCGv rx;
5581     TCGv_i64 acc;
5582     int accnum;
5583 
5584     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5585     accnum = (insn >> 9) & 3;
5586     acc = MACREG(accnum);
5587     if (s->env->macsr & MACSR_FI) {
5588         gen_helper_get_macf(rx, cpu_env, acc);
5589     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5590         tcg_gen_extrl_i64_i32(rx, acc);
5591     } else if (s->env->macsr & MACSR_SU) {
5592         gen_helper_get_macs(rx, acc);
5593     } else {
5594         gen_helper_get_macu(rx, acc);
5595     }
5596     if (insn & 0x40) {
5597         tcg_gen_movi_i64(acc, 0);
5598         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5599     }
5600 }
5601 
5602 DISAS_INSN(move_mac)
5603 {
5604     /* FIXME: This can be done without a helper.  */
5605     int src;
5606     TCGv dest;
5607     src = insn & 3;
5608     dest = tcg_constant_i32((insn >> 9) & 3);
5609     gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
5610     gen_mac_clear_flags();
5611     gen_helper_mac_set_flags(cpu_env, dest);
5612 }
5613 
5614 DISAS_INSN(from_macsr)
5615 {
5616     TCGv reg;
5617 
5618     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5619     tcg_gen_mov_i32(reg, QREG_MACSR);
5620 }
5621 
5622 DISAS_INSN(from_mask)
5623 {
5624     TCGv reg;
5625     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5626     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5627 }
5628 
5629 DISAS_INSN(from_mext)
5630 {
5631     TCGv reg;
5632     TCGv acc;
5633     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5634     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5635     if (s->env->macsr & MACSR_FI)
5636         gen_helper_get_mac_extf(reg, cpu_env, acc);
5637     else
5638         gen_helper_get_mac_exti(reg, cpu_env, acc);
5639 }
5640 
5641 DISAS_INSN(macsr_to_ccr)
5642 {
5643     TCGv tmp = tcg_temp_new();
5644 
5645     /* Note that X and C are always cleared. */
5646     tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5647     gen_helper_set_ccr(cpu_env, tmp);
5648     set_cc_op(s, CC_OP_FLAGS);
5649 }
5650 
5651 DISAS_INSN(to_mac)
5652 {
5653     TCGv_i64 acc;
5654     TCGv val;
5655     int accnum;
5656     accnum = (insn >> 9) & 3;
5657     acc = MACREG(accnum);
5658     SRC_EA(env, val, OS_LONG, 0, NULL);
5659     if (s->env->macsr & MACSR_FI) {
5660         tcg_gen_ext_i32_i64(acc, val);
5661         tcg_gen_shli_i64(acc, acc, 8);
5662     } else if (s->env->macsr & MACSR_SU) {
5663         tcg_gen_ext_i32_i64(acc, val);
5664     } else {
5665         tcg_gen_extu_i32_i64(acc, val);
5666     }
5667     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5668     gen_mac_clear_flags();
5669     gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
5670 }
5671 
5672 DISAS_INSN(to_macsr)
5673 {
5674     TCGv val;
5675     SRC_EA(env, val, OS_LONG, 0, NULL);
5676     gen_helper_set_macsr(cpu_env, val);
5677     gen_exit_tb(s);
5678 }
5679 
5680 DISAS_INSN(to_mask)
5681 {
5682     TCGv val;
5683     SRC_EA(env, val, OS_LONG, 0, NULL);
5684     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5685 }
5686 
5687 DISAS_INSN(to_mext)
5688 {
5689     TCGv val;
5690     TCGv acc;
5691     SRC_EA(env, val, OS_LONG, 0, NULL);
5692     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5693     if (s->env->macsr & MACSR_FI)
5694         gen_helper_set_mac_extf(cpu_env, val, acc);
5695     else if (s->env->macsr & MACSR_SU)
5696         gen_helper_set_mac_exts(cpu_env, val, acc);
5697     else
5698         gen_helper_set_mac_extu(cpu_env, val, acc);
5699 }
5700 
5701 static disas_proc opcode_table[65536];
5702 
5703 static void
5704 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5705 {
5706   int i;
5707   int from;
5708   int to;
5709 
5710   /* Sanity check.  All set bits must be included in the mask.  */
5711   if (opcode & ~mask) {
5712       fprintf(stderr,
5713               "qemu internal error: bogus opcode definition %04x/%04x\n",
5714               opcode, mask);
5715       abort();
5716   }
5717   /*
5718    * This could probably be cleverer.  For now just optimize the case where
5719    * the top bits are known.
5720    */
5721   /* Find the first zero bit in the mask.  */
5722   i = 0x8000;
5723   while ((i & mask) != 0)
5724       i >>= 1;
5725   /* Iterate over all combinations of this and lower bits.  */
5726   if (i == 0)
5727       i = 1;
5728   else
5729       i <<= 1;
5730   from = opcode & ~(i - 1);
5731   to = from + i;
5732   for (i = from; i < to; i++) {
5733       if ((i & mask) == opcode)
5734           opcode_table[i] = proc;
5735   }
5736 }
5737 
5738 /*
5739  * Register m68k opcode handlers.  Order is important.
5740  * Later insn override earlier ones.
5741  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Default every opcode to the undefined-instruction handler;
       subsequent registrations override the slots they match. */
    BASE(undef,     0000, 0000);
    /* 0x0xxx: immediate arithmetic, bit operations, movep, cas. */
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    /* 0x1xxx-0x3xxx: move.b/.l/.w. */
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    /* 0x4xxx: miscellaneous. */
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if !defined(CONFIG_USER_ONLY)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    /* 0x5xxx: addq/subq, Scc, DBcc, TRAPcc. */
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    /* 0x7xxx-0x9xxx: moveq, or/div, sub. */
    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    /* 0xaxxx: ColdFire EMAC. */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    /* 0xbxxx-0xdxxx: cmp/eor, and/mul, add. */
    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);
    /* 0xexxx: shifts, rotates, bitfields. */
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* 0xfxxx: FPU and system/cache instructions. */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if !defined(CONFIG_USER_ONLY)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
5994 
5995 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
5996 {
5997     DisasContext *dc = container_of(dcbase, DisasContext, base);
5998     CPUM68KState *env = cpu->env_ptr;
5999 
6000     dc->env = env;
6001     dc->pc = dc->base.pc_first;
6002     /* This value will always be filled in properly before m68k_tr_tb_stop. */
6003     dc->pc_prev = 0xdeadbeef;
6004     dc->cc_op = CC_OP_DYNAMIC;
6005     dc->cc_op_synced = 1;
6006     dc->done_mac = 0;
6007     dc->writeback_mask = 0;
6008 
6009     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6010     /* If architectural single step active, limit to 1 */
6011     if (dc->ss_active) {
6012         dc->base.max_insns = 1;
6013     }
6014 }
6015 
/* No per-TB setup is required for m68k. */
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
}
6019 
6020 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6021 {
6022     DisasContext *dc = container_of(dcbase, DisasContext, base);
6023     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6024 }
6025 
/*
 * TranslatorOps translate_insn hook: fetch, decode and translate one
 * guest insn, then decide whether translation may continue into the
 * next insn of this TB.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    /* Dispatch on the first opcode word; the handler may fetch more words. */
    opcode_table[insn](env, dc, insn);
    /* Flush any delayed address-register writebacks queued by the handler. */
    do_writebacks(dc);

    /*
     * dc->pc was advanced past every word the insn consumed; publish it
     * as pc_next, remembering the old value for exception reporting.
     */
    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6060 
/*
 * TranslatorOps tb_stop hook: emit the code that ends the TB, according
 * to why translation stopped.  When architectural single-stepping is
 * active (dc->ss_active), exits that would normally chain to another TB
 * instead raise a trace exception for the insn just executed.
 */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn already emitted its own exit (exception, etc.).  */
        break;
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: sync cc_op and chain to dc->pc.  */
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_EXIT:
        /*
         * We updated CC_OP and PC in gen_exit_tb, but also modified
         * other state that may require returning to the main loop.
         */
        if (dc->ss_active) {
            gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6095 
6096 static void m68k_tr_disas_log(const DisasContextBase *dcbase,
6097                               CPUState *cpu, FILE *logfile)
6098 {
6099     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
6100     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
6101 }
6102 
/* Hook table handed to translator_loop() to drive m68k translation. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6111 
6112 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
6113                            target_ulong pc, void *host_pc)
6114 {
6115     DisasContext dc;
6116     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6117 }
6118 
6119 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6120 {
6121     floatx80 a = { .high = high, .low = low };
6122     union {
6123         float64 f64;
6124         double d;
6125     } u;
6126 
6127     u.f64 = floatx80_to_float64(a, &env->fp_status);
6128     return u.d;
6129 }
6130 
/*
 * Dump the architectural CPU state (data/address/FP registers, SR with
 * the lazily-computed CCR folded in, FPSR/FPCR, and — for system
 * emulation — stack pointers and MMU state) in human-readable form.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* One line per register number: Dn, An, and Fn (raw + as double). */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* env->sr holds only the system bits; merge in the lazy CCR flags. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Decode the FPCR precision field (extended/single/double). */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* System emulation only: banked stack pointers, marking the active one. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif /* !CONFIG_USER_ONLY */
}
6202