xref: /openbmc/qemu/target/m68k/translate.c (revision 7f750efc)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 //#define DEBUG_DISPATCH 1
42 
43 #define DEFO32(name, offset) static TCGv QREG_##name;
44 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
45 #include "qregs.h.inc"
46 #undef DEFO32
47 #undef DEFO64
48 
49 static TCGv_i32 cpu_halted;
50 static TCGv_i32 cpu_exception_index;
51 
52 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
53 static TCGv cpu_dregs[8];
54 static TCGv cpu_aregs[8];
55 static TCGv_i64 cpu_macc[4];
56 
57 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
58 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
59 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
60 #define MACREG(acc)     cpu_macc[acc]
61 #define QREG_SP         get_areg(s, 7)
62 
63 static TCGv NULL_QREG;
64 #define IS_NULL_QREG(t) (t == NULL_QREG)
65 /* Used to distinguish stores from bad addressing modes.  */
66 static TCGv store_dummy;
67 
void m68k_tcg_init(void)
{
    char *p;
    int i;

    /*
     * Create a TCG global for every register listed in qregs.h.inc,
     * each backed by the corresponding CPUM68KState field.
     */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * halted/exception_index live in the parent CPUState, which precedes
     * 'env' inside M68kCPU — hence the negative offset from cpu_env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Names are packed into cpu_reg_names: 16 x "Dn"/"An" (3 bytes each)
       plus 4 x "ACCn" (5 bytes each) — matches the buffer size above.  */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinel globals at bogus negative offsets: never loaded or stored,
       only compared by pointer (IS_NULL_QREG) or used as a dummy target.
       NOTE(review): both are named "NULL" — possibly an oversight.  */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
112 
113 /* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next insn word to fetch */
    target_ulong pc_prev;   /* address of the previously translated insn */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;        /* scratch for MAC insns — allocated lazily;
                               see done_mac (not used in this chunk) */
    int done_mac;           /* mactmp has been allocated for this TB */
    int writeback_mask;     /* bit n set: An has a deferred writeback */
    TCGv writeback[8];      /* deferred new values for address registers */
    bool ss_active;         /* NOTE(review): presumably single-step/trace
                               pending — confirm against translator loop */
} DisasContext;
127 
128 static TCGv get_areg(DisasContext *s, unsigned regno)
129 {
130     if (s->writeback_mask & (1 << regno)) {
131         return s->writeback[regno];
132     } else {
133         return cpu_aregs[regno];
134     }
135 }
136 
137 static void delay_set_areg(DisasContext *s, unsigned regno,
138                            TCGv val, bool give_temp)
139 {
140     if (s->writeback_mask & (1 << regno)) {
141         if (give_temp) {
142             s->writeback[regno] = val;
143         } else {
144             tcg_gen_mov_i32(s->writeback[regno], val);
145         }
146     } else {
147         s->writeback_mask |= 1 << regno;
148         if (give_temp) {
149             s->writeback[regno] = val;
150         } else {
151             TCGv tmp = tcg_temp_new();
152             s->writeback[regno] = tmp;
153             tcg_gen_mov_i32(tmp, val);
154         }
155     }
156 }
157 
158 static void do_writebacks(DisasContext *s)
159 {
160     unsigned mask = s->writeback_mask;
161     if (mask) {
162         s->writeback_mask = 0;
163         do {
164             unsigned regno = ctz32(mask);
165             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
166             mask &= mask - 1;
167         } while (mask);
168     }
169 }
170 
171 /* is_jmp field values */
172 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
173 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
174 
175 #if defined(CONFIG_USER_ONLY)
176 #define IS_USER(s) 1
177 #else
178 #define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
179 #define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
180                       MMU_KERNEL_IDX : MMU_USER_IDX)
181 #define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
182                       MMU_KERNEL_IDX : MMU_USER_IDX)
183 #endif
184 
185 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
186 
187 #ifdef DEBUG_DISPATCH
188 #define DISAS_INSN(name)                                                \
189     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
190                                   uint16_t insn);                       \
191     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
192                              uint16_t insn)                             \
193     {                                                                   \
194         qemu_log("Dispatch " #name "\n");                               \
195         real_disas_##name(env, s, insn);                                \
196     }                                                                   \
197     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
198                                   uint16_t insn)
199 #else
200 #define DISAS_INSN(name)                                                \
201     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
202                              uint16_t insn)
203 #endif
204 
/*
 * For each CC_OP_* state, the set of CCF_* flags whose cached inputs are
 * still meaningful and must not be discarded when leaving that state.
 * X and N are live in every state (see set_cc_op).
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
213 
214 static void set_cc_op(DisasContext *s, CCOp op)
215 {
216     CCOp old_op = s->cc_op;
217     int dead;
218 
219     if (old_op == op) {
220         return;
221     }
222     s->cc_op = op;
223     s->cc_op_synced = 0;
224 
225     /*
226      * Discard CC computation that will no longer be used.
227      * Note that X and N are never dead.
228      */
229     dead = cc_op_live[old_op] & ~cc_op_live[op];
230     if (dead & CCF_C) {
231         tcg_gen_discard_i32(QREG_CC_C);
232     }
233     if (dead & CCF_Z) {
234         tcg_gen_discard_i32(QREG_CC_Z);
235     }
236     if (dead & CCF_V) {
237         tcg_gen_discard_i32(QREG_CC_V);
238     }
239 }
240 
241 /* Update the CPU env CC_OP state.  */
242 static void update_cc_op(DisasContext *s)
243 {
244     if (!s->cc_op_synced) {
245         s->cc_op_synced = 1;
246         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
247     }
248 }
249 
250 /* Generate a jump to an immediate address.  */
251 static void gen_jmp_im(DisasContext *s, uint32_t dest)
252 {
253     update_cc_op(s);
254     tcg_gen_movi_i32(QREG_PC, dest);
255     s->base.is_jmp = DISAS_JUMP;
256 }
257 
258 /* Generate a jump to the address in qreg DEST.  */
259 static void gen_jmp(DisasContext *s, TCGv dest)
260 {
261     update_cc_op(s);
262     tcg_gen_mov_i32(QREG_PC, dest);
263     s->base.is_jmp = DISAS_JUMP;
264 }
265 
/* Emit a call to the raise_exception helper for exception NR.
   Does not set PC or end the TB — callers handle that. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
270 
/* Raise exception NR with a Format $2 stack frame recording THIS_PC. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
285 
286 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
287 {
288     update_cc_op(s);
289     tcg_gen_movi_i32(QREG_PC, dest);
290 
291     gen_raise_exception(nr);
292 
293     s->base.is_jmp = DISAS_NORETURN;
294 }
295 
/* Raise an address-error exception at the current insn's address. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
300 
301 /*
302  * Generate a load from the specified address.  Narrow values are
303  *  sign extended to full register width.
304  */
305 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
306                             int sign, int index)
307 {
308     TCGv tmp = tcg_temp_new_i32();
309 
310     switch (opsize) {
311     case OS_BYTE:
312     case OS_WORD:
313     case OS_LONG:
314         tcg_gen_qemu_ld_tl(tmp, addr, index,
315                            opsize | (sign ? MO_SIGN : 0) | MO_TE);
316         break;
317     default:
318         g_assert_not_reached();
319     }
320     return tmp;
321 }
322 
323 /* Generate a store.  */
324 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
325                              int index)
326 {
327     switch (opsize) {
328     case OS_BYTE:
329     case OS_WORD:
330     case OS_LONG:
331         tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
332         break;
333     default:
334         g_assert_not_reached();
335     }
336 }
337 
/* Direction selector for gen_ldst() / gen_ea_mode(). */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;
343 
/*
 * Generate a load (zero-extending for EA_LOADU, sign-extending for
 * EA_LOADS), otherwise (EA_STORE) generate a store.
 */
348 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
349                      ea_what what, int index)
350 {
351     if (what == EA_STORE) {
352         gen_store(s, opsize, addr, val, index);
353         return store_dummy;
354     } else {
355         return gen_load(s, opsize, addr, what == EA_LOADS, index);
356     }
357 }
358 
359 /* Read a 16-bit immediate constant */
360 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
361 {
362     uint16_t im;
363     im = translator_lduw(env, &s->base, s->pc);
364     s->pc += 2;
365     return im;
366 }
367 
368 /* Read an 8-bit immediate constant */
/* Read an 8-bit immediate constant.  It occupies the low byte of a
   16-bit extension word, so the PC still advances by 2. */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}
373 
374 /* Read a 32-bit immediate constant.  */
375 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
376 {
377     uint32_t im;
378     im = read_im16(env, s) << 16;
379     im |= 0xffff & read_im16(env, s);
380     return im;
381 }
382 
383 /* Read a 64-bit immediate constant.  */
384 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
385 {
386     uint64_t im;
387     im = (uint64_t)read_im32(env, s) << 32;
388     im |= (uint64_t)read_im32(env, s);
389     return im;
390 }
391 
/* Calculate an address index from an extension word.  */
393 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
394 {
395     TCGv add;
396     int scale;
397 
398     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
399     if ((ext & 0x800) == 0) {
400         tcg_gen_ext16s_i32(tmp, add);
401         add = tmp;
402     }
403     scale = (ext >> 9) & 3;
404     if (scale != 0) {
405         tcg_gen_shli_i32(tmp, add, scale);
406         add = tmp;
407     }
408     return add;
409 }
410 
411 /*
412  * Handle a base + index + displacement effective address.
413  * A NULL_QREG base means pc-relative.
414  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the extension word's address: pc-relative modes use it. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Word-sized index (bit 11 clear) needs the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* No scaled index: force the scale field (bits 9-10) to 0. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement: word (0x20) or long (0x30) */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed, or applied post-indirection below */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold bd into the constant base */
                base = tcg_constant_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* everything suppressed: address is just the displacement */
            add = tcg_constant_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement: word (2) or long (3) */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
517 
518 /* Sign or zero extend a value.  */
519 
520 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
521 {
522     switch (opsize) {
523     case OS_BYTE:
524         if (sign) {
525             tcg_gen_ext8s_i32(res, val);
526         } else {
527             tcg_gen_ext8u_i32(res, val);
528         }
529         break;
530     case OS_WORD:
531         if (sign) {
532             tcg_gen_ext16s_i32(res, val);
533         } else {
534             tcg_gen_ext16u_i32(res, val);
535         }
536         break;
537     case OS_LONG:
538         tcg_gen_mov_i32(res, val);
539         break;
540     default:
541         g_assert_not_reached();
542     }
543 }
544 
545 /* Evaluate all the CC flags.  */
546 
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already in canonical form. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* Convention (gen_update_cc_add): CC_N = result, CC_V = src,
           CC_X = carry out. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = the other addend (result - src), re-narrowed to opsize. */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        /* V = (res ^ src) & ~(src ^ other): operands agreed in sign but
           the result differs. */
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* Convention: CC_N = result, CC_V = subtrahend, CC_X = borrow. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* t0 = the minuend (result + src), re-narrowed to opsize. */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        /* V = (minuend ^ src) & (minuend ^ res). */
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* Convention (gen_update_cc_cmp): CC_N = dest, CC_V = src;
           recompute dest - src here. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* N already holds the sign-extended result; C and V clear. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op only known at run time: defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
619 
620 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
621 {
622     TCGv tmp;
623 
624     if (opsize == OS_LONG) {
625         tmp = val;
626     } else {
627         tmp = tcg_temp_new();
628         gen_ext(tmp, val, opsize, sign);
629     }
630 
631     return tmp;
632 }
633 
/* Set flags for a logic-op result: N (and later Z) come from the
   sign-extended VAL; C and V are cleared when CC_OP_LOGIC is flushed. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
639 
/* Record a comparison DEST - SRC for lazy evaluation: CC_N holds the
   destination, CC_V the source (see CC_OP_CMPx in gen_flush_flags). */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
646 
/* Record an add/sub for lazy flags: CC_N = sign-extended result,
   CC_V = the SRC operand.  Caller sets the matching CC_OP_ADDx/SUBx. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
652 
653 static inline int opsize_bytes(int opsize)
654 {
655     switch (opsize) {
656     case OS_BYTE: return 1;
657     case OS_WORD: return 2;
658     case OS_LONG: return 4;
659     case OS_SINGLE: return 4;
660     case OS_DOUBLE: return 8;
661     case OS_EXTENDED: return 12;
662     case OS_PACKED: return 12;
663     default:
664         g_assert_not_reached();
665     }
666 }
667 
668 static inline int insn_opsize(int insn)
669 {
670     switch ((insn >> 6) & 3) {
671     case 0: return OS_BYTE;
672     case 1: return OS_WORD;
673     case 2: return OS_LONG;
674     default:
675         g_assert_not_reached();
676     }
677 }
678 
679 static inline int ext_opsize(int ext, int pos)
680 {
681     switch ((ext >> pos) & 7) {
682     case 0: return OS_LONG;
683     case 1: return OS_SINGLE;
684     case 2: return OS_EXTENDED;
685     case 3: return OS_PACKED;
686     case 4: return OS_WORD;
687     case 5: return OS_DOUBLE;
688     case 6: return OS_BYTE;
689     default:
690         g_assert_not_reached();
691     }
692 }
693 
694 /*
695  * Assign value to a register.  If the width is less than the register width
696  * only the low part of the register is set.
697  */
698 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
699 {
700     TCGv tmp;
701     switch (opsize) {
702     case OS_BYTE:
703         tcg_gen_andi_i32(reg, reg, 0xffffff00);
704         tmp = tcg_temp_new();
705         tcg_gen_ext8u_i32(tmp, val);
706         tcg_gen_or_i32(reg, reg, tmp);
707         break;
708     case OS_WORD:
709         tcg_gen_andi_i32(reg, reg, 0xffff0000);
710         tmp = tcg_temp_new();
711         tcg_gen_ext16u_i32(tmp, val);
712         tcg_gen_or_i32(reg, reg, tmp);
713         break;
714     case OS_LONG:
715     case OS_SINGLE:
716         tcg_gen_mov_i32(reg, val);
717         break;
718     default:
719         g_assert_not_reached();
720     }
721 }
722 
723 /*
724  * Generate code for an "effective address".  Does not adjust the base
725  * register for autoincrement addressing modes.
726  */
/*
 * Returns a TCGv holding the effective address for MODE/REG0, or
 * NULL_QREG when the mode has no address (register direct, immediate)
 * or is invalid for OPSIZE.  OS_UNSIZED rejects the inc/dec modes.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* Byte push through SP decrements by 2 on 680x0, keeping
               the stack pointer word-aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        /* Note: the register itself is updated later, by the caller
           via delay_set_areg (see gen_ea_mode). */
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
790 
791 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
792                     int opsize)
793 {
794     int mode = extract32(insn, 3, 3);
795     int reg0 = REG(insn, 0);
796     return gen_lea_mode(env, s, mode, reg0, opsize);
797 }
798 
/*
 * Generate code to load/store a value from/into an EA.  If WHAT is
 * EA_STORE this is a write, otherwise it is a read (EA_LOADS sign
 * extends, EA_LOADU zero extends).
 * ADDRP is non-null for readwrite operands.
 */
/*
 * Load from / store to the effective address MODE/REG0.  WHAT selects
 * EA_STORE / EA_LOADU / EA_LOADS; returns store_dummy for stores,
 * the loaded value for loads, or NULL_QREG for an invalid mode.
 * *ADDRP caches the computed address across the load half and store
 * half of a read-modify-write operand.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Only increment on the store half of a RMW operand. */
        if (what == EA_STORE || !addrp) {
            /* NOTE(review): this inner 'tmp' shadows the outer one
               declared above — harmless, but worth cleaning up. */
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                /* Keep SP word-aligned for byte accesses on 680x0. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            /* Re-use the address computed by the load half. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        /* Commit the decremented address back to the register. */
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
915 
916 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
917                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
918 {
919     int mode = extract32(insn, 3, 3);
920     int reg0 = REG(insn, 0);
921     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
922 }
923 
/* Return a host pointer temp addressing env->fregs[FREG]. */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
930 
/* Return a host pointer temp addressing the env->fp_result field. */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
937 
/*
 * Copy one FPReg to another.  The value is moved in two pieces:
 * the 16-bit l.upper word and the 64-bit l.lower part.
 */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
}
951 
/* Load a value of size OPSIZE from ADDR and convert it into *FP. */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Integer source: sign-extend, then convert via helper. */
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU has no extended precision. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* 96-bit layout: first long holds the exponent word in its
           upper half, the following 8 bytes hold the mantissa. */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
998 
/*
 * Store the FPU register pointed to by FP to memory at ADDR (mmu index
 * INDEX), converting to the OPSIZE destination format via helpers.
 * Mirror image of gen_load_fp.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* NOTE(review): relies on OS_BYTE/WORD/LONG matching the
           MO_8/16/32 size encodings -- confirm against the OS_* enum.  */
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        /* ColdFire FPUs have no extended-precision format.  */
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* 16-bit upper word in the high half of the first longword;
           64-bit lower part at offset 4.  */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1045 
1046 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1047                         TCGv_ptr fp, ea_what what, int index)
1048 {
1049     if (what == EA_STORE) {
1050         gen_store_fp(s, opsize, addr, fp, index);
1051     } else {
1052         gen_load_fp(s, opsize, addr, fp, index);
1053     }
1054 }
1055 
/*
 * Generate code for an FPU operand accessed via effective address
 * MODE/REG0.  FP points at the FPReg to load into or store from,
 * according to WHAT.  Returns 0 on success, or -1 for an invalid
 * mode/what combination (the caller raises the address fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                /* Double/extended/packed do not fit a data register.  */
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Advance An past the operand after the access.  */
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address back to An.  */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* An immediate can only be a source operand.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                /* ColdFire FPUs have no extended-precision format.  */
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* Upper 16 bits of the first longword; low half unused.  */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1188 
1189 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1190                        int opsize, TCGv_ptr fp, ea_what what, int index)
1191 {
1192     int mode = extract32(insn, 3, 3);
1193     int reg0 = REG(insn, 0);
1194     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1195 }
1196 
/* A comparison assembled by gen_cc_cond: true iff "v1 <tcond> v2".  */
typedef struct {
    TCGCond tcond;  /* comparison to apply */
    TCGv v1;        /* left operand */
    TCGv v2;        /* right operand (often constant zero) */
} DisasCompare;
1202 
/*
 * Fill in *C with a comparison equivalent to m68k condition code COND
 * (0-15) under the current lazy cc-op state.  Where possible the pending
 * CC_OP_* operands are tested directly; otherwise the flags are flushed
 * to CC_OP_FLAGS and the individual flag registers are tested.  The
 * tcond value computed before "done" tests the odd member of each
 * condition pair; it is inverted there for the even member.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition codes are the negations of the odd ones.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1375 
1376 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1377 {
1378   DisasCompare c;
1379 
1380   gen_cc_cond(&c, s, cond);
1381   update_cc_op(s);
1382   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1383 }
1384 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    /* Flush lazy cc state and resume at the instruction after this one.  */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1392 
/*
 * Evaluate the source effective address of the current instruction into
 * RESULT, sign- or zero-extending per OP_SIGN; ADDRP optionally receives
 * the computed address.  Implicitly uses `s` and `insn` from the
 * enclosing DISAS_INSN scope, and returns from the *calling* function
 * on an address fault.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1401 
/*
 * Store VAL to the destination effective address of INSN; ADDRP may
 * carry a previously computed address to reuse.  Implicitly uses `s`
 * from the enclosing DISAS_INSN scope, and returns from the *calling*
 * function on an address fault.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
        }                                                               \
    } while (0)
1410 
/*
 * Generate a jump to immediate address DEST, using goto_tb slot N when
 * chaining is possible.  SRC is the address of the jump instruction
 * itself, recorded in the format-2 exception frame when single-step
 * trace is active.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Trace pending: raise the trace exception instead of jumping.  */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct-chain to the destination TB.  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: look the destination up at runtime.  */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1429 
1430 DISAS_INSN(scc)
1431 {
1432     DisasCompare c;
1433     int cond;
1434     TCGv tmp;
1435 
1436     cond = (insn >> 8) & 0xf;
1437     gen_cc_cond(&c, s, cond);
1438 
1439     tmp = tcg_temp_new();
1440     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1441 
1442     tcg_gen_neg_i32(tmp, tmp);
1443     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1444 }
1445 
/*
 * DBcc Dn,<label>: if cc holds, fall through; otherwise decrement Dn.w
 * and branch back unless the counter wrapped to -1.  The 16-bit
 * displacement is relative to its own location (BASE is captured
 * before reading it).
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    /* Decrement only the low word of Dn.  */
    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1469 
/* Undefined opcode in the MAC range: raise a line-A exception.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1474 
/* Undefined opcode in the FPU range: raise a line-F exception.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1479 
/* Unrecognized opcode: log it and raise an illegal-instruction trap.  */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1491 
1492 DISAS_INSN(mulw)
1493 {
1494     TCGv reg;
1495     TCGv tmp;
1496     TCGv src;
1497     int sign;
1498 
1499     sign = (insn & 0x100) != 0;
1500     reg = DREG(insn, 9);
1501     tmp = tcg_temp_new();
1502     if (sign)
1503         tcg_gen_ext16s_i32(tmp, reg);
1504     else
1505         tcg_gen_ext16u_i32(tmp, reg);
1506     SRC_EA(env, src, OS_WORD, sign, NULL);
1507     tcg_gen_mul_i32(tmp, tmp, src);
1508     tcg_gen_mov_i32(reg, tmp);
1509     gen_logic_cc(s, tmp, OS_LONG);
1510 }
1511 
/* DIVU.W/DIVS.W <ea>,Dn: 32/16 -> 16-bit remainder : 16-bit quotient.  */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    /* Instruction length in bytes; presumably lets the helper compute a
       precise PC when raising divide-by-zero -- confirm in op_helper.  */
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src, ilen);
    } else {
        gen_helper_divuw(cpu_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1536 
/*
 * DIVU.L/DIVS.L and the 64/32 quad forms.  Extension word: bit 11
 * selects signed, bit 10 selects the 64-bit dividend (Dr:Dq) form,
 * bits 12-14 the quotient register, bits 0-2 the remainder register.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit dividend form requires the quad mul/div feature.  */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        /* Instruction length, for the helper's exception handling.  */
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1583 
/*
 * dest = BCD(dest + src + X): branch-free packed-BCD add of the two
 * digits in the low byte, using the classic +0x66 pre-bias and
 * per-digit carry correction.  The result may carry into bit 8.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = t0 * 3 == the 0x6 corrections to subtract */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1648 
/*
 * dest = BCD(dest - src - X): packed-BCD subtract, implemented via the
 * bcd_add identity shown below (tens-complement of src).
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1700 
/*
 * Set the condition codes after a BCD operation whose 9-bit result is
 * in VAL: Z is sticky (a nonzero result byte clears it, a zero one
 * leaves it alone), C and X are taken from bit 8 (decimal carry/borrow).
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1710 
1711 DISAS_INSN(abcd_reg)
1712 {
1713     TCGv src;
1714     TCGv dest;
1715 
1716     gen_flush_flags(s); /* !Z is sticky */
1717 
1718     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1719     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1720     bcd_add(dest, src);
1721     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1722 
1723     bcd_flags(dest);
1724 }
1725 
/* ABCD -(Ax),-(Ay): BCD add, memory form with predecrement operands.  */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Write back via the address computed for the destination load.  */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1746 
1747 DISAS_INSN(sbcd_reg)
1748 {
1749     TCGv src, dest;
1750 
1751     gen_flush_flags(s); /* !Z is sticky */
1752 
1753     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1754     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1755 
1756     bcd_sub(dest, src);
1757 
1758     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1759 
1760     bcd_flags(dest);
1761 }
1762 
/* SBCD -(Ax),-(Ay): BCD subtract, memory form with predecrement operands.  */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Write back via the address computed for the destination load.  */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1783 
/* NBCD <ea>: negate the BCD byte at <ea>, i.e. 0 - operand - X.  */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    /* Subtract the operand from zero.  */
    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1801 
/*
 * ADD/SUB in all sizes and directions: bit 14 of the opcode selects
 * add vs sub, bit 8 the direction (set: Dn op <ea> -> <ea>;
 * clear: <ea> op Dn -> Dn).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* X = carry out of the addition.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* X = borrow, computed from the operands before subtracting.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1839 
1840 /* Reverse the order of the bits in REG.  */
1841 DISAS_INSN(bitrev)
1842 {
1843     TCGv reg;
1844     reg = DREG(insn, 0);
1845     gen_helper_bitrev(reg, reg);
1846 }
1847 
1848 DISAS_INSN(bitop_reg)
1849 {
1850     int opsize;
1851     int op;
1852     TCGv src1;
1853     TCGv src2;
1854     TCGv tmp;
1855     TCGv addr;
1856     TCGv dest;
1857 
1858     if ((insn & 0x38) != 0)
1859         opsize = OS_BYTE;
1860     else
1861         opsize = OS_LONG;
1862     op = (insn >> 6) & 3;
1863     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1864 
1865     gen_flush_flags(s);
1866     src2 = tcg_temp_new();
1867     if (opsize == OS_BYTE)
1868         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1869     else
1870         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1871 
1872     tmp = tcg_temp_new();
1873     tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1874 
1875     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1876 
1877     dest = tcg_temp_new();
1878     switch (op) {
1879     case 1: /* bchg */
1880         tcg_gen_xor_i32(dest, src1, tmp);
1881         break;
1882     case 2: /* bclr */
1883         tcg_gen_andc_i32(dest, src1, tmp);
1884         break;
1885     case 3: /* bset */
1886         tcg_gen_or_i32(dest, src1, tmp);
1887         break;
1888     default: /* btst */
1889         break;
1890     }
1891     if (op) {
1892         DEST_EA(env, insn, opsize, dest, &addr);
1893     }
1894 }
1895 
1896 DISAS_INSN(sats)
1897 {
1898     TCGv reg;
1899     reg = DREG(insn, 0);
1900     gen_flush_flags(s);
1901     gen_helper_sats(reg, reg, QREG_CC_V);
1902     gen_logic_cc(s, reg, OS_LONG);
1903 }
1904 
1905 static void gen_push(DisasContext *s, TCGv val)
1906 {
1907     TCGv tmp;
1908 
1909     tmp = tcg_temp_new();
1910     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1911     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1912     tcg_gen_mov_i32(QREG_SP, tmp);
1913 }
1914 
1915 static TCGv mreg(int reg)
1916 {
1917     if (reg < 8) {
1918         /* Dx */
1919         return cpu_dregs[reg];
1920     }
1921     /* Ax */
1922     return cpu_aregs[reg & 7];
1923 }
1924 
/*
 * MOVEM.W/L: move multiple registers to or from memory.  Bit 10 of the
 * opcode selects load vs store, bit 6 word vs long.  MASK has one bit
 * per register (bit order reversed in pre-decrement mode).
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    /* Default base address: the bare address register (modes 2-4).  */
    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Load everything into temporaries first and commit afterwards,
           so the register set is untouched if a load faults.  */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            /* Mask is bit-reversed here: bit (15 - i) selects register i.  */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2030 
/*
 * MOVEP: transfer 2 or 4 bytes between a data register and memory at
 * every other byte address (An + displacement, step of 2).
 */
DISAS_INSN(movep)
{
    uint8_t i;          /* bytes remaining to transfer (4 or 2) */
    int16_t displ;      /* sign-extended 16-bit displacement */
    TCGv reg;
    TCGv addr;
    TCGv abuf;          /* working byte address */
    TCGv dbuf;          /* byte staging value */

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    /* Bit 6 selects long (4 bytes) versus word (2 bytes). */
    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* Register to memory: most-significant byte is stored first. */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
            if (i > 1) {
                /* Bytes live at alternate addresses. */
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* Memory to register: deposit each byte into its field of Dn. */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
}
2073 
2074 DISAS_INSN(bitop_im)
2075 {
2076     int opsize;
2077     int op;
2078     TCGv src1;
2079     uint32_t mask;
2080     int bitnum;
2081     TCGv tmp;
2082     TCGv addr;
2083 
2084     if ((insn & 0x38) != 0)
2085         opsize = OS_BYTE;
2086     else
2087         opsize = OS_LONG;
2088     op = (insn >> 6) & 3;
2089 
2090     bitnum = read_im16(env, s);
2091     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2092         if (bitnum & 0xfe00) {
2093             disas_undef(env, s, insn);
2094             return;
2095         }
2096     } else {
2097         if (bitnum & 0xff00) {
2098             disas_undef(env, s, insn);
2099             return;
2100         }
2101     }
2102 
2103     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2104 
2105     gen_flush_flags(s);
2106     if (opsize == OS_BYTE)
2107         bitnum &= 7;
2108     else
2109         bitnum &= 31;
2110     mask = 1 << bitnum;
2111 
2112    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2113 
2114     if (op) {
2115         tmp = tcg_temp_new();
2116         switch (op) {
2117         case 1: /* bchg */
2118             tcg_gen_xori_i32(tmp, src1, mask);
2119             break;
2120         case 2: /* bclr */
2121             tcg_gen_andi_i32(tmp, src1, ~mask);
2122             break;
2123         case 3: /* bset */
2124             tcg_gen_ori_i32(tmp, src1, mask);
2125             break;
2126         default: /* btst */
2127             break;
2128         }
2129         DEST_EA(env, insn, opsize, tmp, &addr);
2130     }
2131 }
2132 
2133 static TCGv gen_get_ccr(DisasContext *s)
2134 {
2135     TCGv dest;
2136 
2137     update_cc_op(s);
2138     dest = tcg_temp_new();
2139     gen_helper_get_ccr(dest, cpu_env);
2140     return dest;
2141 }
2142 
2143 static TCGv gen_get_sr(DisasContext *s)
2144 {
2145     TCGv ccr;
2146     TCGv sr;
2147 
2148     ccr = gen_get_ccr(s);
2149     sr = tcg_temp_new();
2150     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2151     tcg_gen_or_i32(sr, sr, ccr);
2152     return sr;
2153 }
2154 
2155 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2156 {
2157     if (ccr_only) {
2158         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2159         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2160         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2161         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2162         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2163     } else {
2164         /* Must writeback before changing security state. */
2165         do_writebacks(s);
2166         gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
2167     }
2168     set_cc_op(s, CC_OP_FLAGS);
2169 }
2170 
2171 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2172 {
2173     if (ccr_only) {
2174         gen_helper_set_ccr(cpu_env, val);
2175     } else {
2176         /* Must writeback before changing security state. */
2177         do_writebacks(s);
2178         gen_helper_set_sr(cpu_env, val);
2179     }
2180     set_cc_op(s, CC_OP_FLAGS);
2181 }
2182 
2183 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2184                            bool ccr_only)
2185 {
2186     if ((insn & 0x3f) == 0x3c) {
2187         uint16_t val;
2188         val = read_im16(env, s);
2189         gen_set_sr_im(s, val, ccr_only);
2190     } else {
2191         TCGv src;
2192         SRC_EA(env, src, OS_WORD, 0, NULL);
2193         gen_set_sr(s, src, ccr_only);
2194     }
2195 }
2196 
2197 DISAS_INSN(arith_im)
2198 {
2199     int op;
2200     TCGv im;
2201     TCGv src1;
2202     TCGv dest;
2203     TCGv addr;
2204     int opsize;
2205     bool with_SR = ((insn & 0x3f) == 0x3c);
2206 
2207     op = (insn >> 9) & 7;
2208     opsize = insn_opsize(insn);
2209     switch (opsize) {
2210     case OS_BYTE:
2211         im = tcg_constant_i32((int8_t)read_im8(env, s));
2212         break;
2213     case OS_WORD:
2214         im = tcg_constant_i32((int16_t)read_im16(env, s));
2215         break;
2216     case OS_LONG:
2217         im = tcg_constant_i32(read_im32(env, s));
2218         break;
2219     default:
2220         g_assert_not_reached();
2221     }
2222 
2223     if (with_SR) {
2224         /* SR/CCR can only be used with andi/eori/ori */
2225         if (op == 2 || op == 3 || op == 6) {
2226             disas_undef(env, s, insn);
2227             return;
2228         }
2229         switch (opsize) {
2230         case OS_BYTE:
2231             src1 = gen_get_ccr(s);
2232             break;
2233         case OS_WORD:
2234             if (IS_USER(s)) {
2235                 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2236                 return;
2237             }
2238             src1 = gen_get_sr(s);
2239             break;
2240         default:
2241             /* OS_LONG; others already g_assert_not_reached.  */
2242             disas_undef(env, s, insn);
2243             return;
2244         }
2245     } else {
2246         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2247     }
2248     dest = tcg_temp_new();
2249     switch (op) {
2250     case 0: /* ori */
2251         tcg_gen_or_i32(dest, src1, im);
2252         if (with_SR) {
2253             gen_set_sr(s, dest, opsize == OS_BYTE);
2254             gen_exit_tb(s);
2255         } else {
2256             DEST_EA(env, insn, opsize, dest, &addr);
2257             gen_logic_cc(s, dest, opsize);
2258         }
2259         break;
2260     case 1: /* andi */
2261         tcg_gen_and_i32(dest, src1, im);
2262         if (with_SR) {
2263             gen_set_sr(s, dest, opsize == OS_BYTE);
2264             gen_exit_tb(s);
2265         } else {
2266             DEST_EA(env, insn, opsize, dest, &addr);
2267             gen_logic_cc(s, dest, opsize);
2268         }
2269         break;
2270     case 2: /* subi */
2271         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2272         tcg_gen_sub_i32(dest, src1, im);
2273         gen_update_cc_add(dest, im, opsize);
2274         set_cc_op(s, CC_OP_SUBB + opsize);
2275         DEST_EA(env, insn, opsize, dest, &addr);
2276         break;
2277     case 3: /* addi */
2278         tcg_gen_add_i32(dest, src1, im);
2279         gen_update_cc_add(dest, im, opsize);
2280         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2281         set_cc_op(s, CC_OP_ADDB + opsize);
2282         DEST_EA(env, insn, opsize, dest, &addr);
2283         break;
2284     case 5: /* eori */
2285         tcg_gen_xor_i32(dest, src1, im);
2286         if (with_SR) {
2287             gen_set_sr(s, dest, opsize == OS_BYTE);
2288             gen_exit_tb(s);
2289         } else {
2290             DEST_EA(env, insn, opsize, dest, &addr);
2291             gen_logic_cc(s, dest, opsize);
2292         }
2293         break;
2294     case 6: /* cmpi */
2295         gen_update_cc_cmp(s, src1, im, opsize);
2296         break;
2297     default:
2298         abort();
2299     }
2300 }
2301 
/*
 * CAS: compare-and-swap a memory operand with Dc, storing Du on match.
 * Implemented with an atomic cmpxchg; flags are set from the loaded
 * value compared against Dc.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    /* Size field in bits 10:9: 1=byte, 2=word, 3=long; 0 is invalid. */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Dc, sign-extended to the operation size. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Apply any deferred post-increment/pre-decrement to An. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2364 
/*
 * CAS2 (word): dual compare-and-swap on two independent addresses.
 * Under parallel translation there is no way to do the two accesses
 * atomically, so we exit to exclusive execution instead.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        /* Pack the four register numbers into one immediate for the helper. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2414 
/*
 * CAS2 (long): dual compare-and-swap on two independent addresses.
 * Unlike cas2w, a dedicated parallel helper exists for this form.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2464 
2465 DISAS_INSN(byterev)
2466 {
2467     TCGv reg;
2468 
2469     reg = DREG(insn, 0);
2470     tcg_gen_bswap32_i32(reg, reg);
2471 }
2472 
2473 DISAS_INSN(move)
2474 {
2475     TCGv src;
2476     TCGv dest;
2477     int op;
2478     int opsize;
2479 
2480     switch (insn >> 12) {
2481     case 1: /* move.b */
2482         opsize = OS_BYTE;
2483         break;
2484     case 2: /* move.l */
2485         opsize = OS_LONG;
2486         break;
2487     case 3: /* move.w */
2488         opsize = OS_WORD;
2489         break;
2490     default:
2491         abort();
2492     }
2493     SRC_EA(env, src, opsize, 1, NULL);
2494     op = (insn >> 6) & 7;
2495     if (op == 1) {
2496         /* movea */
2497         /* The value will already have been sign extended.  */
2498         dest = AREG(insn, 9);
2499         tcg_gen_mov_i32(dest, src);
2500     } else {
2501         /* normal move */
2502         uint16_t dest_ea;
2503         dest_ea = ((insn >> 9) & 7) | (op << 3);
2504         DEST_EA(env, dest_ea, opsize, src, NULL);
2505         /* This will be correct because loads sign extend.  */
2506         gen_logic_cc(s, src, opsize);
2507     }
2508 }
2509 
/*
 * NEGX: negate with extend -- dest = 0 - (src + X), updating all
 * flags; Z is sticky (only cleared, never set, by a non-zero result).
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* The borrow propagated into the high word; keep only bit 0. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2552 
2553 DISAS_INSN(lea)
2554 {
2555     TCGv reg;
2556     TCGv tmp;
2557 
2558     reg = AREG(insn, 9);
2559     tmp = gen_lea(env, s, insn, OS_LONG);
2560     if (IS_NULL_QREG(tmp)) {
2561         gen_addr_fault(s);
2562         return;
2563     }
2564     tcg_gen_mov_i32(reg, tmp);
2565 }
2566 
2567 DISAS_INSN(clr)
2568 {
2569     int opsize;
2570     TCGv zero;
2571 
2572     zero = tcg_constant_i32(0);
2573     opsize = insn_opsize(insn);
2574     DEST_EA(env, insn, opsize, zero, NULL);
2575     gen_logic_cc(s, zero, opsize);
2576 }
2577 
2578 DISAS_INSN(move_from_ccr)
2579 {
2580     TCGv ccr;
2581 
2582     ccr = gen_get_ccr(s);
2583     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2584 }
2585 
/* NEG: dest = 0 - src, with full subtract-style flag updates. */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X/C are set whenever the result is non-zero (borrow from 0 - src). */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
}
2602 
/* MOVE to CCR: shared with MOVE to SR, restricted to the flag byte. */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}
2607 
2608 DISAS_INSN(not)
2609 {
2610     TCGv src1;
2611     TCGv dest;
2612     TCGv addr;
2613     int opsize;
2614 
2615     opsize = insn_opsize(insn);
2616     SRC_EA(env, src1, opsize, 1, &addr);
2617     dest = tcg_temp_new();
2618     tcg_gen_not_i32(dest, src1);
2619     DEST_EA(env, insn, opsize, dest, &addr);
2620     gen_logic_cc(s, dest, opsize);
2621 }
2622 
2623 DISAS_INSN(swap)
2624 {
2625     TCGv src1;
2626     TCGv src2;
2627     TCGv reg;
2628 
2629     src1 = tcg_temp_new();
2630     src2 = tcg_temp_new();
2631     reg = DREG(insn, 0);
2632     tcg_gen_shli_i32(src1, reg, 16);
2633     tcg_gen_shri_i32(src2, reg, 16);
2634     tcg_gen_or_i32(reg, src1, src2);
2635     gen_logic_cc(s, reg, OS_LONG);
2636 }
2637 
DISAS_INSN(bkpt)
{
    /*
     * bkpt: system-emulation builds raise an illegal-instruction
     * exception; user-mode builds raise a debug exception instead.
     */
#if defined(CONFIG_SOFTMMU)
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#else
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#endif
}
2646 
2647 DISAS_INSN(pea)
2648 {
2649     TCGv tmp;
2650 
2651     tmp = gen_lea(env, s, insn, OS_LONG);
2652     if (IS_NULL_QREG(tmp)) {
2653         gen_addr_fault(s);
2654         return;
2655     }
2656     gen_push(s, tmp);
2657 }
2658 
2659 DISAS_INSN(ext)
2660 {
2661     int op;
2662     TCGv reg;
2663     TCGv tmp;
2664 
2665     reg = DREG(insn, 0);
2666     op = (insn >> 6) & 7;
2667     tmp = tcg_temp_new();
2668     if (op == 3)
2669         tcg_gen_ext16s_i32(tmp, reg);
2670     else
2671         tcg_gen_ext8s_i32(tmp, reg);
2672     if (op == 2)
2673         gen_partset_reg(OS_WORD, reg, tmp);
2674     else
2675         tcg_gen_mov_i32(reg, tmp);
2676     gen_logic_cc(s, tmp, OS_LONG);
2677 }
2678 
2679 DISAS_INSN(tst)
2680 {
2681     int opsize;
2682     TCGv tmp;
2683 
2684     opsize = insn_opsize(insn);
2685     SRC_EA(env, tmp, opsize, 1, NULL);
2686     gen_logic_cc(s, tmp, opsize);
2687 }
2688 
DISAS_INSN(pulse)
{
    /* PULSE is implemented as a no-op.  */
}
2693 
/* ILLEGAL: unconditionally raise the illegal-instruction exception. */
DISAS_INSN(illegal)
{
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2698 
/*
 * TAS: test-and-set the most-significant bit of a byte operand.
 * The memory form is performed with an atomic fetch-or; any
 * post-increment/pre-decrement update of An is applied afterwards.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        /* Flags are taken from the value before the bit is set. */
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        /* Atomically set bit 7; src1 receives the original byte. */
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrememnt.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2732 
/*
 * MULU.L/MULS.L: 32x32 multiply.  Ext word bit 11 selects signed,
 * bit 10 selects the 64-bit (Dh:Dl) result form, which requires the
 * QUAD_MULDIV feature.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half lands in CC_Z, high half in CC_N. */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is zero only when the whole 64-bit product is zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0: V is set when the product does not fit in 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2795 
/*
 * Common body for link/linkl: push An, set An to the address of the
 * saved value, then displace SP by the (usually negative) offset.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    /* When An is SP itself, the final SP update below takes precedence. */
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
2810 
2811 DISAS_INSN(link)
2812 {
2813     int16_t offset;
2814 
2815     offset = read_im16(env, s);
2816     gen_link(s, insn, offset);
2817 }
2818 
2819 DISAS_INSN(linkl)
2820 {
2821     int32_t offset;
2822 
2823     offset = read_im32(env, s);
2824     gen_link(s, insn, offset);
2825 }
2826 
/* UNLK: SP = An, then pop the saved frame pointer back into An. */
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    /* Copy An first so the sequence is also correct when An is SP. */
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
2840 
#if defined(CONFIG_SOFTMMU)
/* RESET: privileged; the actual work is done by a runtime helper. */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
2852 
DISAS_INSN(nop)
{
    /* Nothing to generate. */
}
2856 
2857 DISAS_INSN(rtd)
2858 {
2859     TCGv tmp;
2860     int16_t offset = read_im16(env, s);
2861 
2862     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2863     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2864     gen_jmp(s, tmp);
2865 }
2866 
2867 DISAS_INSN(rtr)
2868 {
2869     TCGv tmp;
2870     TCGv ccr;
2871     TCGv sp;
2872 
2873     sp = tcg_temp_new();
2874     ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2875     tcg_gen_addi_i32(sp, QREG_SP, 2);
2876     tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2877     tcg_gen_addi_i32(QREG_SP, sp, 4);
2878 
2879     gen_set_sr(s, ccr, true);
2880 
2881     gen_jmp(s, tmp);
2882 }
2883 
2884 DISAS_INSN(rts)
2885 {
2886     TCGv tmp;
2887 
2888     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2889     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2890     gen_jmp(s, tmp);
2891 }
2892 
2893 DISAS_INSN(jump)
2894 {
2895     TCGv tmp;
2896 
2897     /*
2898      * Load the target address first to ensure correct exception
2899      * behavior.
2900      */
2901     tmp = gen_lea(env, s, insn, OS_LONG);
2902     if (IS_NULL_QREG(tmp)) {
2903         gen_addr_fault(s);
2904         return;
2905     }
2906     if ((insn & 0x40) == 0) {
2907         /* jsr */
2908         gen_push(s, tcg_constant_i32(s->pc));
2909     }
2910     gen_jmp(s, tmp);
2911 }
2912 
/* ADDQ/SUBQ: add or subtract a "quick" immediate (1..8) to/from an EA. */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    /* An encoded count of 0 means 8. */
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        /* Bit 8 distinguishes subq (set) from addq (clear). */
        if (insn & 0x0100) {
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2960 
/*
 * BRA/BSR/Bcc: the 8-bit displacement in the opcode selects a 16-bit
 * extension word when 0 and a 32-bit one when 0xff.
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* base is captured before any extension words are consumed. */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        TCGLabel *l1 = gen_new_label();
        /* Branch on the inverted condition, falling through when taken. */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
2992 
2993 DISAS_INSN(moveq)
2994 {
2995     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2996     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2997 }
2998 
2999 DISAS_INSN(mvzs)
3000 {
3001     int opsize;
3002     TCGv src;
3003     TCGv reg;
3004 
3005     if (insn & 0x40)
3006         opsize = OS_WORD;
3007     else
3008         opsize = OS_BYTE;
3009     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3010     reg = DREG(insn, 9);
3011     tcg_gen_mov_i32(reg, src);
3012     gen_logic_cc(s, src, opsize);
3013 }
3014 
3015 DISAS_INSN(or)
3016 {
3017     TCGv reg;
3018     TCGv dest;
3019     TCGv src;
3020     TCGv addr;
3021     int opsize;
3022 
3023     opsize = insn_opsize(insn);
3024     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3025     dest = tcg_temp_new();
3026     if (insn & 0x100) {
3027         SRC_EA(env, src, opsize, 0, &addr);
3028         tcg_gen_or_i32(dest, src, reg);
3029         DEST_EA(env, insn, opsize, dest, &addr);
3030     } else {
3031         SRC_EA(env, src, opsize, 0, NULL);
3032         tcg_gen_or_i32(dest, src, reg);
3033         gen_partset_reg(opsize, DREG(insn, 9), dest);
3034     }
3035     gen_logic_cc(s, dest, opsize);
3036 }
3037 
3038 DISAS_INSN(suba)
3039 {
3040     TCGv src;
3041     TCGv reg;
3042 
3043     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3044     reg = AREG(insn, 9);
3045     tcg_gen_sub_i32(reg, reg, src);
3046 }
3047 
/*
 * Emit the SUBX core: QREG_CC_N = dest - src - X, with full flag
 * updates (Z is sticky).  The caller writes QREG_CC_N back.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Keep only the borrow bit of the high word. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3080 
3081 DISAS_INSN(subx_reg)
3082 {
3083     TCGv dest;
3084     TCGv src;
3085     int opsize;
3086 
3087     opsize = insn_opsize(insn);
3088 
3089     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3090     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3091 
3092     gen_subx(s, src, dest, opsize);
3093 
3094     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3095 }
3096 
/* SUBX, memory form: -(Ax) = -(Ax) - -(Ay) - X, both predecremented. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay and load the subtrahend. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax and load the minuend. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3119 
3120 DISAS_INSN(mov3q)
3121 {
3122     TCGv src;
3123     int val;
3124 
3125     val = (insn >> 9) & 7;
3126     if (val == 0) {
3127         val = -1;
3128     }
3129     src = tcg_constant_i32(val);
3130     gen_logic_cc(s, src, OS_LONG);
3131     DEST_EA(env, insn, OS_LONG, src, NULL);
3132 }
3133 
3134 DISAS_INSN(cmp)
3135 {
3136     TCGv src;
3137     TCGv reg;
3138     int opsize;
3139 
3140     opsize = insn_opsize(insn);
3141     SRC_EA(env, src, opsize, 1, NULL);
3142     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3143     gen_update_cc_cmp(s, reg, src, opsize);
3144 }
3145 
3146 DISAS_INSN(cmpa)
3147 {
3148     int opsize;
3149     TCGv src;
3150     TCGv reg;
3151 
3152     if (insn & 0x100) {
3153         opsize = OS_LONG;
3154     } else {
3155         opsize = OS_WORD;
3156     }
3157     SRC_EA(env, src, opsize, 1, NULL);
3158     reg = AREG(insn, 9);
3159     gen_update_cc_cmp(s, reg, src, OS_LONG);
3160 }
3161 
DISAS_INSN(cmpm)
{
    /* CMPM.{B,W,L} (Ay)+,(Ax)+: compare memory with memory. */
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    /* Only the condition codes are updated; no register is written. */
    gen_update_cc_cmp(s, dst, src, opsize);
}
3176 
3177 DISAS_INSN(eor)
3178 {
3179     TCGv src;
3180     TCGv dest;
3181     TCGv addr;
3182     int opsize;
3183 
3184     opsize = insn_opsize(insn);
3185 
3186     SRC_EA(env, src, opsize, 0, &addr);
3187     dest = tcg_temp_new();
3188     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3189     gen_logic_cc(s, dest, opsize);
3190     DEST_EA(env, insn, opsize, dest, &addr);
3191 }
3192 
3193 static void do_exg(TCGv reg1, TCGv reg2)
3194 {
3195     TCGv temp = tcg_temp_new();
3196     tcg_gen_mov_i32(temp, reg1);
3197     tcg_gen_mov_i32(reg1, reg2);
3198     tcg_gen_mov_i32(reg2, temp);
3199 }
3200 
DISAS_INSN(exg_dd)
{
    /* EXG Dx,Dy: exchange two data registers. */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3206 
DISAS_INSN(exg_aa)
{
    /* EXG Ax,Ay: exchange two address registers. */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3212 
DISAS_INSN(exg_da)
{
    /* EXG Dx,Ay: exchange a data register with an address register. */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3218 
DISAS_INSN(and)
{
    /* AND.{B,W,L}: bitwise AND between Dn and <ea>, in either direction. */
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* AND Dn,<ea>: the memory operand is the destination. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* AND <ea>,Dn: the data register is the destination. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3242 
3243 DISAS_INSN(adda)
3244 {
3245     TCGv src;
3246     TCGv reg;
3247 
3248     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3249     reg = AREG(insn, 9);
3250     tcg_gen_add_i32(reg, reg, src);
3251 }
3252 
/*
 * Generate code for ADDX: result = src + dest + X, updating all of
 * N/Z/V/C/X.  The result is left in QREG_CC_N for the caller to
 * write back; Z is sticky (only cleared, never set) per the m68k
 * ADDX definition.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    /* First step: N:X = X + dest (carry-out captured in X). */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    /* Second step: N:X += src, accumulating any further carry. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    tmp = tcg_temp_new();
    /* V = (N ^ src) & ~(dest ^ src): overflow iff the operands share a
       sign and the result's sign differs from it. */
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3284 
3285 DISAS_INSN(addx_reg)
3286 {
3287     TCGv dest;
3288     TCGv src;
3289     int opsize;
3290 
3291     opsize = insn_opsize(insn);
3292 
3293     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3294     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3295 
3296     gen_addx(s, src, dest, opsize);
3297 
3298     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3299 }
3300 
DISAS_INSN(addx_mem)
{
    /*
     * ADDX.{B,W,L} -(Ay),-(Ax): add with extend, memory form.
     * Both operands use the predecrement addressing mode; the order of
     * the two decrements matters when Ay and Ax are the same register.
     */
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay and load the source operand (sign-extended). */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax and load the destination operand (sign-extended). */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in CC_N; store it back to (Ax). */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3323 
/*
 * Generate an immediate-count shift (ASL/ASR/LSL/LSR #imm,Dn).
 * The count is encoded in bits 11:9 (0 means 8).  Sets N/Z/V/C/X;
 * V is only meaningful for ASL on M68000-family cores.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    /* A count field of 0 encodes a shift by 8. */
    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X get the last bit shifted out of the top. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V set iff any of the top (count+1) bits differ. */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
            /* Turn the 0/1 setcond result into the 0/-1 flag form. */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* C/X get the last bit shifted out of the bottom. */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3375 
/*
 * Generate a register-count shift (ASL/ASR/LSL/LSR Dx,Dy).  The count
 * comes from Dx and is taken modulo 64.  The shift is performed in a
 * 64-bit temporary so the last bit shifted out (for C/X) is easy to
 * recover; X is only updated when the count is non-zero.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* Narrow operand: the carry bit sits just above 'bits'. */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: pre-shift the value into the high half so the
           last bit shifted out lands in the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3467 
DISAS_INSN(shift8_im)
{
    /* Byte-sized shift with immediate count. */
    shift_im(s, insn, OS_BYTE);
}
3472 
DISAS_INSN(shift16_im)
{
    /* Word-sized shift with immediate count. */
    shift_im(s, insn, OS_WORD);
}
3477 
DISAS_INSN(shift_im)
{
    /* Long-sized shift with immediate count. */
    shift_im(s, insn, OS_LONG);
}
3482 
DISAS_INSN(shift8_reg)
{
    /* Byte-sized shift with the count taken from a register. */
    shift_reg(s, insn, OS_BYTE);
}
3487 
DISAS_INSN(shift16_reg)
{
    /* Word-sized shift with the count taken from a register. */
    shift_reg(s, insn, OS_WORD);
}
3492 
DISAS_INSN(shift_reg)
{
    /* Long-sized shift with the count taken from a register. */
    shift_reg(s, insn, OS_LONG);
}
3497 
DISAS_INSN(shift_mem)
{
    /*
     * Memory shift: always word-sized and always by exactly one bit
     * (ASL/ASR/LSL/LSR <ea>).
     */
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C gets the old top bit; the value is shifted left by one. */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C gets the old bottom bit (masked below). */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3537 
/*
 * Generate a plain rotate (ROL/ROR) of 'reg' by 'shift' and set
 * N/Z/C/V.  For 8/16-bit operands the input is replicated across the
 * 32-bit word so a single 32-bit rotate produces the right pattern,
 * then the result is sign-extended back.  X is not affected.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    if (left) {
        /* C is the bit rotated around into bit 0. */
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        /* C is the bit rotated around into bit 31. */
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3585 
3586 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3587 {
3588     switch (size) {
3589     case 8:
3590         tcg_gen_ext8s_i32(reg, reg);
3591         break;
3592     case 16:
3593         tcg_gen_ext16s_i32(reg, reg);
3594         break;
3595     default:
3596         break;
3597     }
3598     tcg_gen_mov_i32(QREG_CC_N, reg);
3599     tcg_gen_mov_i32(QREG_CC_Z, reg);
3600     tcg_gen_mov_i32(QREG_CC_X, X);
3601     tcg_gen_mov_i32(QREG_CC_C, X);
3602     tcg_gen_movi_i32(QREG_CC_V, 0);
3603 }
3604 
/*
 * Rotate 'reg' through the X flag by 'shift' bits at the given width.
 * The (size+1)-bit rotate is built from three shifts OR'd together:
 * the value shifted left, the value shifted right, and the old X bit
 * inserted at the pivot position.  Returns the new X bit as a temp.
 * Result of rotate_x() is valid if 0 <= shift <= size.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3645 
/*
 * 32-bit rotate through the X flag, implemented as a 64-bit rotate of
 * a concatenation of the value and the X bit.  Returns the new X bit
 * as a temp.  When the shift count is zero, both the register and X
 * are left unchanged (handled by the final movconds).
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3706 
3707 DISAS_INSN(rotate_im)
3708 {
3709     TCGv shift;
3710     int tmp;
3711     int left = (insn & 0x100);
3712 
3713     tmp = (insn >> 9) & 7;
3714     if (tmp == 0) {
3715         tmp = 8;
3716     }
3717 
3718     shift = tcg_constant_i32(tmp);
3719     if (insn & 8) {
3720         rotate(DREG(insn, 0), shift, left, 32);
3721     } else {
3722         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3723         rotate_x_flags(DREG(insn, 0), X, 32);
3724     }
3725 
3726     set_cc_op(s, CC_OP_FLAGS);
3727 }
3728 
DISAS_INSN(rotate8_im)
{
    /* Byte-sized rotate with immediate count (1..8; encoding 0 means 8). */
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        /* ROL/ROR */
        rotate(reg, shift, left, 8);
    } else {
        /* ROXL/ROXR: rotate through the X flag. */
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3753 
DISAS_INSN(rotate16_im)
{
    /* Word-sized rotate with immediate count (1..8; encoding 0 means 8). */
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        /* ROL/ROR */
        rotate(reg, shift, left, 16);
    } else {
        /* ROXL/ROXR: rotate through the X flag. */
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3777 
DISAS_INSN(rotate_reg)
{
    /* 32-bit rotate with the count taken from a data register. */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective count is modulo 32. */
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3808 
DISAS_INSN(rotate8_reg)
{
    /* Byte-sized rotate with the count taken from a data register. */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective count is modulo 8. */
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3840 
DISAS_INSN(rotate16_reg)
{
    /* Word-sized rotate with the count taken from a data register. */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective count is modulo 16. */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3872 
DISAS_INSN(rotate_mem)
{
    /* Memory rotate: always word-sized and always by exactly one bit. */
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        /* ROL/ROR */
        rotate(src, shift, left, 16);
    } else {
        /* ROXL/ROXR: rotate through the X flag. */
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3892 
DISAS_INSN(bfext_reg)
{
    /*
     * BFEXTU/BFEXTS Dn{offset:width},Dm: extract a bit-field from a
     * data register into Dm (unsigned or signed).  N is set from the
     * sign-extended field for the condition codes.
     */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = (32 - width) mod 32, from the width register. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        /* Arithmetic shift gives the signed field; N gets it either way. */
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3959 
DISAS_INSN(bfext_mem)
{
    /*
     * BFEXTU/BFEXTS <ea>{offset:width},Dn: extract a bit-field from
     * memory via helper.  Offset and width each come from either the
     * extension word or a data register.
     */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* The unsigned helper returns a pair: result and the N value. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3994 
DISAS_INSN(bfop_reg)
{
    /*
     * BFCHG/BFCLR/BFFFO/BFSET/BFTST on a data register.  The field is
     * rotated to the top of the word into CC_N for the condition
     * codes, and an inverted mask (clear bits select the field) is
     * built to apply the operation.
     */
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        /* Shift (or rotate, if the field wraps) the field to the top. */
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        /* Rotate the inverted mask into the field's real position. */
        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Apply the operation; 'mask' has 0s at the field's bit positions. */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4081 
DISAS_INSN(bfop_mem)
{
    /*
     * BFCHG/BFCLR/BFFFO/BFSET/BFTST on a memory operand, all done via
     * helpers; each helper returns the field value for the condition
     * codes in CC_N.
     */
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* The helper returns a pair: the found offset and the N value. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4128 
/*
 * BFINS with a register destination: insert the low LEN bits of the
 * source data register into a bitfield of the destination register.
 * CC_N is set from the source shifted so its top inserted bit lands
 * in bit 31.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* A width field of 0 encodes a width of 32.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /*
             * Field wraps past bit 0: rotate a masked copy of the
             * source into place and merge it by hand.
             */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        /* Dynamic width and/or offset: build mask and rotation at
           runtime, then merge as above.  */
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4198 
4199 DISAS_INSN(bfins_mem)
4200 {
4201     int ext = read_im16(env, s);
4202     TCGv src = DREG(ext, 12);
4203     TCGv addr, len, ofs;
4204 
4205     addr = gen_lea(env, s, insn, OS_UNSIZED);
4206     if (IS_NULL_QREG(addr)) {
4207         gen_addr_fault(s);
4208         return;
4209     }
4210 
4211     if (ext & 0x20) {
4212         len = DREG(ext, 0);
4213     } else {
4214         len = tcg_constant_i32(extract32(ext, 0, 5));
4215     }
4216     if (ext & 0x800) {
4217         ofs = DREG(ext, 6);
4218     } else {
4219         ofs = tcg_constant_i32(extract32(ext, 6, 5));
4220     }
4221 
4222     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4223     set_cc_op(s, CC_OP_LOGIC);
4224 }
4225 
4226 DISAS_INSN(ff1)
4227 {
4228     TCGv reg;
4229     reg = DREG(insn, 0);
4230     gen_logic_cc(s, reg, OS_LONG);
4231     gen_helper_ff1(reg, reg);
4232 }
4233 
/*
 * CHK: bounds-check Dn against an EA operand; the helper raises the
 * CHK exception when out of range.  Word size is always available;
 * long size requires the CHK2 feature.
 */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    /* Both operands are sign-extended before the comparison.  */
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Flags must be live in env before the helper can raise.  */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
4259 
/*
 * CHK2: read a pair of bounds (lower, then upper) from consecutive
 * memory locations at the effective address and compare a register
 * against them via the helper.  Only the CHK2 form (ext bit 11 set)
 * is accepted here.
 */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /*
     * NOTE(review): addr1 is not checked with IS_NULL_QREG here --
     * presumably the decode table only registers control addressing
     * modes for this insn; confirm against the insn table.
     */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    /* Both bounds are loaded sign-extended.  */
    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    reg = tcg_temp_new();
    /* Ext bit 15 selects An (used as-is) vs Dn (sign-extended).  */
    if (ext & 0x8000) {
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    /* Flags must be live in env before the helper can raise.  */
    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
}
4304 
/*
 * Copy one 16-byte line from *src to *dst (backend for MOVE16).
 * Both addresses are masked down to a 16-byte boundary.  The copy is
 * done as two 8-byte loads followed by two 8-byte stores, so both
 * loads complete before either store is issued.  'index' is the
 * MMU index for all four accesses.
 */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);

    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
}
4325 
4326 DISAS_INSN(move16_reg)
4327 {
4328     int index = IS_USER(s);
4329     TCGv tmp;
4330     uint16_t ext;
4331 
4332     ext = read_im16(env, s);
4333     if ((ext & (1 << 15)) == 0) {
4334         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4335     }
4336 
4337     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4338 
4339     /* Ax can be Ay, so save Ay before incrementing Ax */
4340     tmp = tcg_temp_new();
4341     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4342     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4343     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4344 }
4345 
4346 DISAS_INSN(move16_mem)
4347 {
4348     int index = IS_USER(s);
4349     TCGv reg, addr;
4350 
4351     reg = AREG(insn, 0);
4352     addr = tcg_constant_i32(read_im32(env, s));
4353 
4354     if ((insn >> 3) & 1) {
4355         /* MOVE16 (xxx).L, (Ay) */
4356         m68k_copy_line(reg, addr, index);
4357     } else {
4358         /* MOVE16 (Ay), (xxx).L */
4359         m68k_copy_line(addr, reg, index);
4360     }
4361 
4362     if (((insn >> 3) & 2) == 0) {
4363         /* (Ay)+ */
4364         tcg_gen_addi_i32(reg, reg, 16);
4365     }
4366 }
4367 
/*
 * ColdFire STRLDSR: push the current SR, then load SR from an
 * immediate.  The insn is only valid when followed by the literal
 * word 0x46FC (the MOVE #imm,SR opcode); the new SR must keep the
 * supervisor bit set.  Privileged.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* Address of the second opcode word, for exception reporting.  */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    /* SR changed: end the TB.  */
    gen_exit_tb(s);
}
4388 
/*
 * MOVE from SR.  Only privileged on cores with the
 * M68K_FEATURE_MOVEFROMSR_PRIV feature; otherwise readable from
 * user mode.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4400 
4401 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES: move between a register and memory through the alternate
 * address spaces selected by SFC (loads) and DFC (stores).
 * Privileged.  Postincrement/predecrement writeback to An happens
 * after the access.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Apply the addressing-mode side effect to An.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through A7 bump by 2 to keep SP word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4459 
/* MOVE to SR.  Privileged; the TB is ended after the SR update.  */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
4469 
/* MOVE USP,An: read the saved user stack pointer.  Privileged.  */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4479 
/* MOVE An,USP: write the saved user stack pointer.  Privileged.  */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4489 
/*
 * HALT.  Privileged; raised as EXCP_HALT_INSN with the address of the
 * following instruction (s->pc) so execution resumes there.
 */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4499 
4500 DISAS_INSN(stop)
4501 {
4502     uint16_t ext;
4503 
4504     if (IS_USER(s)) {
4505         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4506         return;
4507     }
4508 
4509     ext = read_im16(env, s);
4510 
4511     gen_set_sr_im(s, ext, 0);
4512     tcg_gen_movi_i32(cpu_halted, 1);
4513     gen_exception(s, s->pc, EXCP_HLT);
4514 }
4515 
/* RTE.  Privileged; the actual frame restore is done in the
   EXCP_RTE exception path outside the translator.  */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4524 
4525 DISAS_INSN(cf_movec)
4526 {
4527     uint16_t ext;
4528     TCGv reg;
4529 
4530     if (IS_USER(s)) {
4531         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4532         return;
4533     }
4534 
4535     ext = read_im16(env, s);
4536 
4537     if (ext & 0x8000) {
4538         reg = AREG(ext, 12);
4539     } else {
4540         reg = DREG(ext, 12);
4541     }
4542     gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
4543     gen_exit_tb(s);
4544 }
4545 
4546 DISAS_INSN(m68k_movec)
4547 {
4548     uint16_t ext;
4549     TCGv reg, creg;
4550 
4551     if (IS_USER(s)) {
4552         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4553         return;
4554     }
4555 
4556     ext = read_im16(env, s);
4557 
4558     if (ext & 0x8000) {
4559         reg = AREG(ext, 12);
4560     } else {
4561         reg = DREG(ext, 12);
4562     }
4563     creg = tcg_constant_i32(ext & 0xfff);
4564     if (insn & 1) {
4565         gen_helper_m68k_movec_to(cpu_env, creg, reg);
4566     } else {
4567         gen_helper_m68k_movec_from(reg, cpu_env, creg);
4568     }
4569     gen_exit_tb(s);
4570 }
4571 
/* INTOUCH: instruction-cache touch.  Privileged; modelled as a no-op. */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4580 
/* CPUSHL: cache line push/invalidate.  Privileged; modelled as no-op. */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4589 
/* CPUSH: cache push/invalidate.  Privileged; modelled as a no-op.  */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4598 
/* CINV: cache line invalidate.  Privileged; modelled as a no-op.  */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.  */
}
4607 
4608 #if defined(CONFIG_SOFTMMU)
/*
 * PFLUSH: flush ATC (TLB) entries.  Privileged.  The variant is
 * selected by opcode bits [4:3] and passed to the helper along with
 * the address register operand.
 */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_constant_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
4621 
/*
 * PTEST: probe an MMU translation for the address in An.  Privileged.
 * Opcode bit 5 selects a read (1) vs write (0) probe.
 */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_constant_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
4633 #endif
4634 
/* WDDATA: not supported; always raises a privilege violation.  */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4639 
/* WDEBUG: privileged; not implemented -- aborts emulation if hit.  */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4649 #endif
4650 
/* TRAP #n: raise EXCP_TRAP0+n with the address of the next insn.  */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4655 
/*
 * Shared tail for trapcc/trapv: if condition C holds, point PC at the
 * next instruction and raise EXCP_TRAPCC (format 2 frame); otherwise
 * fall through.  TCG_COND_NEVER emits nothing; TCG_COND_ALWAYS traps
 * unconditionally.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            /* The conditional path falls through to the next insn.  */
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4678 
/*
 * TRAPcc, with an optional word or long immediate operand.  The
 * operand carries no data for the trap itself; it is read only so
 * that s->pc advances past it.
 */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    /* Condition field is in opcode bits [11:8].  */
    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4701 
/* TRAPV: trap if the overflow flag is set (condition code 9, "V set"). */
DISAS_INSN(trapv)
{
    DisasCompare c;

    gen_cc_cond(&c, s, 9); /* V set */
    do_trapcc(s, &c);
}
4709 
/*
 * Read FP control register 'reg' (M68K_FPIAR/FPSR/FPCR) into res.
 * FPIAR is not modelled here and always reads as zero.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}
4724 
/*
 * Write val to FP control register 'reg'.  FPIAR writes are
 * discarded; FPCR goes through a helper (see helper_set_fpcr, which
 * presumably also updates the softfloat state -- confirm there).
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}
4738 
4739 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4740 {
4741     int index = IS_USER(s);
4742     TCGv tmp;
4743 
4744     tmp = tcg_temp_new();
4745     gen_load_fcr(s, tmp, reg);
4746     tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4747 }
4748 
4749 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4750 {
4751     int index = IS_USER(s);
4752     TCGv tmp;
4753 
4754     tmp = tcg_temp_new();
4755     tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4756     gen_store_fcr(s, tmp, reg);
4757 }
4758 
4759 
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * For the Dn/An/immediate forms, 'mask' must name exactly one
 * register; for the memory forms it is a bitmask and several
 * registers may be transferred.  is_write is the FP-regs-to-EA
 * direction.  Predecrement (mode 4) and postincrement (mode 3)
 * write the final address back to An.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediates can only be written *to* a control reg.  */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory forms from here on.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* Predecrement store: walk the registers top-down.  */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other memory forms walk the registers bottom-up.  */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* Postincrement: write the final address back to An.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4855 
/*
 * FMOVEM: move multiple FP data registers to/from memory.  The
 * register mask is static (low byte of ext) or dynamic (in Dn, when
 * mode bit 0 is set).  With the full FPU feature registers are
 * transferred as 96-bit extended values, otherwise as doubles
 * (see FIXME below).  The helpers return the final address, which is
 * written back to An for the predec/postinc addressing modes.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* EA modes 3 (postinc) and 4 (predec): update An.  */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4915 
4916 /*
4917  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4918  * immediately before the next FP instruction is executed.
4919  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    /* Dispatch on the major class field (ext[15:13]).  */
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr: load a constant from the on-chip ROM table.  */
            TCGv rom_offset = tcg_constant_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        /* Update FP condition codes from the stored value.  */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    /* General arithmetic: pick up the source operand.  */
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: cosine goes to the register in ext[2:0].  */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    /* Update FP condition codes from the result.  */
    gen_helper_ftst(cpu_env, cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5156 
/*
 * Translate FPU condition code 'cond' (0..31) into a DisasCompare
 * (v1 <tcond> v2) over the FPSR condition-code bits N, Z and A
 * (A presumably being the NAN bit -- confirm against cpu.h).
 * Conditions 16..31 are the IEEE-signaling variants of 0..15 and are
 * currently decoded identically; BSUN is not raised (see TODO).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        /* Shift the A bit into the N bit position so a single
           or/xor/test sequence can evaluate Z || !(A || N).  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* Invert N so that "!(A || N)" becomes a non-zero test.  */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        /* Flip N, then require N and !A and !Z all together.  */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        /* Move A into the N position, clear N when A is set, then
           test Z || N.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        /* Shift Z into the N position; same or/xor trick as case 3.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        /* Shift Z into the N position, clear N when Z is set, then
           test A || N.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5279 
5280 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5281 {
5282     DisasCompare c;
5283 
5284     gen_fcc_cond(&c, s, cond);
5285     update_cc_op(s);
5286     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5287 }
5288 
/*
 * FBcc: FPU conditional branch.  Bit 6 of the opcode selects a 32-bit
 * displacement (two extension words) instead of a 16-bit one.
 */
DISAS_INSN(fbcc)
{
    uint32_t offset;
    uint32_t base;
    TCGLabel *l1;

    /* The displacement is relative to the word following the opcode. */
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    if (insn & (1 << 6)) {
        /* Long form: first word is the high half of the displacement. */
        offset = (offset << 16) | read_im16(env, s);
    }

    l1 = gen_new_label();
    update_cc_op(s);
    /* Branch to l1 when the condition in the low 6 opcode bits holds. */
    gen_fjmpcc(s, insn & 0x3f, l1);
    /* Condition false: continue with the next instruction. */
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    gen_set_label(l1);
    /* Condition true: jump to the branch target. */
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
}
5308 
/*
 * FScc: set the byte destination to all ones if the FPU condition in
 * the extension word holds, else to zero.
 */
DISAS_INSN(fscc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;
    uint16_t ext;

    ext = read_im16(env, s);
    cond = ext & 0x3f;
    gen_fcc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);

    /* Turn the 0/1 setcond result into 0/-1 (byte 0x00/0xff). */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
}
5326 
/*
 * FTRAPcc: trap when the FPU condition in the extension word holds.
 * The opmode (low 3 opcode bits) selects whether a 16-bit, 32-bit or
 * no immediate operand follows; the operand itself is ignored.
 */
DISAS_INSN(ftrapcc)
{
    DisasCompare c;
    uint16_t ext;
    int cond;

    ext = read_im16(env, s);
    cond = ext & 0x3f;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* ftrapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* ftrapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* ftrapcc (no operand) */
        break;
    default:
        /* ftrapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_fcc_cond(&c, s, cond);
    do_trapcc(s, &c);
}
5354 
5355 #if defined(CONFIG_SOFTMMU)
/*
 * FRESTORE: reload the FPU internal state frame (privileged).
 * Only implemented for the 68040; the frame word is read and
 * discarded without validation.
 */
DISAS_INSN(frestore)
{
    TCGv addr;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        SRC_EA(env, addr, OS_LONG, 0, NULL);
        /* FIXME: check the state frame */
    } else {
        disas_undef(env, s, insn);
    }
}
5371 
/*
 * FSAVE: store the FPU internal state frame (privileged).
 * Only implemented for the 68040, which always writes an IDLE frame.
 */
DISAS_INSN(fsave)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        /* always write IDLE */
        TCGv idle = tcg_constant_i32(0x41000000);
        DEST_EA(env, insn, OS_LONG, idle, NULL);
    } else {
        disas_undef(env, s, insn);
    }
}
5387 #endif
5388 
5389 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5390 {
5391     TCGv tmp = tcg_temp_new();
5392     if (s->env->macsr & MACSR_FI) {
5393         if (upper)
5394             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5395         else
5396             tcg_gen_shli_i32(tmp, val, 16);
5397     } else if (s->env->macsr & MACSR_SU) {
5398         if (upper)
5399             tcg_gen_sari_i32(tmp, val, 16);
5400         else
5401             tcg_gen_ext16s_i32(tmp, val);
5402     } else {
5403         if (upper)
5404             tcg_gen_shri_i32(tmp, val, 16);
5405         else
5406             tcg_gen_ext16u_i32(tmp, val);
5407     }
5408     return tmp;
5409 }
5410 
/* Clear the MACSR result flags (V, Z, N, EV) before a MAC operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5416 
/*
 * MAC/EMAC multiply-accumulate.  Handles the plain form, the
 * "MAC with load" form (opcode bits 4-5 set: a parallel memory load
 * with register writeback), and the EMAC_B dual-accumulate form.
 * The MACSR operating mode (FI/SU/unsigned) is sampled at translation
 * time to pick the multiply and saturation helpers.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Allocate the 64-bit multiply temporary once per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator number: insn bit 7 -> bit 0, ext bit 4 -> bit 1. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    /* The dual-accumulate form requires the EMAC_B feature. */
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        /* The load form inverts the low bit of the accumulator number. */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  Extract the 16-bit halves selected by ext bits 6/7. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* ext bits 9-10 select a post-multiply shift:
           01 = shift left 1, 11 = shift right 1. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* insn bit 8 selects subtract (MSAC) instead of add (MAC). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Writeback for the MAC-with-load form. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5584 
/*
 * MOVE from MAC accumulator to an address or data register.
 * The value is extracted via a helper (with saturation) in fractional
 * or overflow-checking modes, otherwise the low 32 bits are copied.
 * Opcode bit 6 additionally clears the accumulator and its per-
 * accumulator PAV flag.
 */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No overflow checking: just take the low 32 bits. */
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        /* "Move and clear" form: zero the accumulator and its flag. */
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
5608 
/* MOVE between MAC accumulators (source in bits 0-1, dest in 9-10). */
DISAS_INSN(move_mac)
{
    /* FIXME: This can be done without a helper.  */
    int src;
    TCGv dest;
    src = insn & 3;
    dest = tcg_constant_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}
5620 
5621 DISAS_INSN(from_macsr)
5622 {
5623     TCGv reg;
5624 
5625     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5626     tcg_gen_mov_i32(reg, QREG_MACSR);
5627 }
5628 
5629 DISAS_INSN(from_mask)
5630 {
5631     TCGv reg;
5632     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5633     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5634 }
5635 
/*
 * MOVE from a MAC accumulator-extension register.  Opcode bit 10
 * selects the ACCext23 pair over ACCext01; the helper applies the
 * fractional or integer layout per MACSR.
 */
DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_get_mac_extf(reg, cpu_env, acc);
    else
        gen_helper_get_mac_exti(reg, cpu_env, acc);
}
5647 
/* MOVE MACSR to CCR: copy the N/Z/V flags from MACSR into the CCR. */
DISAS_INSN(macsr_to_ccr)
{
    TCGv tmp = tcg_temp_new();

    /* Note that X and C are always cleared. */
    tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
    gen_helper_set_ccr(cpu_env, tmp);
    set_cc_op(s, CC_OP_FLAGS);
}
5657 
/*
 * MOVE to MAC accumulator: load the accumulator from an EA operand,
 * extended according to the MACSR mode (fractional values are shifted
 * into position, signed/unsigned values are sign/zero extended).
 */
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: value sits 8 bits up in the accumulator. */
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    /* Writing the accumulator clears its PAV flag and the result flags. */
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
}
5678 
/*
 * MOVE to MACSR.  Ends the TB, since the translator specializes
 * generated code on the MACSR operating mode (see s->env->macsr uses).
 */
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(cpu_env, val);
    gen_exit_tb(s);
}
5686 
/* MOVE to MASK: the upper 16 bits of the address mask always read 1. */
DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
5693 
/*
 * MOVE to a MAC accumulator-extension register.  Opcode bit 10
 * selects ACCext23 over ACCext01; the helper variant follows the
 * MACSR mode (fractional, signed, unsigned).
 */
DISAS_INSN(to_mext)
{
    TCGv val;
    TCGv acc;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
    if (s->env->macsr & MACSR_FI)
        gen_helper_set_mac_extf(cpu_env, val, acc);
    else if (s->env->macsr & MACSR_SU)
        gen_helper_set_mac_exts(cpu_env, val, acc);
    else
        gen_helper_set_mac_extu(cpu_env, val, acc);
}
5707 
5708 static disas_proc opcode_table[65536];
5709 
5710 static void
5711 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5712 {
5713   int i;
5714   int from;
5715   int to;
5716 
5717   /* Sanity check.  All set bits must be included in the mask.  */
5718   if (opcode & ~mask) {
5719       fprintf(stderr,
5720               "qemu internal error: bogus opcode definition %04x/%04x\n",
5721               opcode, mask);
5722       abort();
5723   }
5724   /*
5725    * This could probably be cleverer.  For now just optimize the case where
5726    * the top bits are known.
5727    */
5728   /* Find the first zero bit in the mask.  */
5729   i = 0x8000;
5730   while ((i & mask) != 0)
5731       i >>= 1;
5732   /* Iterate over all combinations of this and lower bits.  */
5733   if (i == 0)
5734       i = 1;
5735   else
5736       i <<= 1;
5737   from = opcode & ~(i - 1);
5738   to = from + i;
5739   for (i = from; i < to; i++) {
5740       if ((i & mask) == opcode)
5741           opcode_table[i] = proc;
5742   }
5743 }
5744 
5745 /*
5746  * Register m68k opcode handlers.  Order is important.
5747  * Later insn override earlier ones.
5748  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    /* ColdFire MAC/EMAC instructions.  */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);

    /* Shift, rotate and bitfield instructions.  */
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */

    /* FPU and system-control coprocessor instructions.  */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6001 
/* Initialize per-TB translation state from the CPU state. */
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;

    dc->env = env;
    dc->pc = dc->base.pc_first;
    /* This value will always be filled in properly before m68k_tr_tb_stop. */
    dc->pc_prev = 0xdeadbeef;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->done_mac = 0;
    dc->writeback_mask = 0;

    dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
    /* If architectural single step active, limit to 1 */
    if (dc->ss_active) {
        dc->base.max_insns = 1;
    }
}
6022 
/* No per-TB work is needed before translating the first insn. */
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
}
6026 
/* Record the insn's PC and current cc_op for exception restore. */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6032 
/*
 * Translate one instruction: fetch the 16-bit opcode, dispatch
 * through opcode_table, flush pending EA writebacks, and decide
 * whether to stop near a page boundary.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);

    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6067 
6068 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6069 {
6070     DisasContext *dc = container_of(dcbase, DisasContext, base);
6071 
6072     switch (dc->base.is_jmp) {
6073     case DISAS_NORETURN:
6074         break;
6075     case DISAS_TOO_MANY:
6076         update_cc_op(dc);
6077         gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6078         break;
6079     case DISAS_JUMP:
6080         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6081         if (dc->ss_active) {
6082             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6083         } else {
6084             tcg_gen_lookup_and_goto_ptr();
6085         }
6086         break;
6087     case DISAS_EXIT:
6088         /*
6089          * We updated CC_OP and PC in gen_exit_tb, but also modified
6090          * other state that may require returning to the main loop.
6091          */
6092         if (dc->ss_active) {
6093             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6094         } else {
6095             tcg_gen_exit_tb(NULL, 0);
6096         }
6097         break;
6098     default:
6099         g_assert_not_reached();
6100     }
6101 }
6102 
6103 static void m68k_tr_disas_log(const DisasContextBase *dcbase,
6104                               CPUState *cpu, FILE *logfile)
6105 {
6106     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
6107     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
6108 }
6109 
/* Hooks wiring the m68k front end into the generic translator loop. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6118 
6119 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
6120                            target_ulong pc, void *host_pc)
6121 {
6122     DisasContext dc;
6123     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6124 }
6125 
6126 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6127 {
6128     floatx80 a = { .high = high, .low = low };
6129     union {
6130         float64 f64;
6131         double d;
6132     } u;
6133 
6134     u.f64 = floatx80_to_float64(a, &env->fp_status);
6135     return u.d;
6136 }
6137 
/*
 * Dump architectural state (data/address/FP registers, SR, FPSR, FPCR
 * and, for system emulation, stack pointers and MMU registers) to @f.
 * Used by the generic CPU dump machinery (e.g. "-d cpu" logging).
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* Dn/An pairs plus FPn, shown raw (sign/exp word + mantissa) and as a double. */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* env->sr lacks the live condition codes; merge in the current CCR. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Decode the FPCR rounding-precision field. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* System emulation only: banked A7 stack pointers and MMU/control regs.
     * The "->" marker flags the bank currently selected as A7. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif
}
6209