xref: /openbmc/qemu/target/m68k/translate.c (revision 2b108085)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 
38 //#define DEBUG_DISPATCH 1
39 
40 #define DEFO32(name, offset) static TCGv QREG_##name;
41 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
42 #include "qregs.def"
43 #undef DEFO32
44 #undef DEFO64
45 
46 static TCGv_i32 cpu_halted;
47 static TCGv_i32 cpu_exception_index;
48 
49 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
50 static TCGv cpu_dregs[8];
51 static TCGv cpu_aregs[8];
52 static TCGv_i64 cpu_macc[4];
53 
54 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
55 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
56 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
57 #define MACREG(acc)     cpu_macc[acc]
58 #define QREG_SP         get_areg(s, 7)
59 
60 static TCGv NULL_QREG;
61 #define IS_NULL_QREG(t) (t == NULL_QREG)
62 /* Used to distinguish stores from bad addressing modes.  */
63 static TCGv store_dummy;
64 
65 #include "exec/gen-icount.h"
66 
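/* Allocate the TCG globals that mirror the CPU state: the registers listed
   in qregs.def, halted and exception_index, the data/address registers and
   the MAC accumulators.  */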
67 void m68k_tcg_init(void)
68 {
69     char *p;
70     int i;
71 
72 #define DEFO32(name, offset) \
73     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
74         offsetof(CPUM68KState, offset), #name);
75 #define DEFO64(name, offset) \
76     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
77         offsetof(CPUM68KState, offset), #name);
78 #include "qregs.def"
79 #undef DEFO32
80 #undef DEFO64
81 
82     cpu_halted = tcg_global_mem_new_i32(cpu_env,
83                                         -offsetof(M68kCPU, env) +
84                                         offsetof(CPUState, halted), "HALTED");
85     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
86                                                  -offsetof(M68kCPU, env) +
87                                                  offsetof(CPUState, exception_index),
88                                                  "EXCEPTION");
89 
90     p = cpu_reg_names;
91     for (i = 0; i < 8; i++) {
92         sprintf(p, "D%d", i);
93         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
94                                           offsetof(CPUM68KState, dregs[i]), p);
95         p += 3;
96         sprintf(p, "A%d", i);
97         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
98                                           offsetof(CPUM68KState, aregs[i]), p);
99         p += 3;
100     }
101     for (i = 0; i < 4; i++) {
102         sprintf(p, "ACC%d", i);
103         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
104                                          offsetof(CPUM68KState, macc[i]), p);
105         p += 5;
106     }
107 
108     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
109     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
110 }
111 
112 /* internal defines */
113 typedef struct DisasContext {
114     CPUM68KState *env;
115     target_ulong insn_pc; /* Start of the current instruction.  */
116     target_ulong pc;
117     int is_jmp;
118     CCOp cc_op; /* Current CC operation */
119     int cc_op_synced;
120     struct TranslationBlock *tb;
121     int singlestep_enabled;
122     TCGv_i64 mactmp;
123     int done_mac;
124     int writeback_mask;
125     TCGv writeback[8];
126 #define MAX_TO_RELEASE 8
127     int release_count;
128     TCGv release[MAX_TO_RELEASE];
129 } DisasContext;
130 
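/* Reset the per-instruction list of temporaries to be released.  */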
131 static void init_release_array(DisasContext *s)
132 {
133 #ifdef CONFIG_DEBUG_TCG
134     memset(s->release, 0, sizeof(s->release));
135 #endif
136     s->release_count = 0;
137 }
138 
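/* Free all temporaries recorded with mark_to_release and reset the list.  */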
139 static void do_release(DisasContext *s)
140 {
141     int i;
142     for (i = 0; i < s->release_count; i++) {
143         tcg_temp_free(s->release[i]);
144     }
145     init_release_array(s);
146 }
147 
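/* Record TMP so it is freed automatically by do_release; returns TMP.  */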
148 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
149 {
150     g_assert(s->release_count < MAX_TO_RELEASE);
151     return s->release[s->release_count++] = tmp;
152 }
153 
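/* Return address register REGNO, or its pending writeback temporary if an
   update has been delayed by delay_set_areg.  */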
154 static TCGv get_areg(DisasContext *s, unsigned regno)
155 {
156     if (s->writeback_mask & (1 << regno)) {
157         return s->writeback[regno];
158     } else {
159         return cpu_aregs[regno];
160     }
161 }
162 
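/* Schedule VAL as the new value of address register REGNO; the register
   itself is only updated by do_writebacks().  If GIVE_TEMP, VAL is a
   temporary whose ownership is transferred to the writeback slot.  */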
163 static void delay_set_areg(DisasContext *s, unsigned regno,
164                            TCGv val, bool give_temp)
165 {
166     if (s->writeback_mask & (1 << regno)) {
167         if (give_temp) {
168             tcg_temp_free(s->writeback[regno]);
169             s->writeback[regno] = val;
170         } else {
171             tcg_gen_mov_i32(s->writeback[regno], val);
172         }
173     } else {
174         s->writeback_mask |= 1 << regno;
175         if (give_temp) {
176             s->writeback[regno] = val;
177         } else {
178             TCGv tmp = tcg_temp_new();
179             s->writeback[regno] = tmp;
180             tcg_gen_mov_i32(tmp, val);
181         }
182     }
183 }
184 
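/* Commit all delayed address register updates to cpu_aregs and free the
   writeback temporaries.  */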
185 static void do_writebacks(DisasContext *s)
186 {
187     unsigned mask = s->writeback_mask;
188     if (mask) {
189         s->writeback_mask = 0;
190         do {
191             unsigned regno = ctz32(mask);
192             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
193             tcg_temp_free(s->writeback[regno]);
194             mask &= mask - 1;
195         } while (mask);
196     }
197 }
198 
199 /* is_jmp field values */
200 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
201 #define DISAS_UPDATE    DISAS_TARGET_1 /* cpu state was modified dynamically */
202 #define DISAS_TB_JUMP   DISAS_TARGET_2 /* only pc was modified statically */
203 #define DISAS_JUMP_NEXT DISAS_TARGET_3
204 
205 #if defined(CONFIG_USER_ONLY)
206 #define IS_USER(s) 1
207 #else
208 #define IS_USER(s)   (!(s->tb->flags & TB_FLAGS_MSR_S))
209 #define SFC_INDEX(s) ((s->tb->flags & TB_FLAGS_SFC_S) ? \
210                       MMU_KERNEL_IDX : MMU_USER_IDX)
211 #define DFC_INDEX(s) ((s->tb->flags & TB_FLAGS_DFC_S) ? \
212                       MMU_KERNEL_IDX : MMU_USER_IDX)
213 #endif
214 
215 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
216 
217 #ifdef DEBUG_DISPATCH
218 #define DISAS_INSN(name)                                                \
219     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
220                                   uint16_t insn);                       \
221     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
222                              uint16_t insn)                             \
223     {                                                                   \
224         qemu_log("Dispatch " #name "\n");                               \
225         real_disas_##name(env, s, insn);                                \
226     }                                                                   \
227     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
228                                   uint16_t insn)
229 #else
230 #define DISAS_INSN(name)                                                \
231     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
232                              uint16_t insn)
233 #endif
234 
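/* For each CC op, the QREG_CC_* values that still carry information;
   set_cc_op() discards the rest.  */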
235 static const uint8_t cc_op_live[CC_OP_NB] = {
236     [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
237     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
238     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
239     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
240     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
241     [CC_OP_LOGIC] = CCF_X | CCF_N
242 };
243 
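/* Change the current CC op, discarding any CC register values that the
   new op no longer needs.  */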
244 static void set_cc_op(DisasContext *s, CCOp op)
245 {
246     CCOp old_op = s->cc_op;
247     int dead;
248 
249     if (old_op == op) {
250         return;
251     }
252     s->cc_op = op;
253     s->cc_op_synced = 0;
254 
255     /* Discard CC computation that will no longer be used.
256        Note that X and N are never dead.  */
257     dead = cc_op_live[old_op] & ~cc_op_live[op];
258     if (dead & CCF_C) {
259         tcg_gen_discard_i32(QREG_CC_C);
260     }
261     if (dead & CCF_Z) {
262         tcg_gen_discard_i32(QREG_CC_Z);
263     }
264     if (dead & CCF_V) {
265         tcg_gen_discard_i32(QREG_CC_V);
266     }
267 }
268 
269 /* Update the CPU env CC_OP state.  */
270 static void update_cc_op(DisasContext *s)
271 {
272     if (!s->cc_op_synced) {
273         s->cc_op_synced = 1;
274         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
275     }
276 }
277 
278 /* Generate a jump to an immediate address.  */
279 static void gen_jmp_im(DisasContext *s, uint32_t dest)
280 {
281     update_cc_op(s);
282     tcg_gen_movi_i32(QREG_PC, dest);
283     s->is_jmp = DISAS_JUMP;
284 }
285 
286 /* Generate a jump to the address in qreg DEST.  */
287 static void gen_jmp(DisasContext *s, TCGv dest)
288 {
289     update_cc_op(s);
290     tcg_gen_mov_i32(QREG_PC, dest);
291     s->is_jmp = DISAS_JUMP;
292 }
293 
294 static void gen_raise_exception(int nr)
295 {
296     TCGv_i32 tmp = tcg_const_i32(nr);
297 
298     gen_helper_raise_exception(cpu_env, tmp);
299     tcg_temp_free_i32(tmp);
300 }
301 
302 static void gen_exception(DisasContext *s, uint32_t where, int nr)
303 {
304     gen_jmp_im(s, where);
305     gen_raise_exception(nr);
306 }
307 
308 static inline void gen_addr_fault(DisasContext *s)
309 {
310     gen_exception(s, s->insn_pc, EXCP_ADDRESS);
311 }
312 
313 /* Generate a load from the specified address.  Narrow values are
314    sign or zero extended to full register width, according to SIGN.  */
315 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
316                             int sign, int index)
317 {
318     TCGv tmp;
319     tmp = tcg_temp_new_i32();
320     switch(opsize) {
321     case OS_BYTE:
322         if (sign)
323             tcg_gen_qemu_ld8s(tmp, addr, index);
324         else
325             tcg_gen_qemu_ld8u(tmp, addr, index);
326         break;
327     case OS_WORD:
328         if (sign)
329             tcg_gen_qemu_ld16s(tmp, addr, index);
330         else
331             tcg_gen_qemu_ld16u(tmp, addr, index);
332         break;
333     case OS_LONG:
334         tcg_gen_qemu_ld32u(tmp, addr, index);
335         break;
336     default:
337         g_assert_not_reached();
338     }
339     return tmp;
340 }
341 
342 /* Generate a store.  */
343 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
344                              int index)
345 {
346     switch(opsize) {
347     case OS_BYTE:
348         tcg_gen_qemu_st8(val, addr, index);
349         break;
350     case OS_WORD:
351         tcg_gen_qemu_st16(val, addr, index);
352         break;
353     case OS_LONG:
354         tcg_gen_qemu_st32(val, addr, index);
355         break;
356     default:
357         g_assert_not_reached();
358     }
359 }
360 
361 typedef enum {
362     EA_STORE,
363     EA_LOADU,
364     EA_LOADS
365 } ea_what;
366 
367 /* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
368    EA_LOADS, otherwise generate a store.  */
369 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
370                      ea_what what, int index)
371 {
372     if (what == EA_STORE) {
373         gen_store(s, opsize, addr, val, index);
374         return store_dummy;
375     } else {
376         return mark_to_release(s, gen_load(s, opsize, addr,
377                                            what == EA_LOADS, index));
378     }
379 }
380 
381 /* Read a 16-bit immediate constant */
382 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
383 {
384     uint16_t im;
385     im = cpu_lduw_code(env, s->pc);
386     s->pc += 2;
387     return im;
388 }
389 
390 /* Read an 8-bit immediate constant */
391 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
392 {
393     return read_im16(env, s);
394 }
395 
396 /* Read a 32-bit immediate constant.  */
397 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
398 {
399     uint32_t im;
400     im = read_im16(env, s) << 16;
401     im |= 0xffff & read_im16(env, s);
402     return im;
403 }
404 
405 /* Read a 64-bit immediate constant.  */
406 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
407 {
408     uint64_t im;
409     im = (uint64_t)read_im32(env, s) << 32;
410     im |= (uint64_t)read_im32(env, s);
411     return im;
412 }
413 
414 /* Calculate an address index.  */
415 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
416 {
417     TCGv add;
418     int scale;
419 
420     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
421     if ((ext & 0x800) == 0) {
422         tcg_gen_ext16s_i32(tmp, add);
423         add = tmp;
424     }
425     scale = (ext >> 9) & 3;
426     if (scale != 0) {
427         tcg_gen_shli_i32(tmp, add, scale);
428         add = tmp;
429     }
430     return add;
431 }
432 
433 /* Handle a base + index + displacement effective address.
434    A NULL_QREG base means pc-relative.  */
435 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
436 {
437     uint32_t offset;
438     uint16_t ext;
439     TCGv add;
440     TCGv tmp;
441     uint32_t bd, od;
442 
443     offset = s->pc;
444     ext = read_im16(env, s);
445 
446     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
447         return NULL_QREG;
448 
449     if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
450         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
451         ext &= ~(3 << 9);
452     }
453 
454     if (ext & 0x100) {
455         /* full extension word format */
456         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
457             return NULL_QREG;
458 
459         if ((ext & 0x30) > 0x10) {
460             /* base displacement */
461             if ((ext & 0x30) == 0x20) {
462                 bd = (int16_t)read_im16(env, s);
463             } else {
464                 bd = read_im32(env, s);
465             }
466         } else {
467             bd = 0;
468         }
469         tmp = mark_to_release(s, tcg_temp_new());
470         if ((ext & 0x44) == 0) {
471             /* pre-index */
472             add = gen_addr_index(s, ext, tmp);
473         } else {
474             add = NULL_QREG;
475         }
476         if ((ext & 0x80) == 0) {
477             /* base not suppressed */
478             if (IS_NULL_QREG(base)) {
479                 base = mark_to_release(s, tcg_const_i32(offset + bd));
480                 bd = 0;
481             }
482             if (!IS_NULL_QREG(add)) {
483                 tcg_gen_add_i32(tmp, add, base);
484                 add = tmp;
485             } else {
486                 add = base;
487             }
488         }
489         if (!IS_NULL_QREG(add)) {
490             if (bd != 0) {
491                 tcg_gen_addi_i32(tmp, add, bd);
492                 add = tmp;
493             }
494         } else {
495             add = mark_to_release(s, tcg_const_i32(bd));
496         }
497         if ((ext & 3) != 0) {
498             /* memory indirect */
499             base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
500             if ((ext & 0x44) == 4) {
501                 add = gen_addr_index(s, ext, tmp);
502                 tcg_gen_add_i32(tmp, add, base);
503                 add = tmp;
504             } else {
505                 add = base;
506             }
507             if ((ext & 3) > 1) {
508                 /* outer displacement */
509                 if ((ext & 3) == 2) {
510                     od = (int16_t)read_im16(env, s);
511                 } else {
512                     od = read_im32(env, s);
513                 }
514             } else {
515                 od = 0;
516             }
517             if (od != 0) {
518                 tcg_gen_addi_i32(tmp, add, od);
519                 add = tmp;
520             }
521         }
522     } else {
523         /* brief extension word format */
524         tmp = mark_to_release(s, tcg_temp_new());
525         add = gen_addr_index(s, ext, tmp);
526         if (!IS_NULL_QREG(base)) {
527             tcg_gen_add_i32(tmp, add, base);
528             if ((int8_t)ext)
529                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
530         } else {
531             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
532         }
533         add = tmp;
534     }
535     return add;
536 }
537 
538 /* Sign or zero extend a value.  */
539 
540 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
541 {
542     switch (opsize) {
543     case OS_BYTE:
544         if (sign) {
545             tcg_gen_ext8s_i32(res, val);
546         } else {
547             tcg_gen_ext8u_i32(res, val);
548         }
549         break;
550     case OS_WORD:
551         if (sign) {
552             tcg_gen_ext16s_i32(res, val);
553         } else {
554             tcg_gen_ext16u_i32(res, val);
555         }
556         break;
557     case OS_LONG:
558         tcg_gen_mov_i32(res, val);
559         break;
560     default:
561         g_assert_not_reached();
562     }
563 }
564 
565 /* Evaluate all the CC flags.  */
566 
567 static void gen_flush_flags(DisasContext *s)
568 {
569     TCGv t0, t1;
570 
571     switch (s->cc_op) {
572     case CC_OP_FLAGS:
573         return;
574 
575     case CC_OP_ADDB:
576     case CC_OP_ADDW:
577     case CC_OP_ADDL:
578         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
579         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
580         /* Compute signed overflow for addition.  */
581         t0 = tcg_temp_new();
582         t1 = tcg_temp_new();
583         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
584         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
585         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
586         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
587         tcg_temp_free(t0);
588         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
589         tcg_temp_free(t1);
590         break;
591 
592     case CC_OP_SUBB:
593     case CC_OP_SUBW:
594     case CC_OP_SUBL:
595         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
596         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
597         /* Compute signed overflow for subtraction.  */
598         t0 = tcg_temp_new();
599         t1 = tcg_temp_new();
600         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
601         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
602         tcg_gen_xor_i32(t1, QREG_CC_N, t0);
603         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
604         tcg_temp_free(t0);
605         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
606         tcg_temp_free(t1);
607         break;
608 
609     case CC_OP_CMPB:
610     case CC_OP_CMPW:
611     case CC_OP_CMPL:
612         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
613         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
614         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
615         /* Compute signed overflow for subtraction.  */
616         t0 = tcg_temp_new();
617         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
618         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
619         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
620         tcg_temp_free(t0);
621         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
622         break;
623 
624     case CC_OP_LOGIC:
625         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
626         tcg_gen_movi_i32(QREG_CC_C, 0);
627         tcg_gen_movi_i32(QREG_CC_V, 0);
628         break;
629 
630     case CC_OP_DYNAMIC:
631         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
632         s->cc_op_synced = 1;
633         break;
634 
635     default:
636         t0 = tcg_const_i32(s->cc_op);
637         gen_helper_flush_flags(cpu_env, t0);
638         tcg_temp_free(t0);
639         s->cc_op_synced = 1;
640         break;
641     }
642 
643     /* Note that flush_flags also assigns to env->cc_op.  */
644     s->cc_op = CC_OP_FLAGS;
645 }
646 
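/* Return VAL extended from OPSIZE to 32 bits (signed if SIGN); for OS_LONG,
   VAL itself is returned.  */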
647 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
648 {
649     TCGv tmp;
650 
651     if (opsize == OS_LONG) {
652         tmp = val;
653     } else {
654         tmp = mark_to_release(s, tcg_temp_new());
655         gen_ext(tmp, val, opsize, sign);
656     }
657 
658     return tmp;
659 }
660 
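/* Set the flags for the result of a logic operation: N and Z come from VAL,
   V and C are cleared lazily via CC_OP_LOGIC.  */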
661 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
662 {
663     gen_ext(QREG_CC_N, val, opsize, 1);
664     set_cc_op(s, CC_OP_LOGIC);
665 }
666 
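/* Record the operands of a compare so that the flags for dest - src can be
   computed lazily.  */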
667 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
668 {
669     tcg_gen_mov_i32(QREG_CC_N, dest);
670     tcg_gen_mov_i32(QREG_CC_V, src);
671     set_cc_op(s, CC_OP_CMPB + opsize);
672 }
673 
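/* Record the sign-extended result and the source operand for lazy flag
   computation after an add or subtract.  */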
674 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
675 {
676     gen_ext(QREG_CC_N, dest, opsize, 1);
677     tcg_gen_mov_i32(QREG_CC_V, src);
678 }
679 
680 static inline int opsize_bytes(int opsize)
681 {
682     switch (opsize) {
683     case OS_BYTE: return 1;
684     case OS_WORD: return 2;
685     case OS_LONG: return 4;
686     case OS_SINGLE: return 4;
687     case OS_DOUBLE: return 8;
688     case OS_EXTENDED: return 12;
689     case OS_PACKED: return 12;
690     default:
691         g_assert_not_reached();
692     }
693 }
694 
695 static inline int insn_opsize(int insn)
696 {
697     switch ((insn >> 6) & 3) {
698     case 0: return OS_BYTE;
699     case 1: return OS_WORD;
700     case 2: return OS_LONG;
701     default:
702         g_assert_not_reached();
703     }
704 }
705 
706 static inline int ext_opsize(int ext, int pos)
707 {
708     switch ((ext >> pos) & 7) {
709     case 0: return OS_LONG;
710     case 1: return OS_SINGLE;
711     case 2: return OS_EXTENDED;
712     case 3: return OS_PACKED;
713     case 4: return OS_WORD;
714     case 5: return OS_DOUBLE;
715     case 6: return OS_BYTE;
716     default:
717         g_assert_not_reached();
718     }
719 }
720 
721 /* Assign value to a register.  If the width is less than the register width
722    only the low part of the register is set.  */
723 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
724 {
725     TCGv tmp;
726     switch (opsize) {
727     case OS_BYTE:
728         tcg_gen_andi_i32(reg, reg, 0xffffff00);
729         tmp = tcg_temp_new();
730         tcg_gen_ext8u_i32(tmp, val);
731         tcg_gen_or_i32(reg, reg, tmp);
732         tcg_temp_free(tmp);
733         break;
734     case OS_WORD:
735         tcg_gen_andi_i32(reg, reg, 0xffff0000);
736         tmp = tcg_temp_new();
737         tcg_gen_ext16u_i32(tmp, val);
738         tcg_gen_or_i32(reg, reg, tmp);
739         tcg_temp_free(tmp);
740         break;
741     case OS_LONG:
742     case OS_SINGLE:
743         tcg_gen_mov_i32(reg, val);
744         break;
745     default:
746         g_assert_not_reached();
747     }
748 }
749 
750 /* Generate code for an "effective address".  Does not adjust the base
751    register for autoincrement addressing modes.  */
752 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
753                          int mode, int reg0, int opsize)
754 {
755     TCGv reg;
756     TCGv tmp;
757     uint16_t ext;
758     uint32_t offset;
759 
760     switch (mode) {
761     case 0: /* Data register direct.  */
762     case 1: /* Address register direct.  */
763         return NULL_QREG;
764     case 3: /* Indirect postincrement.  */
765         if (opsize == OS_UNSIZED) {
766             return NULL_QREG;
767         }
768         /* fallthru */
769     case 2: /* Indirect register */
770         return get_areg(s, reg0);
771     case 4: /* Indirect predecrement.  */
772         if (opsize == OS_UNSIZED) {
773             return NULL_QREG;
774         }
775         reg = get_areg(s, reg0);
776         tmp = mark_to_release(s, tcg_temp_new());
777         if (reg0 == 7 && opsize == OS_BYTE &&
778             m68k_feature(s->env, M68K_FEATURE_M68000)) {
779             tcg_gen_subi_i32(tmp, reg, 2);
780         } else {
781             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
782         }
783         return tmp;
784     case 5: /* Indirect displacement.  */
785         reg = get_areg(s, reg0);
786         tmp = mark_to_release(s, tcg_temp_new());
787         ext = read_im16(env, s);
788         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
789         return tmp;
790     case 6: /* Indirect index + displacement.  */
791         reg = get_areg(s, reg0);
792         return gen_lea_indexed(env, s, reg);
793     case 7: /* Other */
794         switch (reg0) {
795         case 0: /* Absolute short.  */
796             offset = (int16_t)read_im16(env, s);
797             return mark_to_release(s, tcg_const_i32(offset));
798         case 1: /* Absolute long.  */
799             offset = read_im32(env, s);
800             return mark_to_release(s, tcg_const_i32(offset));
801         case 2: /* pc displacement  */
802             offset = s->pc;
803             offset += (int16_t)read_im16(env, s);
804             return mark_to_release(s, tcg_const_i32(offset));
805         case 3: /* pc index+displacement.  */
806             return gen_lea_indexed(env, s, NULL_QREG);
807         case 4: /* Immediate.  */
808         default:
809             return NULL_QREG;
810         }
811     }
812     /* Should never happen.  */
813     return NULL_QREG;
814 }
815 
816 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
817                     int opsize)
818 {
819     int mode = extract32(insn, 3, 3);
820     int reg0 = REG(insn, 0);
821     return gen_lea_mode(env, s, mode, reg0, opsize);
822 }
823 
824 /* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
825    this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
826    zero extends).  ADDRP is non-null for read-write operands.  */
827 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
828                         int opsize, TCGv val, TCGv *addrp, ea_what what,
829                         int index)
830 {
831     TCGv reg, tmp, result;
832     int32_t offset;
833 
834     switch (mode) {
835     case 0: /* Data register direct.  */
836         reg = cpu_dregs[reg0];
837         if (what == EA_STORE) {
838             gen_partset_reg(opsize, reg, val);
839             return store_dummy;
840         } else {
841             return gen_extend(s, reg, opsize, what == EA_LOADS);
842         }
843     case 1: /* Address register direct.  */
844         reg = get_areg(s, reg0);
845         if (what == EA_STORE) {
846             tcg_gen_mov_i32(reg, val);
847             return store_dummy;
848         } else {
849             return gen_extend(s, reg, opsize, what == EA_LOADS);
850         }
851     case 2: /* Indirect register */
852         reg = get_areg(s, reg0);
853         return gen_ldst(s, opsize, reg, val, what, index);
854     case 3: /* Indirect postincrement.  */
855         reg = get_areg(s, reg0);
856         result = gen_ldst(s, opsize, reg, val, what, index);
857         if (what == EA_STORE || !addrp) {
858             TCGv tmp = tcg_temp_new();
859             if (reg0 == 7 && opsize == OS_BYTE &&
860                 m68k_feature(s->env, M68K_FEATURE_M68000)) {
861                 tcg_gen_addi_i32(tmp, reg, 2);
862             } else {
863                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
864             }
865             delay_set_areg(s, reg0, tmp, true);
866         }
867         return result;
868     case 4: /* Indirect predecrement.  */
869         if (addrp && what == EA_STORE) {
870             tmp = *addrp;
871         } else {
872             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
873             if (IS_NULL_QREG(tmp)) {
874                 return tmp;
875             }
876             if (addrp) {
877                 *addrp = tmp;
878             }
879         }
880         result = gen_ldst(s, opsize, tmp, val, what, index);
881         if (what == EA_STORE || !addrp) {
882             delay_set_areg(s, reg0, tmp, false);
883         }
884         return result;
885     case 5: /* Indirect displacement.  */
886     case 6: /* Indirect index + displacement.  */
887     do_indirect:
888         if (addrp && what == EA_STORE) {
889             tmp = *addrp;
890         } else {
891             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
892             if (IS_NULL_QREG(tmp)) {
893                 return tmp;
894             }
895             if (addrp) {
896                 *addrp = tmp;
897             }
898         }
899         return gen_ldst(s, opsize, tmp, val, what, index);
900     case 7: /* Other */
901         switch (reg0) {
902         case 0: /* Absolute short.  */
903         case 1: /* Absolute long.  */
904         case 2: /* pc displacement  */
905         case 3: /* pc index+displacement.  */
906             goto do_indirect;
907         case 4: /* Immediate.  */
908             /* Sign extend values for consistency.  */
909             switch (opsize) {
910             case OS_BYTE:
911                 if (what == EA_LOADS) {
912                     offset = (int8_t)read_im8(env, s);
913                 } else {
914                     offset = read_im8(env, s);
915                 }
916                 break;
917             case OS_WORD:
918                 if (what == EA_LOADS) {
919                     offset = (int16_t)read_im16(env, s);
920                 } else {
921                     offset = read_im16(env, s);
922                 }
923                 break;
924             case OS_LONG:
925                 offset = read_im32(env, s);
926                 break;
927             default:
928                 g_assert_not_reached();
929             }
930             return mark_to_release(s, tcg_const_i32(offset));
931         default:
932             return NULL_QREG;
933         }
934     }
935     /* Should never happen.  */
936     return NULL_QREG;
937 }
938 
939 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
940                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
941 {
942     int mode = extract32(insn, 3, 3);
943     int reg0 = REG(insn, 0);
944     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
945 }
946 
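/* Return a pointer into the CPU state to floating-point register FREG.  */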
947 static TCGv_ptr gen_fp_ptr(int freg)
948 {
949     TCGv_ptr fp = tcg_temp_new_ptr();
950     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
951     return fp;
952 }
953 
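/* Return a pointer into the CPU state to the FPU result register.  */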
954 static TCGv_ptr gen_fp_result_ptr(void)
955 {
956     TCGv_ptr fp = tcg_temp_new_ptr();
957     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
958     return fp;
959 }
960 
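/* Copy a floating-point register value: the upper (exponent) word and the
   64-bit lower part.  */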
961 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
962 {
963     TCGv t32;
964     TCGv_i64 t64;
965 
966     t32 = tcg_temp_new();
967     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
968     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
969     tcg_temp_free(t32);
970 
971     t64 = tcg_temp_new_i64();
972     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
973     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
974     tcg_temp_free_i64(t64);
975 }
976 
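/* Load a value of size OPSIZE from ADDR and convert it into the FP register
   pointed to by FP.  */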
977 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
978                         int index)
979 {
980     TCGv tmp;
981     TCGv_i64 t64;
982 
983     t64 = tcg_temp_new_i64();
984     tmp = tcg_temp_new();
985     switch (opsize) {
986     case OS_BYTE:
987         tcg_gen_qemu_ld8s(tmp, addr, index);
988         gen_helper_exts32(cpu_env, fp, tmp);
989         break;
990     case OS_WORD:
991         tcg_gen_qemu_ld16s(tmp, addr, index);
992         gen_helper_exts32(cpu_env, fp, tmp);
993         break;
994     case OS_LONG:
995         tcg_gen_qemu_ld32u(tmp, addr, index);
996         gen_helper_exts32(cpu_env, fp, tmp);
997         break;
998     case OS_SINGLE:
999         tcg_gen_qemu_ld32u(tmp, addr, index);
1000         gen_helper_extf32(cpu_env, fp, tmp);
1001         break;
1002     case OS_DOUBLE:
1003         tcg_gen_qemu_ld64(t64, addr, index);
1004         gen_helper_extf64(cpu_env, fp, t64);
1005         break;
1006     case OS_EXTENDED:
1007         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1008             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1009             break;
1010         }
1011         tcg_gen_qemu_ld32u(tmp, addr, index);
1012         tcg_gen_shri_i32(tmp, tmp, 16);
1013         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1014         tcg_gen_addi_i32(tmp, addr, 4);
1015         tcg_gen_qemu_ld64(t64, tmp, index);
1016         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1017         break;
1018     case OS_PACKED:
1019         /* unimplemented data type on 68040/ColdFire
1020          * FIXME if needed for another FPU
1021          */
1022         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1023         break;
1024     default:
1025         g_assert_not_reached();
1026     }
1027     tcg_temp_free(tmp);
1028     tcg_temp_free_i64(t64);
1029 }
1030 
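/* Convert the FP register pointed to by FP and store it at ADDR with
   size OPSIZE.  */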
1031 static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
1032                          int index)
1033 {
1034     TCGv tmp;
1035     TCGv_i64 t64;
1036 
1037     t64 = tcg_temp_new_i64();
1038     tmp = tcg_temp_new();
1039     switch (opsize) {
1040     case OS_BYTE:
1041         gen_helper_reds32(tmp, cpu_env, fp);
1042         tcg_gen_qemu_st8(tmp, addr, index);
1043         break;
1044     case OS_WORD:
1045         gen_helper_reds32(tmp, cpu_env, fp);
1046         tcg_gen_qemu_st16(tmp, addr, index);
1047         break;
1048     case OS_LONG:
1049         gen_helper_reds32(tmp, cpu_env, fp);
1050         tcg_gen_qemu_st32(tmp, addr, index);
1051         break;
1052     case OS_SINGLE:
1053         gen_helper_redf32(tmp, cpu_env, fp);
1054         tcg_gen_qemu_st32(tmp, addr, index);
1055         break;
1056     case OS_DOUBLE:
1057         gen_helper_redf64(t64, cpu_env, fp);
1058         tcg_gen_qemu_st64(t64, addr, index);
1059         break;
1060     case OS_EXTENDED:
1061         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1062             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1063             break;
1064         }
1065         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1066         tcg_gen_shli_i32(tmp, tmp, 16);
1067         tcg_gen_qemu_st32(tmp, addr, index);
1068         tcg_gen_addi_i32(tmp, addr, 4);
1069         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1070         tcg_gen_qemu_st64(t64, tmp, index);
1071         break;
1072     case OS_PACKED:
1073         /* unimplemented data type on 68040/ColdFire
1074          * FIXME if needed for another FPU
1075          */
1076         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1077         break;
1078     default:
1079         g_assert_not_reached();
1080     }
1081     tcg_temp_free(tmp);
1082     tcg_temp_free_i64(t64);
1083 }
1084 
1085 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1086                         TCGv_ptr fp, ea_what what, int index)
1087 {
1088     if (what == EA_STORE) {
1089         gen_store_fp(s, opsize, addr, fp, index);
1090     } else {
1091         gen_load_fp(s, opsize, addr, fp, index);
1092     }
1093 }
1094 
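/* Generate code to load/store an FP value for the given addressing mode.
   Returns 0 on success, -1 if the mode is invalid for this access.  */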
1095 static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1096                           int reg0, int opsize, TCGv_ptr fp, ea_what what,
1097                           int index)
1098 {
1099     TCGv reg, addr, tmp;
1100     TCGv_i64 t64;
1101 
1102     switch (mode) {
1103     case 0: /* Data register direct.  */
1104         reg = cpu_dregs[reg0];
1105         if (what == EA_STORE) {
1106             switch (opsize) {
1107             case OS_BYTE:
1108             case OS_WORD:
1109             case OS_LONG:
1110                 gen_helper_reds32(reg, cpu_env, fp);
1111                 break;
1112             case OS_SINGLE:
1113                 gen_helper_redf32(reg, cpu_env, fp);
1114                 break;
1115             default:
1116                 g_assert_not_reached();
1117             }
1118         } else {
1119             tmp = tcg_temp_new();
1120             switch (opsize) {
1121             case OS_BYTE:
1122                 tcg_gen_ext8s_i32(tmp, reg);
1123                 gen_helper_exts32(cpu_env, fp, tmp);
1124                 break;
1125             case OS_WORD:
1126                 tcg_gen_ext16s_i32(tmp, reg);
1127                 gen_helper_exts32(cpu_env, fp, tmp);
1128                 break;
1129             case OS_LONG:
1130                 gen_helper_exts32(cpu_env, fp, reg);
1131                 break;
1132             case OS_SINGLE:
1133                 gen_helper_extf32(cpu_env, fp, reg);
1134                 break;
1135             default:
1136                 g_assert_not_reached();
1137             }
1138             tcg_temp_free(tmp);
1139         }
1140         return 0;
1141     case 1: /* Address register direct.  */
1142         return -1;
1143     case 2: /* Indirect register */
1144         addr = get_areg(s, reg0);
1145         gen_ldst_fp(s, opsize, addr, fp, what, index);
1146         return 0;
1147     case 3: /* Indirect postincrement.  */
1148         addr = cpu_aregs[reg0];
1149         gen_ldst_fp(s, opsize, addr, fp, what, index);
1150         tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1151         return 0;
1152     case 4: /* Indirect predecrement.  */
1153         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1154         if (IS_NULL_QREG(addr)) {
1155             return -1;
1156         }
1157         gen_ldst_fp(s, opsize, addr, fp, what, index);
1158         tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1159         return 0;
1160     case 5: /* Indirect displacement.  */
1161     case 6: /* Indirect index + displacement.  */
1162     do_indirect:
1163         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1164         if (IS_NULL_QREG(addr)) {
1165             return -1;
1166         }
1167         gen_ldst_fp(s, opsize, addr, fp, what, index);
1168         return 0;
1169     case 7: /* Other */
1170         switch (reg0) {
1171         case 0: /* Absolute short.  */
1172         case 1: /* Absolute long.  */
1173         case 2: /* pc displacement  */
1174         case 3: /* pc index+displacement.  */
1175             goto do_indirect;
1176         case 4: /* Immediate.  */
1177             if (what == EA_STORE) {
1178                 return -1;
1179             }
1180             switch (opsize) {
1181             case OS_BYTE:
1182                 tmp = tcg_const_i32((int8_t)read_im8(env, s));
1183                 gen_helper_exts32(cpu_env, fp, tmp);
1184                 tcg_temp_free(tmp);
1185                 break;
1186             case OS_WORD:
1187                 tmp = tcg_const_i32((int16_t)read_im16(env, s));
1188                 gen_helper_exts32(cpu_env, fp, tmp);
1189                 tcg_temp_free(tmp);
1190                 break;
1191             case OS_LONG:
1192                 tmp = tcg_const_i32(read_im32(env, s));
1193                 gen_helper_exts32(cpu_env, fp, tmp);
1194                 tcg_temp_free(tmp);
1195                 break;
1196             case OS_SINGLE:
1197                 tmp = tcg_const_i32(read_im32(env, s));
1198                 gen_helper_extf32(cpu_env, fp, tmp);
1199                 tcg_temp_free(tmp);
1200                 break;
1201             case OS_DOUBLE:
1202                 t64 = tcg_const_i64(read_im64(env, s));
1203                 gen_helper_extf64(cpu_env, fp, t64);
1204                 tcg_temp_free_i64(t64);
1205                 break;
1206             case OS_EXTENDED:
1207                 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1208                     gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1209                     break;
1210                 }
1211                 tmp = tcg_const_i32(read_im32(env, s) >> 16);
1212                 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1213                 tcg_temp_free(tmp);
1214                 t64 = tcg_const_i64(read_im64(env, s));
1215                 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1216                 tcg_temp_free_i64(t64);
1217                 break;
1218             case OS_PACKED:
1219                 /* unimplemented data type on 68040/ColdFire
1220                  * FIXME if needed for another FPU
1221                  */
1222                 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1223                 break;
1224             default:
1225                 g_assert_not_reached();
1226             }
1227             return 0;
1228         default:
1229             return -1;
1230         }
1231     }
1232     return -1;
1233 }
1234 
1235 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1236                        int opsize, TCGv_ptr fp, ea_what what, int index)
1237 {
1238     int mode = extract32(insn, 3, 3);
1239     int reg0 = REG(insn, 0);
1240     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1241 }
1242 
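/* A condition prepared by gen_cc_cond: the test is V1 TCOND V2.  G1/G2
   indicate that V1/V2 are globals and must not be freed by free_cond().  */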
1243 typedef struct {
1244     TCGCond tcond;
1245     bool g1;
1246     bool g2;
1247     TCGv v1;
1248     TCGv v2;
1249 } DisasCompare;
1250 
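/* Set up C so that (v1 tcond v2) is equivalent to condition code COND,
   reusing the lazy flag state where possible and flushing otherwise.  */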
1251 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1252 {
1253     TCGv tmp, tmp2;
1254     TCGCond tcond;
1255     CCOp op = s->cc_op;
1256 
1257     /* The CC_OP_CMP form can handle most normal comparisons directly.  */
1258     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1259         c->g1 = c->g2 = 1;
1260         c->v1 = QREG_CC_N;
1261         c->v2 = QREG_CC_V;
1262         switch (cond) {
1263         case 2: /* HI */
1264         case 3: /* LS */
1265             tcond = TCG_COND_LEU;
1266             goto done;
1267         case 4: /* CC */
1268         case 5: /* CS */
1269             tcond = TCG_COND_LTU;
1270             goto done;
1271         case 6: /* NE */
1272         case 7: /* EQ */
1273             tcond = TCG_COND_EQ;
1274             goto done;
1275         case 10: /* PL */
1276         case 11: /* MI */
1277             c->g1 = c->g2 = 0;
1278             c->v2 = tcg_const_i32(0);
1279             c->v1 = tmp = tcg_temp_new();
1280             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1281             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1282             /* fallthru */
1283         case 12: /* GE */
1284         case 13: /* LT */
1285             tcond = TCG_COND_LT;
1286             goto done;
1287         case 14: /* GT */
1288         case 15: /* LE */
1289             tcond = TCG_COND_LE;
1290             goto done;
1291         }
1292     }
1293 
1294     c->g1 = 1;
1295     c->g2 = 0;
1296     c->v2 = tcg_const_i32(0);
1297 
1298     switch (cond) {
1299     case 0: /* T */
1300     case 1: /* F */
1301         c->v1 = c->v2;
1302         tcond = TCG_COND_NEVER;
1303         goto done;
1304     case 14: /* GT (!(Z || (N ^ V))) */
1305     case 15: /* LE (Z || (N ^ V)) */
1306         /* Logic operations clear V, which simplifies LE to (Z || N),
1307            and since Z and N are co-located, this becomes a normal
1308            comparison vs N.  */
1309         if (op == CC_OP_LOGIC) {
1310             c->v1 = QREG_CC_N;
1311             tcond = TCG_COND_LE;
1312             goto done;
1313         }
1314         break;
1315     case 12: /* GE (!(N ^ V)) */
1316     case 13: /* LT (N ^ V) */
1317         /* Logic operations clear V, which simplifies this to N.  */
1318         if (op != CC_OP_LOGIC) {
1319             break;
1320         }
1321         /* fallthru */
1322     case 10: /* PL (!N) */
1323     case 11: /* MI (N) */
1324         /* Several cases represent N normally.  */
1325         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1326             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1327             op == CC_OP_LOGIC) {
1328             c->v1 = QREG_CC_N;
1329             tcond = TCG_COND_LT;
1330             goto done;
1331         }
1332         break;
1333     case 6: /* NE (!Z) */
1334     case 7: /* EQ (Z) */
1335         /* Some cases fold Z into N.  */
1336         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1337             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1338             op == CC_OP_LOGIC) {
1339             tcond = TCG_COND_EQ;
1340             c->v1 = QREG_CC_N;
1341             goto done;
1342         }
1343         break;
1344     case 4: /* CC (!C) */
1345     case 5: /* CS (C) */
1346         /* Some cases fold C into X.  */
1347         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1348             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1349             tcond = TCG_COND_NE;
1350             c->v1 = QREG_CC_X;
1351             goto done;
1352         }
1353         /* fallthru */
1354     case 8: /* VC (!V) */
1355     case 9: /* VS (V) */
1356         /* Logic operations clear V and C.  */
1357         if (op == CC_OP_LOGIC) {
1358             tcond = TCG_COND_NEVER;
1359             c->v1 = c->v2;
1360             goto done;
1361         }
1362         break;
1363     }
1364 
1365     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1366     gen_flush_flags(s);
1367 
1368     switch (cond) {
1369     case 0: /* T */
1370     case 1: /* F */
1371     default:
1372         /* Invalid, or handled above.  */
1373         abort();
1374     case 2: /* HI (!C && !Z) -> !(C || Z)*/
1375     case 3: /* LS (C || Z) */
1376         c->v1 = tmp = tcg_temp_new();
1377         c->g1 = 0;
1378         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1379         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1380         tcond = TCG_COND_NE;
1381         break;
1382     case 4: /* CC (!C) */
1383     case 5: /* CS (C) */
1384         c->v1 = QREG_CC_C;
1385         tcond = TCG_COND_NE;
1386         break;
1387     case 6: /* NE (!Z) */
1388     case 7: /* EQ (Z) */
1389         c->v1 = QREG_CC_Z;
1390         tcond = TCG_COND_EQ;
1391         break;
1392     case 8: /* VC (!V) */
1393     case 9: /* VS (V) */
1394         c->v1 = QREG_CC_V;
1395         tcond = TCG_COND_LT;
1396         break;
1397     case 10: /* PL (!N) */
1398     case 11: /* MI (N) */
1399         c->v1 = QREG_CC_N;
1400         tcond = TCG_COND_LT;
1401         break;
1402     case 12: /* GE (!(N ^ V)) */
1403     case 13: /* LT (N ^ V) */
1404         c->v1 = tmp = tcg_temp_new();
1405         c->g1 = 0;
1406         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1407         tcond = TCG_COND_LT;
1408         break;
1409     case 14: /* GT (!(Z || (N ^ V))) */
1410     case 15: /* LE (Z || (N ^ V)) */
1411         c->v1 = tmp = tcg_temp_new();
1412         c->g1 = 0;
1413         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1414         tcg_gen_neg_i32(tmp, tmp);
1415         tmp2 = tcg_temp_new();
1416         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1417         tcg_gen_or_i32(tmp, tmp, tmp2);
1418         tcg_temp_free(tmp2);
1419         tcond = TCG_COND_LT;
1420         break;
1421     }
1422 
1423  done:
1424     if ((cond & 1) == 0) {
1425         tcond = tcg_invert_cond(tcond);
1426     }
1427     c->tcond = tcond;
1428 }
1429 
1430 static void free_cond(DisasCompare *c)
1431 {
1432     if (!c->g1) {
1433         tcg_temp_free(c->v1);
1434     }
1435     if (!c->g2) {
1436         tcg_temp_free(c->v2);
1437     }
1438 }
1439 
1440 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1441 {
1442   DisasCompare c;
1443 
1444   gen_cc_cond(&c, s, cond);
1445   update_cc_op(s);
1446   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1447   free_cond(&c);
1448 }
1449 
1450 /* Force a TB lookup after an instruction that changes the CPU state.  */
1451 static void gen_lookup_tb(DisasContext *s)
1452 {
1453     update_cc_op(s);
1454     tcg_gen_movi_i32(QREG_PC, s->pc);
1455     s->is_jmp = DISAS_UPDATE;
1456 }
1457 
1458 #define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1459         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1460                         op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
1461         if (IS_NULL_QREG(result)) {                                     \
1462             gen_addr_fault(s);                                          \
1463             return;                                                     \
1464         }                                                               \
1465     } while (0)
1466 
1467 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1468         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
1469                                 EA_STORE, IS_USER(s));                  \
1470         if (IS_NULL_QREG(ea_result)) {                                  \
1471             gen_addr_fault(s);                                          \
1472             return;                                                     \
1473         }                                                               \
1474     } while (0)
1475 
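/* Direct block chaining is only safe when DEST is on the same page as the
   TB start or the current instruction (always true for user-only).  */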
1476 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1477 {
1478 #ifndef CONFIG_USER_ONLY
1479     return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1480            (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1481 #else
1482     return true;
1483 #endif
1484 }
1485 
1486 /* Generate a jump to an immediate address.  */
1487 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1488 {
1489     if (unlikely(s->singlestep_enabled)) {
1490         gen_exception(s, dest, EXCP_DEBUG);
1491     } else if (use_goto_tb(s, dest)) {
1492         tcg_gen_goto_tb(n);
1493         tcg_gen_movi_i32(QREG_PC, dest);
1494         tcg_gen_exit_tb((uintptr_t)s->tb + n);
1495     } else {
1496         gen_jmp_im(s, dest);
1497         tcg_gen_exit_tb(0);
1498     }
1499     s->is_jmp = DISAS_TB_JUMP;
1500 }
1501 
1502 DISAS_INSN(scc)
1503 {
1504     DisasCompare c;
1505     int cond;
1506     TCGv tmp;
1507 
1508     cond = (insn >> 8) & 0xf;
1509     gen_cc_cond(&c, s, cond);
1510 
1511     tmp = tcg_temp_new();
1512     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1513     free_cond(&c);
1514 
1515     tcg_gen_neg_i32(tmp, tmp);
1516     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1517     tcg_temp_free(tmp);
1518 }
1519 
1520 DISAS_INSN(dbcc)
1521 {
1522     TCGLabel *l1;
1523     TCGv reg;
1524     TCGv tmp;
1525     int16_t offset;
1526     uint32_t base;
1527 
1528     reg = DREG(insn, 0);
1529     base = s->pc;
1530     offset = (int16_t)read_im16(env, s);
1531     l1 = gen_new_label();
1532     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1533 
1534     tmp = tcg_temp_new();
1535     tcg_gen_ext16s_i32(tmp, reg);
1536     tcg_gen_addi_i32(tmp, tmp, -1);
1537     gen_partset_reg(OS_WORD, reg, tmp);
1538     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1539     gen_jmp_tb(s, 1, base + offset);
1540     gen_set_label(l1);
1541     gen_jmp_tb(s, 0, s->pc);
1542 }
1543 
1544 DISAS_INSN(undef_mac)
1545 {
1546     gen_exception(s, s->insn_pc, EXCP_LINEA);
1547 }
1548 
1549 DISAS_INSN(undef_fpu)
1550 {
1551     gen_exception(s, s->insn_pc, EXCP_LINEF);
1552 }
1553 
1554 DISAS_INSN(undef)
1555 {
1556     /* ??? This covers both instructions that are as yet unimplemented
1557        for the 680x0 series, and those that are implemented but
1558        actually illegal for CPU32 or pre-68020.  */
1559     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1560                   insn, s->insn_pc);
1561     gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
1562 }
1563 
1564 DISAS_INSN(mulw)
1565 {
1566     TCGv reg;
1567     TCGv tmp;
1568     TCGv src;
1569     int sign;
1570 
1571     sign = (insn & 0x100) != 0;
1572     reg = DREG(insn, 9);
1573     tmp = tcg_temp_new();
1574     if (sign)
1575         tcg_gen_ext16s_i32(tmp, reg);
1576     else
1577         tcg_gen_ext16u_i32(tmp, reg);
1578     SRC_EA(env, src, OS_WORD, sign, NULL);
1579     tcg_gen_mul_i32(tmp, tmp, src);
1580     tcg_gen_mov_i32(reg, tmp);
1581     gen_logic_cc(s, tmp, OS_LONG);
1582     tcg_temp_free(tmp);
1583 }
1584 
1585 DISAS_INSN(divw)
1586 {
1587     int sign;
1588     TCGv src;
1589     TCGv destr;
1590 
1591     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1592 
1593     sign = (insn & 0x100) != 0;
1594 
1595     /* dest.l / src.w */
1596 
1597     SRC_EA(env, src, OS_WORD, sign, NULL);
1598     destr = tcg_const_i32(REG(insn, 9));
1599     if (sign) {
1600         gen_helper_divsw(cpu_env, destr, src);
1601     } else {
1602         gen_helper_divuw(cpu_env, destr, src);
1603     }
1604     tcg_temp_free(destr);
1605 
1606     set_cc_op(s, CC_OP_FLAGS);
1607 }
1608 
1609 DISAS_INSN(divl)
1610 {
1611     TCGv num, reg, den;
1612     int sign;
1613     uint16_t ext;
1614 
1615     ext = read_im16(env, s);
1616 
1617     sign = (ext & 0x0800) != 0;
1618 
1619     if (ext & 0x400) {
1620         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1621             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1622             return;
1623         }
1624 
1625         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1626 
1627         SRC_EA(env, den, OS_LONG, 0, NULL);
1628         num = tcg_const_i32(REG(ext, 12));
1629         reg = tcg_const_i32(REG(ext, 0));
1630         if (sign) {
1631             gen_helper_divsll(cpu_env, num, reg, den);
1632         } else {
1633             gen_helper_divull(cpu_env, num, reg, den);
1634         }
1635         tcg_temp_free(reg);
1636         tcg_temp_free(num);
1637         set_cc_op(s, CC_OP_FLAGS);
1638         return;
1639     }
1640 
1641     /* divX.l <EA>, Dq        32/32 -> 32q     */
1642     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1643 
1644     SRC_EA(env, den, OS_LONG, 0, NULL);
1645     num = tcg_const_i32(REG(ext, 12));
1646     reg = tcg_const_i32(REG(ext, 0));
1647     if (sign) {
1648         gen_helper_divsl(cpu_env, num, reg, den);
1649     } else {
1650         gen_helper_divul(cpu_env, num, reg, den);
1651     }
1652     tcg_temp_free(reg);
1653     tcg_temp_free(num);
1654 
1655     set_cc_op(s, CC_OP_FLAGS);
1656 }
1657 
1658 static void bcd_add(TCGv dest, TCGv src)
1659 {
1660     TCGv t0, t1;
1661 
1662     /*  dest10 = dest10 + src10 + X
1663      *
1664      *        t1 = src
1665      *        t2 = t1 + 0x066
1666      *        t3 = t2 + dest + X
1667      *        t4 = t2 ^ dest
1668      *        t5 = t3 ^ t4
1669      *        t6 = ~t5 & 0x110
1670      *        t7 = (t6 >> 2) | (t6 >> 3)
1671      *        return t3 - t7
1672      */
1673 
1674     /* t1 = (src + 0x066) + dest + X
1675      *    = result with a possible extra 0x6 in some digits
1676      */
1677 
1678     t0 = tcg_const_i32(0x066);
1679     tcg_gen_add_i32(t0, t0, src);
1680 
1681     t1 = tcg_temp_new();
1682     tcg_gen_add_i32(t1, t0, dest);
1683     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1684 
1685     /* we will remove the extra 0x6 where there is no carry */
1686 
1687     /* t0 = (src + 0x0066) ^ dest
1688      *    = t1 without carries
1689      */
1690 
1691     tcg_gen_xor_i32(t0, t0, dest);
1692 
1693     /* extract the carries
1694      * t0 = t0 ^ t1
1695      *    = only the carries
1696      */
1697 
1698     tcg_gen_xor_i32(t0, t0, t1);
1699 
1700     /* generate 0x1 where there is no carry
1701      * and for each 0x10, generate a 0x6
1702      */
1703 
1704     tcg_gen_shri_i32(t0, t0, 3);
1705     tcg_gen_not_i32(t0, t0);
1706     tcg_gen_andi_i32(t0, t0, 0x22);
1707     tcg_gen_add_i32(dest, t0, t0);
1708     tcg_gen_add_i32(dest, dest, t0);
1709     tcg_temp_free(t0);
1710 
1711     /* remove the extra 0x6
1712      * for digits that have not generated a carry
1713      */
1714 
1715     tcg_gen_sub_i32(dest, t1, dest);
1716     tcg_temp_free(t1);
1717 }
1718 
1719 static void bcd_sub(TCGv dest, TCGv src)
1720 {
1721     TCGv t0, t1, t2;
1722 
1723     /*  dest10 = dest10 - src10 - X
1724      *         = bcd_add(dest + 1 - X, 0x199 - src)
1725      */
1726 
1727     /* t0 = 0x066 + (0x199 - src) */
1728 
1729     t0 = tcg_temp_new();
1730     tcg_gen_subfi_i32(t0, 0x1ff, src);
1731 
1732     /* t1 = t0 + dest + 1 - X*/
1733 
1734     t1 = tcg_temp_new();
1735     tcg_gen_add_i32(t1, t0, dest);
1736     tcg_gen_addi_i32(t1, t1, 1);
1737     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1738 
1739     /* t2 = t0 ^ dest */
1740 
1741     t2 = tcg_temp_new();
1742     tcg_gen_xor_i32(t2, t0, dest);
1743 
1744     /* t0 = t1 ^ t2 */
1745 
1746     tcg_gen_xor_i32(t0, t1, t2);
1747 
1748     /* t2 = ~t0 & 0x110
1749      * t0 = (t2 >> 2) | (t2 >> 3)
1750      *
1751      * to fit on 8bit operands, changed in:
1752      *
1753      * t2 = ~(t0 >> 3) & 0x22
1754      * t0 = t2 + t2
1755      * t0 = t0 + t2
1756      */
1757 
1758     tcg_gen_shri_i32(t2, t0, 3);
1759     tcg_gen_not_i32(t2, t2);
1760     tcg_gen_andi_i32(t2, t2, 0x22);
1761     tcg_gen_add_i32(t0, t2, t2);
1762     tcg_gen_add_i32(t0, t0, t2);
1763     tcg_temp_free(t2);
1764 
1765     /* return t1 - t0 */
1766 
1767     tcg_gen_sub_i32(dest, t1, t0);
1768     tcg_temp_free(t0);
1769     tcg_temp_free(t1);
1770 }
1771 
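     /* Set the flags from the 9-bit BCD result in val: C and X are the decimal
      * carry out (bit 8), and Z is only cleared when the low byte is non-zero,
      * so that !Z stays sticky across a chain of BCD operations.  N and V are
      * left untouched (they are undefined for the BCD instructions).
      */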
1772 static void bcd_flags(TCGv val)
1773 {
1774     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1775     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1776 
1777     tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1778 
1779     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1780 }
1781 
1782 DISAS_INSN(abcd_reg)
1783 {
1784     TCGv src;
1785     TCGv dest;
1786 
1787     gen_flush_flags(s); /* !Z is sticky */
1788 
1789     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1790     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1791     bcd_add(dest, src);
1792     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1793 
1794     bcd_flags(dest);
1795 }
1796 
1797 DISAS_INSN(abcd_mem)
1798 {
1799     TCGv src, dest, addr;
1800 
1801     gen_flush_flags(s); /* !Z is sticky */
1802 
1803     /* Indirect pre-decrement load (mode 4) */
1804 
1805     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1806                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1807     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1808                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1809 
1810     bcd_add(dest, src);
1811 
1812     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1813                 EA_STORE, IS_USER(s));
1814 
1815     bcd_flags(dest);
1816 }
1817 
1818 DISAS_INSN(sbcd_reg)
1819 {
1820     TCGv src, dest;
1821 
1822     gen_flush_flags(s); /* !Z is sticky */
1823 
1824     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1825     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1826 
1827     bcd_sub(dest, src);
1828 
1829     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1830 
1831     bcd_flags(dest);
1832 }
1833 
1834 DISAS_INSN(sbcd_mem)
1835 {
1836     TCGv src, dest, addr;
1837 
1838     gen_flush_flags(s); /* !Z is sticky */
1839 
1840     /* Indirect pre-decrement load (mode 4) */
1841 
1842     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1843                       NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1844     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1845                        NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1846 
1847     bcd_sub(dest, src);
1848 
1849     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1850                 EA_STORE, IS_USER(s));
1851 
1852     bcd_flags(dest);
1853 }
1854 
1855 DISAS_INSN(nbcd)
1856 {
1857     TCGv src, dest;
1858     TCGv addr;
1859 
1860     gen_flush_flags(s); /* !Z is sticky */
1861 
1862     SRC_EA(env, src, OS_BYTE, 0, &addr);
1863 
1864     dest = tcg_const_i32(0);
1865     bcd_sub(dest, src);
1866 
1867     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1868 
1869     bcd_flags(dest);
1870 
1871     tcg_temp_free(dest);
1872 }
1873 
1874 DISAS_INSN(addsub)
1875 {
1876     TCGv reg;
1877     TCGv dest;
1878     TCGv src;
1879     TCGv tmp;
1880     TCGv addr;
1881     int add;
1882     int opsize;
1883 
1884     add = (insn & 0x4000) != 0;
1885     opsize = insn_opsize(insn);
1886     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
1887     dest = tcg_temp_new();
1888     if (insn & 0x100) {
1889         SRC_EA(env, tmp, opsize, 1, &addr);
1890         src = reg;
1891     } else {
1892         tmp = reg;
1893         SRC_EA(env, src, opsize, 1, NULL);
1894     }
1895     if (add) {
1896         tcg_gen_add_i32(dest, tmp, src);
1897         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1898         set_cc_op(s, CC_OP_ADDB + opsize);
1899     } else {
1900         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1901         tcg_gen_sub_i32(dest, tmp, src);
1902         set_cc_op(s, CC_OP_SUBB + opsize);
1903     }
1904     gen_update_cc_add(dest, src, opsize);
1905     if (insn & 0x100) {
1906         DEST_EA(env, insn, opsize, dest, &addr);
1907     } else {
1908         gen_partset_reg(opsize, DREG(insn, 9), dest);
1909     }
1910     tcg_temp_free(dest);
1911 }
1912 
1913 /* Reverse the order of the bits in REG.  */
1914 DISAS_INSN(bitrev)
1915 {
1916     TCGv reg;
1917     reg = DREG(insn, 0);
1918     gen_helper_bitrev(reg, reg);
1919 }
1920 
1921 DISAS_INSN(bitop_reg)
1922 {
1923     int opsize;
1924     int op;
1925     TCGv src1;
1926     TCGv src2;
1927     TCGv tmp;
1928     TCGv addr;
1929     TCGv dest;
1930 
1931     if ((insn & 0x38) != 0)
1932         opsize = OS_BYTE;
1933     else
1934         opsize = OS_LONG;
1935     op = (insn >> 6) & 3;
1936     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1937 
1938     gen_flush_flags(s);
1939     src2 = tcg_temp_new();
1940     if (opsize == OS_BYTE)
1941         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1942     else
1943         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1944 
1945     tmp = tcg_const_i32(1);
1946     tcg_gen_shl_i32(tmp, tmp, src2);
1947     tcg_temp_free(src2);
1948 
1949     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1950 
1951     dest = tcg_temp_new();
1952     switch (op) {
1953     case 1: /* bchg */
1954         tcg_gen_xor_i32(dest, src1, tmp);
1955         break;
1956     case 2: /* bclr */
1957         tcg_gen_andc_i32(dest, src1, tmp);
1958         break;
1959     case 3: /* bset */
1960         tcg_gen_or_i32(dest, src1, tmp);
1961         break;
1962     default: /* btst */
1963         break;
1964     }
1965     tcg_temp_free(tmp);
1966     if (op) {
1967         DEST_EA(env, insn, opsize, dest, &addr);
1968     }
1969     tcg_temp_free(dest);
1970 }
1971 
1972 DISAS_INSN(sats)
1973 {
1974     TCGv reg;
1975     reg = DREG(insn, 0);
1976     gen_flush_flags(s);
1977     gen_helper_sats(reg, reg, QREG_CC_V);
1978     gen_logic_cc(s, reg, OS_LONG);
1979 }
1980 
1981 static void gen_push(DisasContext *s, TCGv val)
1982 {
1983     TCGv tmp;
1984 
1985     tmp = tcg_temp_new();
1986     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1987     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1988     tcg_gen_mov_i32(QREG_SP, tmp);
1989     tcg_temp_free(tmp);
1990 }
1991 
1992 static TCGv mreg(int reg)
1993 {
1994     if (reg < 8) {
1995         /* Dx */
1996         return cpu_dregs[reg];
1997     }
1998     /* Ax */
1999     return cpu_aregs[reg & 7];
2000 }
2001 
2002 DISAS_INSN(movem)
2003 {
2004     TCGv addr, incr, tmp, r[16];
2005     int is_load = (insn & 0x0400) != 0;
2006     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
2007     uint16_t mask = read_im16(env, s);
2008     int mode = extract32(insn, 3, 3);
2009     int reg0 = REG(insn, 0);
2010     int i;
2011 
2012     tmp = cpu_aregs[reg0];
2013 
2014     switch (mode) {
2015     case 0: /* data register direct */
2016     case 1: /* addr register direct */
2017     do_addr_fault:
2018         gen_addr_fault(s);
2019         return;
2020 
2021     case 2: /* indirect */
2022         break;
2023 
2024     case 3: /* indirect post-increment */
2025         if (!is_load) {
2026             /* post-increment is not allowed */
2027             goto do_addr_fault;
2028         }
2029         break;
2030 
2031     case 4: /* indirect pre-decrement */
2032         if (is_load) {
2033             /* pre-decrement is not allowed */
2034             goto do_addr_fault;
2035         }
2036         /* We want a bare copy of the address reg, without any pre-decrement
2037            adjustment, as gen_lea would provide.  */
2038         break;
2039 
2040     default:
2041         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2042         if (IS_NULL_QREG(tmp)) {
2043             goto do_addr_fault;
2044         }
2045         break;
2046     }
2047 
2048     addr = tcg_temp_new();
2049     tcg_gen_mov_i32(addr, tmp);
2050     incr = tcg_const_i32(opsize_bytes(opsize));
2051 
2052     if (is_load) {
2053         /* memory to register */
2054         for (i = 0; i < 16; i++) {
2055             if (mask & (1 << i)) {
2056                 r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
2057                 tcg_gen_add_i32(addr, addr, incr);
2058             }
2059         }
2060         for (i = 0; i < 16; i++) {
2061             if (mask & (1 << i)) {
2062                 tcg_gen_mov_i32(mreg(i), r[i]);
2063                 tcg_temp_free(r[i]);
2064             }
2065         }
2066         if (mode == 3) {
2067             /* post-increment: movem (An)+,X */
2068             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2069         }
2070     } else {
2071         /* register to memory */
2072         if (mode == 4) {
2073             /* pre-decrement: movem X,-(An) */
2074             for (i = 15; i >= 0; i--) {
2075                 if ((mask << i) & 0x8000) {
2076                     tcg_gen_sub_i32(addr, addr, incr);
2077                     if (reg0 + 8 == i &&
2078                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2079                         /* M68020+: if the addressing register is the
2080                          * register moved to memory, the value written
2081                          * is the initial value decremented by the size of
2082                          * the operation, regardless of how many actual
2083                          * stores have been performed until this point.
2084                          * M68000/M68010: the value is the initial value.
2085                          */
2086                         tmp = tcg_temp_new();
2087                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2088                         gen_store(s, opsize, addr, tmp, IS_USER(s));
2089                         tcg_temp_free(tmp);
2090                     } else {
2091                         gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2092                     }
2093                 }
2094             }
2095             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2096         } else {
2097             for (i = 0; i < 16; i++) {
2098                 if (mask & (1 << i)) {
2099                     gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2100                     tcg_gen_add_i32(addr, addr, incr);
2101                 }
2102             }
2103         }
2104     }
2105 
2106     tcg_temp_free(incr);
2107     tcg_temp_free(addr);
2108 }
2109 
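     /* MOVEP transfers two or four bytes between a data register and alternate
      * bytes of memory (every second byte, most significant byte first), a form
      * originally intended for 8-bit peripherals on a 16-bit bus.  Bit 6 of the
      * opcode selects word vs long, bit 7 selects register-to-memory.
      */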
2110 DISAS_INSN(movep)
2111 {
2112     uint8_t i;
2113     int16_t displ;
2114     TCGv reg;
2115     TCGv addr;
2116     TCGv abuf;
2117     TCGv dbuf;
2118 
2119     displ = read_im16(env, s);
2120 
2121     addr = AREG(insn, 0);
2122     reg = DREG(insn, 9);
2123 
2124     abuf = tcg_temp_new();
2125     tcg_gen_addi_i32(abuf, addr, displ);
2126     dbuf = tcg_temp_new();
2127 
2128     if (insn & 0x40) {
2129         i = 4;
2130     } else {
2131         i = 2;
2132     }
2133 
2134     if (insn & 0x80) {
2135         for ( ; i > 0 ; i--) {
2136             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2137             tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
2138             if (i > 1) {
2139                 tcg_gen_addi_i32(abuf, abuf, 2);
2140             }
2141         }
2142     } else {
2143         for ( ; i > 0 ; i--) {
2144             tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
2145             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2146             if (i > 1) {
2147                 tcg_gen_addi_i32(abuf, abuf, 2);
2148             }
2149         }
2150     }
2151     tcg_temp_free(abuf);
2152     tcg_temp_free(dbuf);
2153 }
2154 
2155 DISAS_INSN(bitop_im)
2156 {
2157     int opsize;
2158     int op;
2159     TCGv src1;
2160     uint32_t mask;
2161     int bitnum;
2162     TCGv tmp;
2163     TCGv addr;
2164 
2165     if ((insn & 0x38) != 0)
2166         opsize = OS_BYTE;
2167     else
2168         opsize = OS_LONG;
2169     op = (insn >> 6) & 3;
2170 
2171     bitnum = read_im16(env, s);
2172     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2173         if (bitnum & 0xfe00) {
2174             disas_undef(env, s, insn);
2175             return;
2176         }
2177     } else {
2178         if (bitnum & 0xff00) {
2179             disas_undef(env, s, insn);
2180             return;
2181         }
2182     }
2183 
2184     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2185 
2186     gen_flush_flags(s);
2187     if (opsize == OS_BYTE)
2188         bitnum &= 7;
2189     else
2190         bitnum &= 31;
2191     mask = 1 << bitnum;
2192 
2193     tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2194 
2195     if (op) {
2196         tmp = tcg_temp_new();
2197         switch (op) {
2198         case 1: /* bchg */
2199             tcg_gen_xori_i32(tmp, src1, mask);
2200             break;
2201         case 2: /* bclr */
2202             tcg_gen_andi_i32(tmp, src1, ~mask);
2203             break;
2204         case 3: /* bset */
2205             tcg_gen_ori_i32(tmp, src1, mask);
2206             break;
2207         default: /* btst */
2208             break;
2209         }
2210         DEST_EA(env, insn, opsize, tmp, &addr);
2211         tcg_temp_free(tmp);
2212     }
2213 }
2214 
2215 static TCGv gen_get_ccr(DisasContext *s)
2216 {
2217     TCGv dest;
2218 
2219     update_cc_op(s);
2220     dest = tcg_temp_new();
2221     gen_helper_get_ccr(dest, cpu_env);
2222     return dest;
2223 }
2224 
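     /* Build the full 16-bit SR: the system byte and the unused bits are taken
      * from QREG_SR (masked with 0xffe0), while the condition codes are
      * recomputed from the lazily evaluated flags via gen_get_ccr().
      */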
2225 static TCGv gen_get_sr(DisasContext *s)
2226 {
2227     TCGv ccr;
2228     TCGv sr;
2229 
2230     ccr = gen_get_ccr(s);
2231     sr = tcg_temp_new();
2232     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2233     tcg_gen_or_i32(sr, sr, ccr);
2234     return sr;
2235 }
2236 
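     /* Load the CCR (or the whole SR) from an immediate.  The CCR-only case
      * writes the flags directly in their internal form: C and X live in bit 0,
      * N and V are sign-based (so "set" is stored as -1), and Z is inverted
      * (QREG_CC_Z == 0 means the Z flag is set).
      */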
2237 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2238 {
2239     if (ccr_only) {
2240         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2241         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2242         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2243         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2244         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2245     } else {
2246         TCGv sr = tcg_const_i32(val);
2247         gen_helper_set_sr(cpu_env, sr);
2248         tcg_temp_free(sr);
2249     }
2250     set_cc_op(s, CC_OP_FLAGS);
2251 }
2252 
2253 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2254 {
2255     if (ccr_only) {
2256         gen_helper_set_ccr(cpu_env, val);
2257     } else {
2258         gen_helper_set_sr(cpu_env, val);
2259     }
2260     set_cc_op(s, CC_OP_FLAGS);
2261 }
2262 
2263 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2264                            bool ccr_only)
2265 {
2266     if ((insn & 0x3f) == 0x3c) {
2267         uint16_t val;
2268         val = read_im16(env, s);
2269         gen_set_sr_im(s, val, ccr_only);
2270     } else {
2271         TCGv src;
2272         SRC_EA(env, src, OS_WORD, 0, NULL);
2273         gen_set_sr(s, src, ccr_only);
2274     }
2275 }
2276 
2277 DISAS_INSN(arith_im)
2278 {
2279     int op;
2280     TCGv im;
2281     TCGv src1;
2282     TCGv dest;
2283     TCGv addr;
2284     int opsize;
2285     bool with_SR = ((insn & 0x3f) == 0x3c);
2286 
2287     op = (insn >> 9) & 7;
2288     opsize = insn_opsize(insn);
2289     switch (opsize) {
2290     case OS_BYTE:
2291         im = tcg_const_i32((int8_t)read_im8(env, s));
2292         break;
2293     case OS_WORD:
2294         im = tcg_const_i32((int16_t)read_im16(env, s));
2295         break;
2296     case OS_LONG:
2297         im = tcg_const_i32(read_im32(env, s));
2298         break;
2299     default:
2300        abort();
2301     }
2302 
2303     if (with_SR) {
2304         /* SR/CCR can only be used with andi/eori/ori */
2305         if (op == 2 || op == 3 || op == 6) {
2306             disas_undef(env, s, insn);
2307             return;
2308         }
2309         switch (opsize) {
2310         case OS_BYTE:
2311             src1 = gen_get_ccr(s);
2312             break;
2313         case OS_WORD:
2314             if (IS_USER(s)) {
2315                 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
2316                 return;
2317             }
2318             src1 = gen_get_sr(s);
2319             break;
2320         case OS_LONG:
2321             disas_undef(env, s, insn);
2322             return;
2323         }
2324     } else {
2325         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2326     }
2327     dest = tcg_temp_new();
2328     switch (op) {
2329     case 0: /* ori */
2330         tcg_gen_or_i32(dest, src1, im);
2331         if (with_SR) {
2332             gen_set_sr(s, dest, opsize == OS_BYTE);
2333         } else {
2334             DEST_EA(env, insn, opsize, dest, &addr);
2335             gen_logic_cc(s, dest, opsize);
2336         }
2337         break;
2338     case 1: /* andi */
2339         tcg_gen_and_i32(dest, src1, im);
2340         if (with_SR) {
2341             gen_set_sr(s, dest, opsize == OS_BYTE);
2342         } else {
2343             DEST_EA(env, insn, opsize, dest, &addr);
2344             gen_logic_cc(s, dest, opsize);
2345         }
2346         break;
2347     case 2: /* subi */
2348         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2349         tcg_gen_sub_i32(dest, src1, im);
2350         gen_update_cc_add(dest, im, opsize);
2351         set_cc_op(s, CC_OP_SUBB + opsize);
2352         DEST_EA(env, insn, opsize, dest, &addr);
2353         break;
2354     case 3: /* addi */
2355         tcg_gen_add_i32(dest, src1, im);
2356         gen_update_cc_add(dest, im, opsize);
2357         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2358         set_cc_op(s, CC_OP_ADDB + opsize);
2359         DEST_EA(env, insn, opsize, dest, &addr);
2360         break;
2361     case 5: /* eori */
2362         tcg_gen_xor_i32(dest, src1, im);
2363         if (with_SR) {
2364             gen_set_sr(s, dest, opsize == OS_BYTE);
2365         } else {
2366             DEST_EA(env, insn, opsize, dest, &addr);
2367             gen_logic_cc(s, dest, opsize);
2368         }
2369         break;
2370     case 6: /* cmpi */
2371         gen_update_cc_cmp(s, src1, im, opsize);
2372         break;
2373     default:
2374         abort();
2375     }
2376     tcg_temp_free(im);
2377     tcg_temp_free(dest);
2378 }
2379 
2380 DISAS_INSN(cas)
2381 {
2382     int opsize;
2383     TCGv addr;
2384     uint16_t ext;
2385     TCGv load;
2386     TCGv cmp;
2387     TCGMemOp opc;
2388 
2389     switch ((insn >> 9) & 3) {
2390     case 1:
2391         opsize = OS_BYTE;
2392         opc = MO_SB;
2393         break;
2394     case 2:
2395         opsize = OS_WORD;
2396         opc = MO_TESW;
2397         break;
2398     case 3:
2399         opsize = OS_LONG;
2400         opc = MO_TESL;
2401         break;
2402     default:
2403         g_assert_not_reached();
2404     }
2405 
2406     ext = read_im16(env, s);
2407 
2408     /* cas Dc,Du,<EA> */
2409 
2410     addr = gen_lea(env, s, insn, opsize);
2411     if (IS_NULL_QREG(addr)) {
2412         gen_addr_fault(s);
2413         return;
2414     }
2415 
2416     cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
2417 
2418     /* if  <EA> == Dc then
2419      *     <EA> = Du
2420      *     Dc = <EA> (because <EA> == Dc)
2421      * else
2422      *     Dc = <EA>
2423      */
2424 
2425     load = tcg_temp_new();
2426     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2427                                IS_USER(s), opc);
2428     /* update flags before Dc is overwritten with the loaded value */
2429     gen_update_cc_cmp(s, load, cmp, opsize);
2430     gen_partset_reg(opsize, DREG(ext, 0), load);
2431 
2432     tcg_temp_free(load);
2433 
2434     switch (extract32(insn, 3, 3)) {
2435     case 3: /* Indirect postincrement.  */
2436         tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2437         break;
2438     case 4: /* Indirect predecrement.  */
2439         tcg_gen_mov_i32(AREG(insn, 0), addr);
2440         break;
2441     }
2442 }
2443 
2444 DISAS_INSN(cas2w)
2445 {
2446     uint16_t ext1, ext2;
2447     TCGv addr1, addr2;
2448     TCGv regs;
2449 
2450     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2451 
2452     ext1 = read_im16(env, s);
2453 
2454     if (ext1 & 0x8000) {
2455         /* Address Register */
2456         addr1 = AREG(ext1, 12);
2457     } else {
2458         /* Data Register */
2459         addr1 = DREG(ext1, 12);
2460     }
2461 
2462     ext2 = read_im16(env, s);
2463     if (ext2 & 0x8000) {
2464         /* Address Register */
2465         addr2 = AREG(ext2, 12);
2466     } else {
2467         /* Data Register */
2468         addr2 = DREG(ext2, 12);
2469     }
2470 
2471     /* if (R1) == Dc1 && (R2) == Dc2 then
2472      *     (R1) = Du1
2473      *     (R2) = Du2
2474      * else
2475      *     Dc1 = (R1)
2476      *     Dc2 = (R2)
2477      */
2478 
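         /* Pack the four data register numbers into one word for the helper:
          * Du2 in bits 2:0, Du1 in bits 5:3, Dc2 in bits 8:6, Dc1 in bits 11:9.
          */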
2479     regs = tcg_const_i32(REG(ext2, 6) |
2480                          (REG(ext1, 6) << 3) |
2481                          (REG(ext2, 0) << 6) |
2482                          (REG(ext1, 0) << 9));
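         /* There is no parallel-safe cas2w helper, so when this TB may run in
          * parallel, ask the main loop to re-execute the insn serially.
          */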
2483     if (tb_cflags(s->tb) & CF_PARALLEL) {
2484         gen_helper_exit_atomic(cpu_env);
2485     } else {
2486         gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2487     }
2488     tcg_temp_free(regs);
2489 
2490     /* Note that the cas2w helper has already assigned to env->cc_op.  */
2491     s->cc_op = CC_OP_CMPW;
2492     s->cc_op_synced = 1;
2493 }
2494 
2495 DISAS_INSN(cas2l)
2496 {
2497     uint16_t ext1, ext2;
2498     TCGv addr1, addr2, regs;
2499 
2500     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2501 
2502     ext1 = read_im16(env, s);
2503 
2504     if (ext1 & 0x8000) {
2505         /* Address Register */
2506         addr1 = AREG(ext1, 12);
2507     } else {
2508         /* Data Register */
2509         addr1 = DREG(ext1, 12);
2510     }
2511 
2512     ext2 = read_im16(env, s);
2513     if (ext2 & 0x8000) {
2514         /* Address Register */
2515         addr2 = AREG(ext2, 12);
2516     } else {
2517         /* Data Register */
2518         addr2 = DREG(ext2, 12);
2519     }
2520 
2521     /* if (R1) == Dc1 && (R2) == Dc2 then
2522      *     (R1) = Du1
2523      *     (R2) = Du2
2524      * else
2525      *     Dc1 = (R1)
2526      *     Dc2 = (R2)
2527      */
2528 
2529     regs = tcg_const_i32(REG(ext2, 6) |
2530                          (REG(ext1, 6) << 3) |
2531                          (REG(ext2, 0) << 6) |
2532                          (REG(ext1, 0) << 9));
2533     if (tb_cflags(s->tb) & CF_PARALLEL) {
2534         gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2535     } else {
2536         gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2537     }
2538     tcg_temp_free(regs);
2539 
2540     /* Note that the cas2l helpers have already assigned to env->cc_op.  */
2541     s->cc_op = CC_OP_CMPL;
2542     s->cc_op_synced = 1;
2543 }
2544 
2545 DISAS_INSN(byterev)
2546 {
2547     TCGv reg;
2548 
2549     reg = DREG(insn, 0);
2550     tcg_gen_bswap32_i32(reg, reg);
2551 }
2552 
2553 DISAS_INSN(move)
2554 {
2555     TCGv src;
2556     TCGv dest;
2557     int op;
2558     int opsize;
2559 
2560     switch (insn >> 12) {
2561     case 1: /* move.b */
2562         opsize = OS_BYTE;
2563         break;
2564     case 2: /* move.l */
2565         opsize = OS_LONG;
2566         break;
2567     case 3: /* move.w */
2568         opsize = OS_WORD;
2569         break;
2570     default:
2571         abort();
2572     }
2573     SRC_EA(env, src, opsize, 1, NULL);
2574     op = (insn >> 6) & 7;
2575     if (op == 1) {
2576         /* movea */
2577         /* The value will already have been sign extended.  */
2578         dest = AREG(insn, 9);
2579         tcg_gen_mov_i32(dest, src);
2580     } else {
2581         /* normal move */
2582         uint16_t dest_ea;
2583         dest_ea = ((insn >> 9) & 7) | (op << 3);
2584         DEST_EA(env, dest_ea, opsize, src, NULL);
2585         /* This will be correct because loads sign extend.  */
2586         gen_logic_cc(s, src, opsize);
2587     }
2588 }
2589 
2590 DISAS_INSN(negx)
2591 {
2592     TCGv z;
2593     TCGv src;
2594     TCGv addr;
2595     int opsize;
2596 
2597     opsize = insn_opsize(insn);
2598     SRC_EA(env, src, opsize, 1, &addr);
2599 
2600     gen_flush_flags(s); /* compute old Z */
2601 
2602     /* Perform subtract with borrow.
2603      * (X, N) = -(src + X);
2604      */
2605 
2606     z = tcg_const_i32(0);
2607     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2608     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2609     tcg_temp_free(z);
2610     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2611 
2612     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2613 
2614     /* Compute signed-overflow for negation.  The normal formula for
2615      * subtraction is (res ^ src) & (src ^ dest), but with dest==0
2616      * this simplifies to res & src.
2617      */
2618 
2619     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2620 
2621     /* Copy the rest of the results into place.  */
2622     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2623     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2624 
2625     set_cc_op(s, CC_OP_FLAGS);
2626 
2627     /* result is in QREG_CC_N */
2628 
2629     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2630 }
2631 
2632 DISAS_INSN(lea)
2633 {
2634     TCGv reg;
2635     TCGv tmp;
2636 
2637     reg = AREG(insn, 9);
2638     tmp = gen_lea(env, s, insn, OS_LONG);
2639     if (IS_NULL_QREG(tmp)) {
2640         gen_addr_fault(s);
2641         return;
2642     }
2643     tcg_gen_mov_i32(reg, tmp);
2644 }
2645 
2646 DISAS_INSN(clr)
2647 {
2648     int opsize;
2649     TCGv zero;
2650 
2651     zero = tcg_const_i32(0);
2652 
2653     opsize = insn_opsize(insn);
2654     DEST_EA(env, insn, opsize, zero, NULL);
2655     gen_logic_cc(s, zero, opsize);
2656     tcg_temp_free(zero);
2657 }
2658 
2659 DISAS_INSN(move_from_ccr)
2660 {
2661     TCGv ccr;
2662 
2663     ccr = gen_get_ccr(s);
2664     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2665 }
2666 
2667 DISAS_INSN(neg)
2668 {
2669     TCGv src1;
2670     TCGv dest;
2671     TCGv addr;
2672     int opsize;
2673 
2674     opsize = insn_opsize(insn);
2675     SRC_EA(env, src1, opsize, 1, &addr);
2676     dest = tcg_temp_new();
2677     tcg_gen_neg_i32(dest, src1);
2678     set_cc_op(s, CC_OP_SUBB + opsize);
2679     gen_update_cc_add(dest, src1, opsize);
2680     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2681     DEST_EA(env, insn, opsize, dest, &addr);
2682     tcg_temp_free(dest);
2683 }
2684 
2685 DISAS_INSN(move_to_ccr)
2686 {
2687     gen_move_to_sr(env, s, insn, true);
2688 }
2689 
2690 DISAS_INSN(not)
2691 {
2692     TCGv src1;
2693     TCGv dest;
2694     TCGv addr;
2695     int opsize;
2696 
2697     opsize = insn_opsize(insn);
2698     SRC_EA(env, src1, opsize, 1, &addr);
2699     dest = tcg_temp_new();
2700     tcg_gen_not_i32(dest, src1);
2701     DEST_EA(env, insn, opsize, dest, &addr);
2702     gen_logic_cc(s, dest, opsize);
2703 }
2704 
2705 DISAS_INSN(swap)
2706 {
2707     TCGv src1;
2708     TCGv src2;
2709     TCGv reg;
2710 
2711     src1 = tcg_temp_new();
2712     src2 = tcg_temp_new();
2713     reg = DREG(insn, 0);
2714     tcg_gen_shli_i32(src1, reg, 16);
2715     tcg_gen_shri_i32(src2, reg, 16);
2716     tcg_gen_or_i32(reg, src1, src2);
2717     tcg_temp_free(src2);
2718     tcg_temp_free(src1);
2719     gen_logic_cc(s, reg, OS_LONG);
2720 }
2721 
2722 DISAS_INSN(bkpt)
2723 {
2724     gen_exception(s, s->insn_pc, EXCP_DEBUG);
2725 }
2726 
2727 DISAS_INSN(pea)
2728 {
2729     TCGv tmp;
2730 
2731     tmp = gen_lea(env, s, insn, OS_LONG);
2732     if (IS_NULL_QREG(tmp)) {
2733         gen_addr_fault(s);
2734         return;
2735     }
2736     gen_push(s, tmp);
2737 }
2738 
2739 DISAS_INSN(ext)
2740 {
2741     int op;
2742     TCGv reg;
2743     TCGv tmp;
2744 
2745     reg = DREG(insn, 0);
2746     op = (insn >> 6) & 7;
2747     tmp = tcg_temp_new();
2748     if (op == 3)
2749         tcg_gen_ext16s_i32(tmp, reg);
2750     else
2751         tcg_gen_ext8s_i32(tmp, reg);
2752     if (op == 2)
2753         gen_partset_reg(OS_WORD, reg, tmp);
2754     else
2755         tcg_gen_mov_i32(reg, tmp);
2756     gen_logic_cc(s, tmp, OS_LONG);
2757     tcg_temp_free(tmp);
2758 }
2759 
2760 DISAS_INSN(tst)
2761 {
2762     int opsize;
2763     TCGv tmp;
2764 
2765     opsize = insn_opsize(insn);
2766     SRC_EA(env, tmp, opsize, 1, NULL);
2767     gen_logic_cc(s, tmp, opsize);
2768 }
2769 
2770 DISAS_INSN(pulse)
2771 {
2772     /* Implemented as a NOP.  */
2773 }
2774 
2775 DISAS_INSN(illegal)
2776 {
2777     gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
2778 }
2779 
2780 /* ??? This should be atomic.  */
2781 DISAS_INSN(tas)
2782 {
2783     TCGv dest;
2784     TCGv src1;
2785     TCGv addr;
2786 
2787     dest = tcg_temp_new();
2788     SRC_EA(env, src1, OS_BYTE, 1, &addr);
2789     gen_logic_cc(s, src1, OS_BYTE);
2790     tcg_gen_ori_i32(dest, src1, 0x80);
2791     DEST_EA(env, insn, OS_BYTE, dest, &addr);
2792     tcg_temp_free(dest);
2793 }
2794 
2795 DISAS_INSN(mull)
2796 {
2797     uint16_t ext;
2798     TCGv src1;
2799     int sign;
2800 
2801     ext = read_im16(env, s);
2802 
2803     sign = ext & 0x800;
2804 
2805     if (ext & 0x400) {
2806         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2807             gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
2808             return;
2809         }
2810 
2811         SRC_EA(env, src1, OS_LONG, 0, NULL);
2812 
2813         if (sign) {
2814             tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2815         } else {
2816             tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2817         }
2818         /* if Dl == Dh, 68040 returns low word */
2819         tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2820         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2821         tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2822 
2823         tcg_gen_movi_i32(QREG_CC_V, 0);
2824         tcg_gen_movi_i32(QREG_CC_C, 0);
2825 
2826         set_cc_op(s, CC_OP_FLAGS);
2827         return;
2828     }
2829     SRC_EA(env, src1, OS_LONG, 0, NULL);
2830     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2831         tcg_gen_movi_i32(QREG_CC_C, 0);
2832         if (sign) {
2833             tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2834             /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2835             tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2836             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2837         } else {
2838             tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2839             /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2840             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2841         }
2842         tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2843         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2844 
2845         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2846 
2847         set_cc_op(s, CC_OP_FLAGS);
2848     } else {
2849         /* The upper 32 bits of the product are discarded, so
2850            muls.l and mulu.l are functionally equivalent.  */
2851         tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2852         gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2853     }
2854 }
2855 
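     /* LINK An,#offset: push An, make An point at the saved value (the new
      * frame pointer), then add the (normally negative) offset to SP to
      * allocate the frame.  The An update is skipped when An is A7 itself,
      * since SP is overwritten by the final add anyway.
      */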
2856 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2857 {
2858     TCGv reg;
2859     TCGv tmp;
2860 
2861     reg = AREG(insn, 0);
2862     tmp = tcg_temp_new();
2863     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2864     gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2865     if ((insn & 7) != 7) {
2866         tcg_gen_mov_i32(reg, tmp);
2867     }
2868     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2869     tcg_temp_free(tmp);
2870 }
2871 
2872 DISAS_INSN(link)
2873 {
2874     int16_t offset;
2875 
2876     offset = read_im16(env, s);
2877     gen_link(s, insn, offset);
2878 }
2879 
2880 DISAS_INSN(linkl)
2881 {
2882     int32_t offset;
2883 
2884     offset = read_im32(env, s);
2885     gen_link(s, insn, offset);
2886 }
2887 
2888 DISAS_INSN(unlk)
2889 {
2890     TCGv src;
2891     TCGv reg;
2892     TCGv tmp;
2893 
2894     src = tcg_temp_new();
2895     reg = AREG(insn, 0);
2896     tcg_gen_mov_i32(src, reg);
2897     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2898     tcg_gen_mov_i32(reg, tmp);
2899     tcg_gen_addi_i32(QREG_SP, src, 4);
2900     tcg_temp_free(src);
2901     tcg_temp_free(tmp);
2902 }
2903 
2904 #if defined(CONFIG_SOFTMMU)
2905 DISAS_INSN(reset)
2906 {
2907     if (IS_USER(s)) {
2908         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
2909         return;
2910     }
2911 
2912     gen_helper_reset(cpu_env);
2913 }
2914 #endif
2915 
2916 DISAS_INSN(nop)
2917 {
2918 }
2919 
2920 DISAS_INSN(rtd)
2921 {
2922     TCGv tmp;
2923     int16_t offset = read_im16(env, s);
2924 
2925     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2926     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2927     gen_jmp(s, tmp);
2928 }
2929 
2930 DISAS_INSN(rts)
2931 {
2932     TCGv tmp;
2933 
2934     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2935     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2936     gen_jmp(s, tmp);
2937 }
2938 
2939 DISAS_INSN(jump)
2940 {
2941     TCGv tmp;
2942 
2943     /* Load the target address first to ensure correct exception
2944        behavior.  */
2945     tmp = gen_lea(env, s, insn, OS_LONG);
2946     if (IS_NULL_QREG(tmp)) {
2947         gen_addr_fault(s);
2948         return;
2949     }
2950     if ((insn & 0x40) == 0) {
2951         /* jsr */
2952         gen_push(s, tcg_const_i32(s->pc));
2953     }
2954     gen_jmp(s, tmp);
2955 }
2956 
2957 DISAS_INSN(addsubq)
2958 {
2959     TCGv src;
2960     TCGv dest;
2961     TCGv val;
2962     int imm;
2963     TCGv addr;
2964     int opsize;
2965 
2966     if ((insn & 070) == 010) {
2967         /* Operation on address register is always long.  */
2968         opsize = OS_LONG;
2969     } else {
2970         opsize = insn_opsize(insn);
2971     }
2972     SRC_EA(env, src, opsize, 1, &addr);
2973     imm = (insn >> 9) & 7;
2974     if (imm == 0) {
2975         imm = 8;
2976     }
2977     val = tcg_const_i32(imm);
2978     dest = tcg_temp_new();
2979     tcg_gen_mov_i32(dest, src);
2980     if ((insn & 0x38) == 0x08) {
2981         /* Don't update condition codes if the destination is an
2982            address register.  */
2983         if (insn & 0x0100) {
2984             tcg_gen_sub_i32(dest, dest, val);
2985         } else {
2986             tcg_gen_add_i32(dest, dest, val);
2987         }
2988     } else {
2989         if (insn & 0x0100) {
2990             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2991             tcg_gen_sub_i32(dest, dest, val);
2992             set_cc_op(s, CC_OP_SUBB + opsize);
2993         } else {
2994             tcg_gen_add_i32(dest, dest, val);
2995             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2996             set_cc_op(s, CC_OP_ADDB + opsize);
2997         }
2998         gen_update_cc_add(dest, val, opsize);
2999     }
3000     tcg_temp_free(val);
3001     DEST_EA(env, insn, opsize, dest, &addr);
3002     tcg_temp_free(dest);
3003 }
3004 
3005 DISAS_INSN(tpf)
3006 {
3007     switch (insn & 7) {
3008     case 2: /* One extension word.  */
3009         s->pc += 2;
3010         break;
3011     case 3: /* Two extension words.  */
3012         s->pc += 4;
3013         break;
3014     case 4: /* No extension words.  */
3015         break;
3016     default:
3017         disas_undef(env, s, insn);
3018     }
3019 }
3020 
3021 DISAS_INSN(branch)
3022 {
3023     int32_t offset;
3024     uint32_t base;
3025     int op;
3026     TCGLabel *l1;
3027 
3028     base = s->pc;
3029     op = (insn >> 8) & 0xf;
3030     offset = (int8_t)insn;
3031     if (offset == 0) {
3032         offset = (int16_t)read_im16(env, s);
3033     } else if (offset == -1) {
3034         offset = read_im32(env, s);
3035     }
3036     if (op == 1) {
3037         /* bsr */
3038         gen_push(s, tcg_const_i32(s->pc));
3039     }
3040     if (op > 1) {
3041         /* Bcc */
3042         l1 = gen_new_label();
3043         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3044         gen_jmp_tb(s, 1, base + offset);
3045         gen_set_label(l1);
3046         gen_jmp_tb(s, 0, s->pc);
3047     } else {
3048         /* Unconditional branch.  */
3049         update_cc_op(s);
3050         gen_jmp_tb(s, 0, base + offset);
3051     }
3052 }
3053 
3054 DISAS_INSN(moveq)
3055 {
3056     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3057     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3058 }
3059 
3060 DISAS_INSN(mvzs)
3061 {
3062     int opsize;
3063     TCGv src;
3064     TCGv reg;
3065 
3066     if (insn & 0x40)
3067         opsize = OS_WORD;
3068     else
3069         opsize = OS_BYTE;
3070     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3071     reg = DREG(insn, 9);
3072     tcg_gen_mov_i32(reg, src);
3073     gen_logic_cc(s, src, opsize);
3074 }
3075 
3076 DISAS_INSN(or)
3077 {
3078     TCGv reg;
3079     TCGv dest;
3080     TCGv src;
3081     TCGv addr;
3082     int opsize;
3083 
3084     opsize = insn_opsize(insn);
3085     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3086     dest = tcg_temp_new();
3087     if (insn & 0x100) {
3088         SRC_EA(env, src, opsize, 0, &addr);
3089         tcg_gen_or_i32(dest, src, reg);
3090         DEST_EA(env, insn, opsize, dest, &addr);
3091     } else {
3092         SRC_EA(env, src, opsize, 0, NULL);
3093         tcg_gen_or_i32(dest, src, reg);
3094         gen_partset_reg(opsize, DREG(insn, 9), dest);
3095     }
3096     gen_logic_cc(s, dest, opsize);
3097     tcg_temp_free(dest);
3098 }
3099 
3100 DISAS_INSN(suba)
3101 {
3102     TCGv src;
3103     TCGv reg;
3104 
3105     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3106     reg = AREG(insn, 9);
3107     tcg_gen_sub_i32(reg, reg, src);
3108 }
3109 
3110 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3111 {
3112     TCGv tmp;
3113 
3114     gen_flush_flags(s); /* compute old Z */
3115 
3116     /* Perform subtract with borrow.
3117      * (X, N) = dest - (src + X);
3118      */
3119 
3120     tmp = tcg_const_i32(0);
3121     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
3122     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
3123     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3124     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3125 
3126     /* Compute signed-overflow for subtraction.  */
3127 
3128     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3129     tcg_gen_xor_i32(tmp, dest, src);
3130     tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3131     tcg_temp_free(tmp);
3132 
3133     /* Copy the rest of the results into place.  */
3134     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3135     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3136 
3137     set_cc_op(s, CC_OP_FLAGS);
3138 
3139     /* result is in QREG_CC_N */
3140 }
3141 
3142 DISAS_INSN(subx_reg)
3143 {
3144     TCGv dest;
3145     TCGv src;
3146     int opsize;
3147 
3148     opsize = insn_opsize(insn);
3149 
3150     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3151     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3152 
3153     gen_subx(s, src, dest, opsize);
3154 
3155     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3156 }
3157 
3158 DISAS_INSN(subx_mem)
3159 {
3160     TCGv src;
3161     TCGv addr_src;
3162     TCGv dest;
3163     TCGv addr_dest;
3164     int opsize;
3165 
3166     opsize = insn_opsize(insn);
3167 
3168     addr_src = AREG(insn, 0);
3169     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3170     src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3171 
3172     addr_dest = AREG(insn, 9);
3173     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3174     dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3175 
3176     gen_subx(s, src, dest, opsize);
3177 
3178     gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3179 
3180     tcg_temp_free(dest);
3181     tcg_temp_free(src);
3182 }
3183 
3184 DISAS_INSN(mov3q)
3185 {
3186     TCGv src;
3187     int val;
3188 
3189     val = (insn >> 9) & 7;
3190     if (val == 0)
3191         val = -1;
3192     src = tcg_const_i32(val);
3193     gen_logic_cc(s, src, OS_LONG);
3194     DEST_EA(env, insn, OS_LONG, src, NULL);
3195     tcg_temp_free(src);
3196 }
3197 
3198 DISAS_INSN(cmp)
3199 {
3200     TCGv src;
3201     TCGv reg;
3202     int opsize;
3203 
3204     opsize = insn_opsize(insn);
3205     SRC_EA(env, src, opsize, 1, NULL);
3206     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3207     gen_update_cc_cmp(s, reg, src, opsize);
3208 }
3209 
3210 DISAS_INSN(cmpa)
3211 {
3212     int opsize;
3213     TCGv src;
3214     TCGv reg;
3215 
3216     if (insn & 0x100) {
3217         opsize = OS_LONG;
3218     } else {
3219         opsize = OS_WORD;
3220     }
3221     SRC_EA(env, src, opsize, 1, NULL);
3222     reg = AREG(insn, 9);
3223     gen_update_cc_cmp(s, reg, src, OS_LONG);
3224 }
3225 
3226 DISAS_INSN(cmpm)
3227 {
3228     int opsize = insn_opsize(insn);
3229     TCGv src, dst;
3230 
3231     /* Post-increment load (mode 3) from Ay.  */
3232     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3233                       NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3234     /* Post-increment load (mode 3) from Ax.  */
3235     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3236                       NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3237 
3238     gen_update_cc_cmp(s, dst, src, opsize);
3239 }
3240 
3241 DISAS_INSN(eor)
3242 {
3243     TCGv src;
3244     TCGv dest;
3245     TCGv addr;
3246     int opsize;
3247 
3248     opsize = insn_opsize(insn);
3249 
3250     SRC_EA(env, src, opsize, 0, &addr);
3251     dest = tcg_temp_new();
3252     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3253     gen_logic_cc(s, dest, opsize);
3254     DEST_EA(env, insn, opsize, dest, &addr);
3255     tcg_temp_free(dest);
3256 }
3257 
3258 static void do_exg(TCGv reg1, TCGv reg2)
3259 {
3260     TCGv temp = tcg_temp_new();
3261     tcg_gen_mov_i32(temp, reg1);
3262     tcg_gen_mov_i32(reg1, reg2);
3263     tcg_gen_mov_i32(reg2, temp);
3264     tcg_temp_free(temp);
3265 }
3266 
3267 DISAS_INSN(exg_dd)
3268 {
3269     /* exchange Dx and Dy */
3270     do_exg(DREG(insn, 9), DREG(insn, 0));
3271 }
3272 
3273 DISAS_INSN(exg_aa)
3274 {
3275     /* exchange Ax and Ay */
3276     do_exg(AREG(insn, 9), AREG(insn, 0));
3277 }
3278 
3279 DISAS_INSN(exg_da)
3280 {
3281     /* exchange Dx and Ay */
3282     do_exg(DREG(insn, 9), AREG(insn, 0));
3283 }
3284 
3285 DISAS_INSN(and)
3286 {
3287     TCGv src;
3288     TCGv reg;
3289     TCGv dest;
3290     TCGv addr;
3291     int opsize;
3292 
3293     dest = tcg_temp_new();
3294 
3295     opsize = insn_opsize(insn);
3296     reg = DREG(insn, 9);
3297     if (insn & 0x100) {
3298         SRC_EA(env, src, opsize, 0, &addr);
3299         tcg_gen_and_i32(dest, src, reg);
3300         DEST_EA(env, insn, opsize, dest, &addr);
3301     } else {
3302         SRC_EA(env, src, opsize, 0, NULL);
3303         tcg_gen_and_i32(dest, src, reg);
3304         gen_partset_reg(opsize, reg, dest);
3305     }
3306     gen_logic_cc(s, dest, opsize);
3307     tcg_temp_free(dest);
3308 }
3309 
3310 DISAS_INSN(adda)
3311 {
3312     TCGv src;
3313     TCGv reg;
3314 
3315     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3316     reg = AREG(insn, 9);
3317     tcg_gen_add_i32(reg, reg, src);
3318 }
3319 
3320 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3321 {
3322     TCGv tmp;
3323 
3324     gen_flush_flags(s); /* compute old Z */
3325 
3326     /* Perform addition with carry.
3327      * (X, N) = src + dest + X;
3328      */
3329 
3330     tmp = tcg_const_i32(0);
3331     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3332     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3333     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3334 
3335     /* Compute signed-overflow for addition.  */
3336 
3337     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3338     tcg_gen_xor_i32(tmp, dest, src);
3339     tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3340     tcg_temp_free(tmp);
3341 
3342     /* Copy the rest of the results into place.  */
3343     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3344     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3345 
3346     set_cc_op(s, CC_OP_FLAGS);
3347 
3348     /* result is in QREG_CC_N */
3349 }
3350 
3351 DISAS_INSN(addx_reg)
3352 {
3353     TCGv dest;
3354     TCGv src;
3355     int opsize;
3356 
3357     opsize = insn_opsize(insn);
3358 
3359     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3360     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3361 
3362     gen_addx(s, src, dest, opsize);
3363 
3364     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3365 }
3366 
3367 DISAS_INSN(addx_mem)
3368 {
3369     TCGv src;
3370     TCGv addr_src;
3371     TCGv dest;
3372     TCGv addr_dest;
3373     int opsize;
3374 
3375     opsize = insn_opsize(insn);
3376 
3377     addr_src = AREG(insn, 0);
3378     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3379     src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3380 
3381     addr_dest = AREG(insn, 9);
3382     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3383     dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3384 
3385     gen_addx(s, src, dest, opsize);
3386 
3387     gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3388 
3389     tcg_temp_free(dest);
3390     tcg_temp_free(src);
3391 }
3392 
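     /* Shift by an immediate count (asl/asr/lsl/lsr #<n>,Dy): the 3-bit count
      * field in the opcode encodes 1..8, with 0 standing for 8.
      */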
3393 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3394 {
3395     int count = (insn >> 9) & 7;
3396     int logical = insn & 8;
3397     int left = insn & 0x100;
3398     int bits = opsize_bytes(opsize) * 8;
3399     TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3400 
3401     if (count == 0) {
3402         count = 8;
3403     }
3404 
3405     tcg_gen_movi_i32(QREG_CC_V, 0);
3406     if (left) {
3407         tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3408         tcg_gen_shli_i32(QREG_CC_N, reg, count);
3409 
3410         /* Note that ColdFire always clears V (done above),
3411            while M68000 sets it if the most significant bit is changed at
3412            any time during the shift operation.  */
3413         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3414             /* if shift count >= bits, V is (reg != 0) */
3415             if (count >= bits) {
3416                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3417             } else {
3418                 TCGv t0 = tcg_temp_new();
3419                 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3420                 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3421                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3422                 tcg_temp_free(t0);
3423             }
3424             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3425         }
3426     } else {
3427         tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3428         if (logical) {
3429             tcg_gen_shri_i32(QREG_CC_N, reg, count);
3430         } else {
3431             tcg_gen_sari_i32(QREG_CC_N, reg, count);
3432         }
3433     }
3434 
3435     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3436     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3437     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3438     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3439 
3440     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3441     set_cc_op(s, CC_OP_FLAGS);
3442 }
3443 
3444 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3445 {
3446     int logical = insn & 8;
3447     int left = insn & 0x100;
3448     int bits = opsize_bytes(opsize) * 8;
3449     TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3450     TCGv s32;
3451     TCGv_i64 t64, s64;
3452 
3453     t64 = tcg_temp_new_i64();
3454     s64 = tcg_temp_new_i64();
3455     s32 = tcg_temp_new();
3456 
3457     /* Note that m68k truncates the shift count modulo 64, not 32.
3458        In addition, a 64-bit shift makes it easy to find "the last
3459        bit shifted out", for the carry flag.  */
3460     tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3461     tcg_gen_extu_i32_i64(s64, s32);
3462     tcg_gen_extu_i32_i64(t64, reg);
3463 
3464     /* Optimistically set V=0.  Also used as a zero source below.  */
3465     tcg_gen_movi_i32(QREG_CC_V, 0);
3466     if (left) {
3467         tcg_gen_shl_i64(t64, t64, s64);
3468 
3469         if (opsize == OS_LONG) {
3470             tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3471             /* Note that C=0 if shift count is 0, and we get that for free.  */
3472         } else {
3473             TCGv zero = tcg_const_i32(0);
3474             tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3475             tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3476             tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3477                                 s32, zero, zero, QREG_CC_C);
3478             tcg_temp_free(zero);
3479         }
3480         tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3481 
3482         /* X = C, but only if the shift count was non-zero.  */
3483         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3484                             QREG_CC_C, QREG_CC_X);
3485 
3486         /* M68000 sets V if the most significant bit is changed at
3487          * any time during the shift operation.  Do this via creating
3488          * an extension of the sign bit, comparing, and discarding
3489          * the bits below the sign bit.  I.e.
3490          *     int64_t s = (intN_t)reg;
3491          *     int64_t t = (int64_t)(intN_t)reg << count;
3492          *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3493          */
3494         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3495             TCGv_i64 tt = tcg_const_i64(32);
3496             /* if shift is greater than 32, use 32 */
3497             tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3498             tcg_temp_free_i64(tt);
3499             /* Sign extend the input to 64 bits; re-do the shift.  */
3500             tcg_gen_ext_i32_i64(t64, reg);
3501             tcg_gen_shl_i64(s64, t64, s64);
3502             /* Clear all bits that are unchanged.  */
3503             tcg_gen_xor_i64(t64, t64, s64);
3504             /* Ignore the bits below the sign bit.  */
3505             tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3506             /* If any bits remain set, we have overflow.  */
3507             tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3508             tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3509             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3510         }
3511     } else {
3512         tcg_gen_shli_i64(t64, t64, 32);
3513         if (logical) {
3514             tcg_gen_shr_i64(t64, t64, s64);
3515         } else {
3516             tcg_gen_sar_i64(t64, t64, s64);
3517         }
3518         tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3519 
3520         /* Note that C=0 if shift count is 0, and we get that for free.  */
3521         tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3522 
3523         /* X = C, but only if the shift count was non-zero.  */
3524         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3525                             QREG_CC_C, QREG_CC_X);
3526     }
3527     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3528     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3529 
3530     tcg_temp_free(s32);
3531     tcg_temp_free_i64(s64);
3532     tcg_temp_free_i64(t64);
3533 
3534     /* Write back the result.  */
3535     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3536     set_cc_op(s, CC_OP_FLAGS);
3537 }
3538 
3539 DISAS_INSN(shift8_im)
3540 {
3541     shift_im(s, insn, OS_BYTE);
3542 }
3543 
3544 DISAS_INSN(shift16_im)
3545 {
3546     shift_im(s, insn, OS_WORD);
3547 }
3548 
3549 DISAS_INSN(shift_im)
3550 {
3551     shift_im(s, insn, OS_LONG);
3552 }
3553 
3554 DISAS_INSN(shift8_reg)
3555 {
3556     shift_reg(s, insn, OS_BYTE);
3557 }
3558 
3559 DISAS_INSN(shift16_reg)
3560 {
3561     shift_reg(s, insn, OS_WORD);
3562 }
3563 
3564 DISAS_INSN(shift_reg)
3565 {
3566     shift_reg(s, insn, OS_LONG);
3567 }
3568 
3569 DISAS_INSN(shift_mem)
3570 {
3571     int logical = insn & 8;
3572     int left = insn & 0x100;
3573     TCGv src;
3574     TCGv addr;
3575 
3576     SRC_EA(env, src, OS_WORD, !logical, &addr);
3577     tcg_gen_movi_i32(QREG_CC_V, 0);
3578     if (left) {
3579         tcg_gen_shri_i32(QREG_CC_C, src, 15);
3580         tcg_gen_shli_i32(QREG_CC_N, src, 1);
3581 
3582         /* Note that ColdFire always clears V,
3583            while M68000 sets it if the most significant bit is changed at
3584            any time during the shift operation.  */
3585         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3586             src = gen_extend(s, src, OS_WORD, 1);
3587             tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3588         }
3589     } else {
3590         tcg_gen_mov_i32(QREG_CC_C, src);
3591         if (logical) {
3592             tcg_gen_shri_i32(QREG_CC_N, src, 1);
3593         } else {
3594             tcg_gen_sari_i32(QREG_CC_N, src, 1);
3595         }
3596     }
3597 
3598     gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3599     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3600     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3601     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3602 
3603     DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3604     set_cc_op(s, CC_OP_FLAGS);
3605 }
3606 
3607 static void rotate(TCGv reg, TCGv shift, int left, int size)
3608 {
3609     switch (size) {
3610     case 8:
3611         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
3612         tcg_gen_ext8u_i32(reg, reg);
3613         tcg_gen_muli_i32(reg, reg, 0x01010101);
3614         goto do_long;
3615     case 16:
3616         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3617         tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3618         goto do_long;
3619     do_long:
3620     default:
3621         if (left) {
3622             tcg_gen_rotl_i32(reg, reg, shift);
3623         } else {
3624             tcg_gen_rotr_i32(reg, reg, shift);
3625         }
3626     }
3627 
3628     /* compute flags */
3629 
3630     switch (size) {
3631     case 8:
3632         tcg_gen_ext8s_i32(reg, reg);
3633         break;
3634     case 16:
3635         tcg_gen_ext16s_i32(reg, reg);
3636         break;
3637     default:
3638         break;
3639     }
3640 
3641     /* QREG_CC_X is not affected */
3642 
3643     tcg_gen_mov_i32(QREG_CC_N, reg);
3644     tcg_gen_mov_i32(QREG_CC_Z, reg);
3645 
3646     if (left) {
3647         tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3648     } else {
3649         tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3650     }
3651 
3652     tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3653 }
3654 
3655 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3656 {
3657     switch (size) {
3658     case 8:
3659         tcg_gen_ext8s_i32(reg, reg);
3660         break;
3661     case 16:
3662         tcg_gen_ext16s_i32(reg, reg);
3663         break;
3664     default:
3665         break;
3666     }
3667     tcg_gen_mov_i32(QREG_CC_N, reg);
3668     tcg_gen_mov_i32(QREG_CC_Z, reg);
3669     tcg_gen_mov_i32(QREG_CC_X, X);
3670     tcg_gen_mov_i32(QREG_CC_C, X);
3671     tcg_gen_movi_i32(QREG_CC_V, 0);
3672 }
3673 
3674 /* Result of rotate_x() is valid if 0 <= shift <= size */
3675 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3676 {
3677     TCGv X, shl, shr, shx, sz, zero;
3678 
3679     sz = tcg_const_i32(size);
3680 
3681     shr = tcg_temp_new();
3682     shl = tcg_temp_new();
3683     shx = tcg_temp_new();
3684     if (left) {
3685         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3686         tcg_gen_movi_i32(shr, size + 1);
3687         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3688         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3689         /* shx = shx < 0 ? size : shx; */
3690         zero = tcg_const_i32(0);
3691         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3692         tcg_temp_free(zero);
3693     } else {
3694         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3695         tcg_gen_movi_i32(shl, size + 1);
3696         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3697         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3698     }
3699 
3700     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3701 
3702     tcg_gen_shl_i32(shl, reg, shl);
3703     tcg_gen_shr_i32(shr, reg, shr);
3704     tcg_gen_or_i32(reg, shl, shr);
3705     tcg_temp_free(shl);
3706     tcg_temp_free(shr);
3707     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3708     tcg_gen_or_i32(reg, reg, shx);
3709     tcg_temp_free(shx);
3710 
3711     /* X = (reg >> size) & 1 */
3712 
3713     X = tcg_temp_new();
3714     tcg_gen_shr_i32(X, reg, sz);
3715     tcg_gen_andi_i32(X, X, 1);
3716     tcg_temp_free(sz);
3717 
3718     return X;
3719 }
3720 
3721 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
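/*
 * 32-bit rotate through X.  The operand and the X bit are packed into
 * a 64-bit value, rotated with a real 64-bit rotate, and the result
 * and the new X are then extracted from the two halves.  A shift
 * count of zero must leave both the register and X untouched, which
 * the final movconds take care of.
 */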
3722 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3723 {
3724     TCGv_i64 t0, shift64;
3725     TCGv X, lo, hi, zero;
3726 
3727     shift64 = tcg_temp_new_i64();
3728     tcg_gen_extu_i32_i64(shift64, shift);
3729 
3730     t0 = tcg_temp_new_i64();
3731 
3732     X = tcg_temp_new();
3733     lo = tcg_temp_new();
3734     hi = tcg_temp_new();
3735 
3736     if (left) {
3737         /* create [reg:X:..] */
3738 
3739         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3740         tcg_gen_concat_i32_i64(t0, lo, reg);
3741 
3742         /* rotate */
3743 
3744         tcg_gen_rotl_i64(t0, t0, shift64);
3745         tcg_temp_free_i64(shift64);
3746 
3747         /* result is [reg:..:reg:X] */
3748 
3749         tcg_gen_extr_i64_i32(lo, hi, t0);
3750         tcg_gen_andi_i32(X, lo, 1);
3751 
3752         tcg_gen_shri_i32(lo, lo, 1);
3753     } else {
3754         /* create [..:X:reg] */
3755 
3756         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3757 
3758         tcg_gen_rotr_i64(t0, t0, shift64);
3759         tcg_temp_free_i64(shift64);
3760 
3761         /* result is value: [X:reg:..:reg] */
3762 
3763         tcg_gen_extr_i64_i32(lo, hi, t0);
3764 
3765         /* extract X */
3766 
3767         tcg_gen_shri_i32(X, hi, 31);
3768 
3769         /* extract result */
3770 
3771         tcg_gen_shli_i32(hi, hi, 1);
3772     }
3773     tcg_temp_free_i64(t0);
3774     tcg_gen_or_i32(lo, lo, hi);
3775     tcg_temp_free(hi);
3776 
3777     /* if shift == 0, the register and X are not affected */
3778 
3779     zero = tcg_const_i32(0);
3780     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3781     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3782     tcg_temp_free(zero);
3783     tcg_temp_free(lo);
3784 
3785     return X;
3786 }
3787 
3788 DISAS_INSN(rotate_im)
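/*
 * Rotates of a data register by an immediate count.  The 3-bit count
 * field encodes 1..8 (0 means 8); bit 8 of the insn selects the
 * direction and bit 3 selects a plain rotate versus a rotate through
 * the X bit.
 */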
3789 {
3790     TCGv shift;
3791     int tmp;
3792     int left = (insn & 0x100);
3793 
3794     tmp = (insn >> 9) & 7;
3795     if (tmp == 0) {
3796         tmp = 8;
3797     }
3798 
3799     shift = tcg_const_i32(tmp);
3800     if (insn & 8) {
3801         rotate(DREG(insn, 0), shift, left, 32);
3802     } else {
3803         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3804         rotate_x_flags(DREG(insn, 0), X, 32);
3805         tcg_temp_free(X);
3806     }
3807     tcg_temp_free(shift);
3808 
3809     set_cc_op(s, CC_OP_FLAGS);
3810 }
3811 
3812 DISAS_INSN(rotate8_im)
3813 {
3814     int left = (insn & 0x100);
3815     TCGv reg;
3816     TCGv shift;
3817     int tmp;
3818 
3819     reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3820 
3821     tmp = (insn >> 9) & 7;
3822     if (tmp == 0) {
3823         tmp = 8;
3824     }
3825 
3826     shift = tcg_const_i32(tmp);
3827     if (insn & 8) {
3828         rotate(reg, shift, left, 8);
3829     } else {
3830         TCGv X = rotate_x(reg, shift, left, 8);
3831         rotate_x_flags(reg, X, 8);
3832         tcg_temp_free(X);
3833     }
3834     tcg_temp_free(shift);
3835     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3836     set_cc_op(s, CC_OP_FLAGS);
3837 }
3838 
3839 DISAS_INSN(rotate16_im)
3840 {
3841     int left = (insn & 0x100);
3842     TCGv reg;
3843     TCGv shift;
3844     int tmp;
3845 
3846     reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3847     tmp = (insn >> 9) & 7;
3848     if (tmp == 0) {
3849         tmp = 8;
3850     }
3851 
3852     shift = tcg_const_i32(tmp);
3853     if (insn & 8) {
3854         rotate(reg, shift, left, 16);
3855     } else {
3856         TCGv X = rotate_x(reg, shift, left, 16);
3857         rotate_x_flags(reg, X, 16);
3858         tcg_temp_free(X);
3859     }
3860     tcg_temp_free(shift);
3861     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3862     set_cc_op(s, CC_OP_FLAGS);
3863 }
3864 
3865 DISAS_INSN(rotate_reg)
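/*
 * Rotates of a data register by a count taken from another register.
 * The raw count is reduced modulo 64; a plain rotate then only uses
 * count % size but must still clear C when the full count is zero,
 * while a rotate through X uses count % (size + 1).
 */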
3866 {
3867     TCGv reg;
3868     TCGv src;
3869     TCGv t0, t1;
3870     int left = (insn & 0x100);
3871 
3872     reg = DREG(insn, 0);
3873     src = DREG(insn, 9);
3874     /* shift in [0..63] */
3875     t0 = tcg_temp_new();
3876     tcg_gen_andi_i32(t0, src, 63);
3877     t1 = tcg_temp_new_i32();
3878     if (insn & 8) {
3879         tcg_gen_andi_i32(t1, src, 31);
3880         rotate(reg, t1, left, 32);
3881         /* if shift == 0, clear C */
3882         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3883                             t0, QREG_CC_V /* 0 */,
3884                             QREG_CC_V /* 0 */, QREG_CC_C);
3885     } else {
3886         TCGv X;
3887         /* modulo 33 */
3888         tcg_gen_movi_i32(t1, 33);
3889         tcg_gen_remu_i32(t1, t0, t1);
3890         X = rotate32_x(DREG(insn, 0), t1, left);
3891         rotate_x_flags(DREG(insn, 0), X, 32);
3892         tcg_temp_free(X);
3893     }
3894     tcg_temp_free(t1);
3895     tcg_temp_free(t0);
3896     set_cc_op(s, CC_OP_FLAGS);
3897 }
3898 
3899 DISAS_INSN(rotate8_reg)
3900 {
3901     TCGv reg;
3902     TCGv src;
3903     TCGv t0, t1;
3904     int left = (insn & 0x100);
3905 
3906     reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3907     src = DREG(insn, 9);
3908     /* shift in [0..63] */
3909     t0 = tcg_temp_new_i32();
3910     tcg_gen_andi_i32(t0, src, 63);
3911     t1 = tcg_temp_new_i32();
3912     if (insn & 8) {
3913         tcg_gen_andi_i32(t1, src, 7);
3914         rotate(reg, t1, left, 8);
3915         /* if shift == 0, clear C */
3916         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3917                             t0, QREG_CC_V /* 0 */,
3918                             QREG_CC_V /* 0 */, QREG_CC_C);
3919     } else {
3920         TCGv X;
3921         /* modulo 9 */
3922         tcg_gen_movi_i32(t1, 9);
3923         tcg_gen_remu_i32(t1, t0, t1);
3924         X = rotate_x(reg, t1, left, 8);
3925         rotate_x_flags(reg, X, 8);
3926         tcg_temp_free(X);
3927     }
3928     tcg_temp_free(t1);
3929     tcg_temp_free(t0);
3930     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3931     set_cc_op(s, CC_OP_FLAGS);
3932 }
3933 
3934 DISAS_INSN(rotate16_reg)
3935 {
3936     TCGv reg;
3937     TCGv src;
3938     TCGv t0, t1;
3939     int left = (insn & 0x100);
3940 
3941     reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3942     src = DREG(insn, 9);
3943     /* shift in [0..63] */
3944     t0 = tcg_temp_new_i32();
3945     tcg_gen_andi_i32(t0, src, 63);
3946     t1 = tcg_temp_new_i32();
3947     if (insn & 8) {
3948         tcg_gen_andi_i32(t1, src, 15);
3949         rotate(reg, t1, left, 16);
3950         /* if shift == 0, clear C */
3951         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3952                             t0, QREG_CC_V /* 0 */,
3953                             QREG_CC_V /* 0 */, QREG_CC_C);
3954     } else {
3955         TCGv X;
3956         /* modulo 17 */
3957         tcg_gen_movi_i32(t1, 17);
3958         tcg_gen_remu_i32(t1, t0, t1);
3959         X = rotate_x(reg, t1, left, 16);
3960         rotate_x_flags(reg, X, 16);
3961         tcg_temp_free(X);
3962     }
3963     tcg_temp_free(t1);
3964     tcg_temp_free(t0);
3965     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3966     set_cc_op(s, CC_OP_FLAGS);
3967 }
3968 
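/*
 * Memory rotates always act on a word with a count of one; bit 9 of
 * the insn selects a plain rotate versus a rotate through the X bit.
 */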
3969 DISAS_INSN(rotate_mem)
3970 {
3971     TCGv src;
3972     TCGv addr;
3973     TCGv shift;
3974     int left = (insn & 0x100);
3975 
3976     SRC_EA(env, src, OS_WORD, 0, &addr);
3977 
3978     shift = tcg_const_i32(1);
3979     if (insn & 0x0200) {
3980         rotate(src, shift, left, 16);
3981     } else {
3982         TCGv X = rotate_x(src, shift, left, 16);
3983         rotate_x_flags(src, X, 16);
3984         tcg_temp_free(X);
3985     }
3986     tcg_temp_free(shift);
3987     DEST_EA(env, insn, OS_WORD, src, &addr);
3988     set_cc_op(s, CC_OP_FLAGS);
3989 }
3990 
3991 DISAS_INSN(bfext_reg)
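/*
 * BFEXTU/BFEXTS with a data register source.  The width encodes 1..32
 * (0 meaning 32) and the offset is taken modulo 32; either may come
 * from the extension word or from a data register.  The field is
 * rotated to a known position and then extracted with a shift pair or
 * (s)extract; CC_N always receives the sign-extended field so the
 * flags can be computed from it.
 */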
3992 {
3993     int ext = read_im16(env, s);
3994     int is_sign = insn & 0x200;
3995     TCGv src = DREG(insn, 0);
3996     TCGv dst = DREG(ext, 12);
3997     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3998     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3999     int pos = 32 - ofs - len;        /* little bit-endian */
4000     TCGv tmp = tcg_temp_new();
4001     TCGv shift;
4002 
4003     /* In general, we're going to rotate the field so that it's at the
4004        top of the word and then right-shift by the complement of the
4005        width to extend the field.  */
4006     if (ext & 0x20) {
4007         /* Variable width.  */
4008         if (ext & 0x800) {
4009             /* Variable offset.  */
4010             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4011             tcg_gen_rotl_i32(tmp, src, tmp);
4012         } else {
4013             tcg_gen_rotli_i32(tmp, src, ofs);
4014         }
4015 
4016         shift = tcg_temp_new();
4017         tcg_gen_neg_i32(shift, DREG(ext, 0));
4018         tcg_gen_andi_i32(shift, shift, 31);
4019         tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
4020         if (is_sign) {
4021             tcg_gen_mov_i32(dst, QREG_CC_N);
4022         } else {
4023             tcg_gen_shr_i32(dst, tmp, shift);
4024         }
4025         tcg_temp_free(shift);
4026     } else {
4027         /* Immediate width.  */
4028         if (ext & 0x800) {
4029             /* Variable offset */
4030             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4031             tcg_gen_rotl_i32(tmp, src, tmp);
4032             src = tmp;
4033             pos = 32 - len;
4034         } else {
4035             /* Immediate offset.  If the field doesn't wrap around the
4036                end of the word, rely on (s)extract completely.  */
4037             if (pos < 0) {
4038                 tcg_gen_rotli_i32(tmp, src, ofs);
4039                 src = tmp;
4040                 pos = 32 - len;
4041             }
4042         }
4043 
4044         tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
4045         if (is_sign) {
4046             tcg_gen_mov_i32(dst, QREG_CC_N);
4047         } else {
4048             tcg_gen_extract_i32(dst, src, pos, len);
4049         }
4050     }
4051 
4052     tcg_temp_free(tmp);
4053     set_cc_op(s, CC_OP_LOGIC);
4054 }
4055 
4056 DISAS_INSN(bfext_mem)
4057 {
4058     int ext = read_im16(env, s);
4059     int is_sign = insn & 0x200;
4060     TCGv dest = DREG(ext, 12);
4061     TCGv addr, len, ofs;
4062 
4063     addr = gen_lea(env, s, insn, OS_UNSIZED);
4064     if (IS_NULL_QREG(addr)) {
4065         gen_addr_fault(s);
4066         return;
4067     }
4068 
4069     if (ext & 0x20) {
4070         len = DREG(ext, 0);
4071     } else {
4072         len = tcg_const_i32(extract32(ext, 0, 5));
4073     }
4074     if (ext & 0x800) {
4075         ofs = DREG(ext, 6);
4076     } else {
4077         ofs = tcg_const_i32(extract32(ext, 6, 5));
4078     }
4079 
4080     if (is_sign) {
4081         gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
4082         tcg_gen_mov_i32(QREG_CC_N, dest);
4083     } else {
4084         TCGv_i64 tmp = tcg_temp_new_i64();
4085         gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
4086         tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
4087         tcg_temp_free_i64(tmp);
4088     }
4089     set_cc_op(s, CC_OP_LOGIC);
4090 
4091     if (!(ext & 0x20)) {
4092         tcg_temp_free(len);
4093     }
4094     if (!(ext & 0x800)) {
4095         tcg_temp_free(ofs);
4096     }
4097 }
4098 
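/*
 * BFTST/BFCHG/BFCLR/BFSET/BFFFO with a data register operand.  A mask
 * with zeros in the field positions is built (rotated into place when
 * the offset or the width is variable) and the field itself is rotated
 * to the top of CC_N for the flags; the register update is then a
 * single logical operation with that mask.  BFFFO additionally needs
 * the numeric offset and length, kept in tofs/tlen for its helper.
 */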
4099 DISAS_INSN(bfop_reg)
4100 {
4101     int ext = read_im16(env, s);
4102     TCGv src = DREG(insn, 0);
4103     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4104     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4105     TCGv mask, tofs, tlen;
4106 
4107     tofs = NULL;
4108     tlen = NULL;
4109     if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4110         tofs = tcg_temp_new();
4111         tlen = tcg_temp_new();
4112     }
4113 
4114     if ((ext & 0x820) == 0) {
4115         /* Immediate width and offset.  */
4116         uint32_t maski = 0x7fffffffu >> (len - 1);
4117         if (ofs + len <= 32) {
4118             tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4119         } else {
4120             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4121         }
4122         tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4123         mask = tcg_const_i32(ror32(maski, ofs));
4124         if (tofs) {
4125             tcg_gen_movi_i32(tofs, ofs);
4126             tcg_gen_movi_i32(tlen, len);
4127         }
4128     } else {
4129         TCGv tmp = tcg_temp_new();
4130         if (ext & 0x20) {
4131             /* Variable width */
4132             tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4133             tcg_gen_andi_i32(tmp, tmp, 31);
4134             mask = tcg_const_i32(0x7fffffffu);
4135             tcg_gen_shr_i32(mask, mask, tmp);
4136             if (tlen) {
4137                 tcg_gen_addi_i32(tlen, tmp, 1);
4138             }
4139         } else {
4140             /* Immediate width */
4141             mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4142             if (tlen) {
4143                 tcg_gen_movi_i32(tlen, len);
4144             }
4145         }
4146         if (ext & 0x800) {
4147             /* Variable offset */
4148             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4149             tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4150             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4151             tcg_gen_rotr_i32(mask, mask, tmp);
4152             if (tofs) {
4153                 tcg_gen_mov_i32(tofs, tmp);
4154             }
4155         } else {
4156             /* Immediate offset (and variable width) */
4157             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4158             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4159             tcg_gen_rotri_i32(mask, mask, ofs);
4160             if (tofs) {
4161                 tcg_gen_movi_i32(tofs, ofs);
4162             }
4163         }
4164         tcg_temp_free(tmp);
4165     }
4166     set_cc_op(s, CC_OP_LOGIC);
4167 
4168     switch (insn & 0x0f00) {
4169     case 0x0a00: /* bfchg */
4170         tcg_gen_eqv_i32(src, src, mask);
4171         break;
4172     case 0x0c00: /* bfclr */
4173         tcg_gen_and_i32(src, src, mask);
4174         break;
4175     case 0x0d00: /* bfffo */
4176         gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4177         tcg_temp_free(tlen);
4178         tcg_temp_free(tofs);
4179         break;
4180     case 0x0e00: /* bfset */
4181         tcg_gen_orc_i32(src, src, mask);
4182         break;
4183     case 0x0800: /* bftst */
4184         /* flags already set; no other work to do.  */
4185         break;
4186     default:
4187         g_assert_not_reached();
4188     }
4189     tcg_temp_free(mask);
4190 }
4191 
4192 DISAS_INSN(bfop_mem)
4193 {
4194     int ext = read_im16(env, s);
4195     TCGv addr, len, ofs;
4196     TCGv_i64 t64;
4197 
4198     addr = gen_lea(env, s, insn, OS_UNSIZED);
4199     if (IS_NULL_QREG(addr)) {
4200         gen_addr_fault(s);
4201         return;
4202     }
4203 
4204     if (ext & 0x20) {
4205         len = DREG(ext, 0);
4206     } else {
4207         len = tcg_const_i32(extract32(ext, 0, 5));
4208     }
4209     if (ext & 0x800) {
4210         ofs = DREG(ext, 6);
4211     } else {
4212         ofs = tcg_const_i32(extract32(ext, 6, 5));
4213     }
4214 
4215     switch (insn & 0x0f00) {
4216     case 0x0a00: /* bfchg */
4217         gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4218         break;
4219     case 0x0c00: /* bfclr */
4220         gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4221         break;
4222     case 0x0d00: /* bfffo */
4223         t64 = tcg_temp_new_i64();
4224         gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4225         tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4226         tcg_temp_free_i64(t64);
4227         break;
4228     case 0x0e00: /* bfset */
4229         gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4230         break;
4231     case 0x0800: /* bftst */
4232         gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4233         break;
4234     default:
4235         g_assert_not_reached();
4236     }
4237     set_cc_op(s, CC_OP_LOGIC);
4238 
4239     if (!(ext & 0x20)) {
4240         tcg_temp_free(len);
4241     }
4242     if (!(ext & 0x800)) {
4243         tcg_temp_free(ofs);
4244     }
4245 }
4246 
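/*
 * BFINS with a data register destination.  The source shifted to the
 * top of the word provides the flags.  When both offset and width are
 * immediate and the field does not wrap around bit 0, a plain deposit
 * is enough; otherwise the low bits of the source and a matching mask
 * are rotated into position and merged into the destination.
 */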
4247 DISAS_INSN(bfins_reg)
4248 {
4249     int ext = read_im16(env, s);
4250     TCGv dst = DREG(insn, 0);
4251     TCGv src = DREG(ext, 12);
4252     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4253     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4254     int pos = 32 - ofs - len;        /* little bit-endian */
4255     TCGv tmp;
4256 
4257     tmp = tcg_temp_new();
4258 
4259     if (ext & 0x20) {
4260         /* Variable width */
4261         tcg_gen_neg_i32(tmp, DREG(ext, 0));
4262         tcg_gen_andi_i32(tmp, tmp, 31);
4263         tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4264     } else {
4265         /* Immediate width */
4266         tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4267     }
4268     set_cc_op(s, CC_OP_LOGIC);
4269 
4270     /* Immediate width and offset */
4271     if ((ext & 0x820) == 0) {
4272         /* Check for suitability for deposit.  */
4273         if (pos >= 0) {
4274             tcg_gen_deposit_i32(dst, dst, src, pos, len);
4275         } else {
4276             uint32_t maski = -2U << (len - 1);
4277             uint32_t roti = (ofs + len) & 31;
4278             tcg_gen_andi_i32(tmp, src, ~maski);
4279             tcg_gen_rotri_i32(tmp, tmp, roti);
4280             tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4281             tcg_gen_or_i32(dst, dst, tmp);
4282         }
4283     } else {
4284         TCGv mask = tcg_temp_new();
4285         TCGv rot = tcg_temp_new();
4286 
4287         if (ext & 0x20) {
4288             /* Variable width */
4289             tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4290             tcg_gen_andi_i32(rot, rot, 31);
4291             tcg_gen_movi_i32(mask, -2);
4292             tcg_gen_shl_i32(mask, mask, rot);
4293             tcg_gen_mov_i32(rot, DREG(ext, 0));
4294             tcg_gen_andc_i32(tmp, src, mask);
4295         } else {
4296             /* Immediate width (variable offset) */
4297             uint32_t maski = -2U << (len - 1);
4298             tcg_gen_andi_i32(tmp, src, ~maski);
4299             tcg_gen_movi_i32(mask, maski);
4300             tcg_gen_movi_i32(rot, len & 31);
4301         }
4302         if (ext & 0x800) {
4303             /* Variable offset */
4304             tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4305         } else {
4306             /* Immediate offset (variable width) */
4307             tcg_gen_addi_i32(rot, rot, ofs);
4308         }
4309         tcg_gen_andi_i32(rot, rot, 31);
4310         tcg_gen_rotr_i32(mask, mask, rot);
4311         tcg_gen_rotr_i32(tmp, tmp, rot);
4312         tcg_gen_and_i32(dst, dst, mask);
4313         tcg_gen_or_i32(dst, dst, tmp);
4314 
4315         tcg_temp_free(rot);
4316         tcg_temp_free(mask);
4317     }
4318     tcg_temp_free(tmp);
4319 }
4320 
4321 DISAS_INSN(bfins_mem)
4322 {
4323     int ext = read_im16(env, s);
4324     TCGv src = DREG(ext, 12);
4325     TCGv addr, len, ofs;
4326 
4327     addr = gen_lea(env, s, insn, OS_UNSIZED);
4328     if (IS_NULL_QREG(addr)) {
4329         gen_addr_fault(s);
4330         return;
4331     }
4332 
4333     if (ext & 0x20) {
4334         len = DREG(ext, 0);
4335     } else {
4336         len = tcg_const_i32(extract32(ext, 0, 5));
4337     }
4338     if (ext & 0x800) {
4339         ofs = DREG(ext, 6);
4340     } else {
4341         ofs = tcg_const_i32(extract32(ext, 6, 5));
4342     }
4343 
4344     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4345     set_cc_op(s, CC_OP_LOGIC);
4346 
4347     if (!(ext & 0x20)) {
4348         tcg_temp_free(len);
4349     }
4350     if (!(ext & 0x800)) {
4351         tcg_temp_free(ofs);
4352     }
4353 }
4354 
4355 DISAS_INSN(ff1)
4356 {
4357     TCGv reg;
4358     reg = DREG(insn, 0);
4359     gen_logic_cc(s, reg, OS_LONG);
4360     gen_helper_ff1(reg, reg);
4361 }
4362 
4363 DISAS_INSN(chk)
4364 {
4365     TCGv src, reg;
4366     int opsize;
4367 
4368     switch ((insn >> 7) & 3) {
4369     case 3:
4370         opsize = OS_WORD;
4371         break;
4372     case 2:
4373         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4374             opsize = OS_LONG;
4375             break;
4376         }
4377         /* fallthru */
4378     default:
4379         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4380         return;
4381     }
4382     SRC_EA(env, src, opsize, 1, NULL);
4383     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4384 
4385     gen_flush_flags(s);
4386     gen_helper_chk(cpu_env, reg, src);
4387 }
4388 
4389 DISAS_INSN(chk2)
4390 {
4391     uint16_t ext;
4392     TCGv addr1, addr2, bound1, bound2, reg;
4393     int opsize;
4394 
4395     switch ((insn >> 9) & 3) {
4396     case 0:
4397         opsize = OS_BYTE;
4398         break;
4399     case 1:
4400         opsize = OS_WORD;
4401         break;
4402     case 2:
4403         opsize = OS_LONG;
4404         break;
4405     default:
4406         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4407         return;
4408     }
4409 
4410     ext = read_im16(env, s);
4411     if ((ext & 0x0800) == 0) {
4412         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4413         return;
4414     }
4415 
4416     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4417     addr2 = tcg_temp_new();
4418     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4419 
4420     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4421     tcg_temp_free(addr1);
4422     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4423     tcg_temp_free(addr2);
4424 
4425     reg = tcg_temp_new();
4426     if (ext & 0x8000) {
4427         tcg_gen_mov_i32(reg, AREG(ext, 12));
4428     } else {
4429         gen_ext(reg, DREG(ext, 12), opsize, 1);
4430     }
4431 
4432     gen_flush_flags(s);
4433     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4434     tcg_temp_free(reg);
4435     tcg_temp_free(bound1);
4436     tcg_temp_free(bound2);
4437 }
4438 
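/*
 * MOVE16 line copy: both addresses are aligned down to a 16-byte
 * boundary and the line is transferred as two 64-bit loads followed
 * by two 64-bit stores.
 */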
4439 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4440 {
4441     TCGv addr;
4442     TCGv_i64 t0, t1;
4443 
4444     addr = tcg_temp_new();
4445 
4446     t0 = tcg_temp_new_i64();
4447     t1 = tcg_temp_new_i64();
4448 
4449     tcg_gen_andi_i32(addr, src, ~15);
4450     tcg_gen_qemu_ld64(t0, addr, index);
4451     tcg_gen_addi_i32(addr, addr, 8);
4452     tcg_gen_qemu_ld64(t1, addr, index);
4453 
4454     tcg_gen_andi_i32(addr, dst, ~15);
4455     tcg_gen_qemu_st64(t0, addr, index);
4456     tcg_gen_addi_i32(addr, addr, 8);
4457     tcg_gen_qemu_st64(t1, addr, index);
4458 
4459     tcg_temp_free_i64(t0);
4460     tcg_temp_free_i64(t1);
4461     tcg_temp_free(addr);
4462 }
4463 
4464 DISAS_INSN(move16_reg)
4465 {
4466     int index = IS_USER(s);
4467     TCGv tmp;
4468     uint16_t ext;
4469 
4470     ext = read_im16(env, s);
4471     if ((ext & (1 << 15)) == 0) {
4472         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4473     }
4474 
4475     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4476 
4477     /* Ax can be Ay, so save Ay before incrementing Ax */
4478     tmp = tcg_temp_new();
4479     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4480     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4481     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4482     tcg_temp_free(tmp);
4483 }
4484 
4485 DISAS_INSN(move16_mem)
4486 {
4487     int index = IS_USER(s);
4488     TCGv reg, addr;
4489 
4490     reg = AREG(insn, 0);
4491     addr = tcg_const_i32(read_im32(env, s));
4492 
4493     if ((insn >> 3) & 1) {
4494         /* MOVE16 (xxx).L, (Ay) */
4495         m68k_copy_line(reg, addr, index);
4496     } else {
4497         /* MOVE16 (Ay), (xxx).L */
4498         m68k_copy_line(addr, reg, index);
4499     }
4500 
4501     tcg_temp_free(addr);
4502 
4503     if (((insn >> 3) & 2) == 0) {
4504         /* (Ay)+ */
4505         tcg_gen_addi_i32(reg, reg, 16);
4506     }
4507 }
4508 
4509 DISAS_INSN(strldsr)
4510 {
4511     uint16_t ext;
4512     uint32_t addr;
4513 
4514     addr = s->pc - 2;
4515     ext = read_im16(env, s);
4516     if (ext != 0x46FC) {
4517         gen_exception(s, addr, EXCP_UNSUPPORTED);
4518         return;
4519     }
4520     ext = read_im16(env, s);
4521     if (IS_USER(s) || (ext & SR_S) == 0) {
4522         gen_exception(s, addr, EXCP_PRIVILEGE);
4523         return;
4524     }
4525     gen_push(s, gen_get_sr(s));
4526     gen_set_sr_im(s, ext, 0);
4527 }
4528 
4529 DISAS_INSN(move_from_sr)
4530 {
4531     TCGv sr;
4532 
4533     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4534         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4535         return;
4536     }
4537     sr = gen_get_sr(s);
4538     DEST_EA(env, insn, OS_WORD, sr, NULL);
4539 }
4540 
4541 #if defined(CONFIG_SOFTMMU)
4542 DISAS_INSN(moves)
4543 {
4544     int opsize;
4545     uint16_t ext;
4546     TCGv reg;
4547     TCGv addr;
4548     int extend;
4549 
4550     if (IS_USER(s)) {
4551         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4552         return;
4553     }
4554 
4555     ext = read_im16(env, s);
4556 
4557     opsize = insn_opsize(insn);
4558 
4559     if (ext & 0x8000) {
4560         /* address register */
4561         reg = AREG(ext, 12);
4562         extend = 1;
4563     } else {
4564         /* data register */
4565         reg = DREG(ext, 12);
4566         extend = 0;
4567     }
4568 
4569     addr = gen_lea(env, s, insn, opsize);
4570     if (IS_NULL_QREG(addr)) {
4571         gen_addr_fault(s);
4572         return;
4573     }
4574 
4575     if (ext & 0x0800) {
4576         /* from reg to ea */
4577         gen_store(s, opsize, addr, reg, DFC_INDEX(s));
4578     } else {
4579         /* from ea to reg */
4580         TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
4581         if (extend) {
4582             gen_ext(reg, tmp, opsize, 1);
4583         } else {
4584             gen_partset_reg(opsize, reg, tmp);
4585         }
4586         tcg_temp_free(tmp);
4587     }
4588     switch (extract32(insn, 3, 3)) {
4589     case 3: /* Indirect postincrement.  */
4590         tcg_gen_addi_i32(AREG(insn, 0), addr,
4591                          REG(insn, 0) == 7 && opsize == OS_BYTE
4592                          ? 2
4593                          : opsize_bytes(opsize));
4594         break;
4595     case 4: /* Indirect predecrement.  */
4596         tcg_gen_mov_i32(AREG(insn, 0), addr);
4597         break;
4598     }
4599 }
4600 
4601 DISAS_INSN(move_to_sr)
4602 {
4603     if (IS_USER(s)) {
4604         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4605         return;
4606     }
4607     gen_move_to_sr(env, s, insn, false);
4608     gen_lookup_tb(s);
4609 }
4610 
4611 DISAS_INSN(move_from_usp)
4612 {
4613     if (IS_USER(s)) {
4614         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4615         return;
4616     }
4617     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4618                    offsetof(CPUM68KState, sp[M68K_USP]));
4619 }
4620 
4621 DISAS_INSN(move_to_usp)
4622 {
4623     if (IS_USER(s)) {
4624         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4625         return;
4626     }
4627     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4628                    offsetof(CPUM68KState, sp[M68K_USP]));
4629 }
4630 
4631 DISAS_INSN(halt)
4632 {
4633     if (IS_USER(s)) {
4634         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4635         return;
4636     }
4637 
4638     gen_exception(s, s->pc, EXCP_HALT_INSN);
4639 }
4640 
4641 DISAS_INSN(stop)
4642 {
4643     uint16_t ext;
4644 
4645     if (IS_USER(s)) {
4646         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4647         return;
4648     }
4649 
4650     ext = read_im16(env, s);
4651 
4652     gen_set_sr_im(s, ext, 0);
4653     tcg_gen_movi_i32(cpu_halted, 1);
4654     gen_exception(s, s->pc, EXCP_HLT);
4655 }
4656 
4657 DISAS_INSN(rte)
4658 {
4659     if (IS_USER(s)) {
4660         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4661         return;
4662     }
4663     gen_exception(s, s->insn_pc, EXCP_RTE);
4664 }
4665 
4666 DISAS_INSN(cf_movec)
4667 {
4668     uint16_t ext;
4669     TCGv reg;
4670 
4671     if (IS_USER(s)) {
4672         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4673         return;
4674     }
4675 
4676     ext = read_im16(env, s);
4677 
4678     if (ext & 0x8000) {
4679         reg = AREG(ext, 12);
4680     } else {
4681         reg = DREG(ext, 12);
4682     }
4683     gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4684     gen_lookup_tb(s);
4685 }
4686 
4687 DISAS_INSN(m68k_movec)
4688 {
4689     uint16_t ext;
4690     TCGv reg;
4691 
4692     if (IS_USER(s)) {
4693         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4694         return;
4695     }
4696 
4697     ext = read_im16(env, s);
4698 
4699     if (ext & 0x8000) {
4700         reg = AREG(ext, 12);
4701     } else {
4702         reg = DREG(ext, 12);
4703     }
4704     if (insn & 1) {
4705         gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4706     } else {
4707         gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4708     }
4709     gen_lookup_tb(s);
4710 }
4711 
4712 DISAS_INSN(intouch)
4713 {
4714     if (IS_USER(s)) {
4715         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4716         return;
4717     }
4718     /* ICache fetch.  Implement as no-op.  */
4719 }
4720 
4721 DISAS_INSN(cpushl)
4722 {
4723     if (IS_USER(s)) {
4724         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4725         return;
4726     }
4727     /* Cache push/invalidate.  Implement as no-op.  */
4728 }
4729 
4730 DISAS_INSN(cpush)
4731 {
4732     if (IS_USER(s)) {
4733         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4734         return;
4735     }
4736     /* Cache push/invalidate.  Implement as no-op.  */
4737 }
4738 
4739 DISAS_INSN(cinv)
4740 {
4741     if (IS_USER(s)) {
4742         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4743         return;
4744     }
4745     /* Invalidate cache line.  Implement as no-op.  */
4746 }
4747 
4748 #if defined(CONFIG_SOFTMMU)
4749 DISAS_INSN(pflush)
4750 {
4751     TCGv opmode;
4752 
4753     if (IS_USER(s)) {
4754         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4755         return;
4756     }
4757 
4758     opmode = tcg_const_i32((insn >> 3) & 3);
4759     gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4760     tcg_temp_free(opmode);
4761 }
4762 
4763 DISAS_INSN(ptest)
4764 {
4765     TCGv is_read;
4766 
4767     if (IS_USER(s)) {
4768         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4769         return;
4770     }
4771     is_read = tcg_const_i32((insn >> 5) & 1);
4772     gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4773     tcg_temp_free(is_read);
4774 }
4775 #endif
4776 
4777 DISAS_INSN(wddata)
4778 {
4779     gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4780 }
4781 
4782 DISAS_INSN(wdebug)
4783 {
4784     M68kCPU *cpu = m68k_env_get_cpu(env);
4785 
4786     if (IS_USER(s)) {
4787         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4788         return;
4789     }
4790     /* TODO: Implement wdebug.  */
4791     cpu_abort(CPU(cpu), "WDEBUG not implemented");
4792 }
4793 #endif
4794 
4795 DISAS_INSN(trap)
4796 {
4797     gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
4798 }
4799 
4800 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4801 {
4802     switch (reg) {
4803     case M68K_FPIAR:
4804         tcg_gen_movi_i32(res, 0);
4805         break;
4806     case M68K_FPSR:
4807         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4808         break;
4809     case M68K_FPCR:
4810         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4811         break;
4812     }
4813 }
4814 
4815 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4816 {
4817     switch (reg) {
4818     case M68K_FPIAR:
4819         break;
4820     case M68K_FPSR:
4821         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4822         break;
4823     case M68K_FPCR:
4824         gen_helper_set_fpcr(cpu_env, val);
4825         break;
4826     }
4827 }
4828 
4829 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4830 {
4831     int index = IS_USER(s);
4832     TCGv tmp;
4833 
4834     tmp = tcg_temp_new();
4835     gen_load_fcr(s, tmp, reg);
4836     tcg_gen_qemu_st32(tmp, addr, index);
4837     tcg_temp_free(tmp);
4838 }
4839 
4840 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4841 {
4842     int index = IS_USER(s);
4843     TCGv tmp;
4844 
4845     tmp = tcg_temp_new();
4846     tcg_gen_qemu_ld32u(tmp, addr, index);
4847     gen_store_fcr(s, tmp, reg);
4848     tcg_temp_free(tmp);
4849 }
4850 
4851 
4852 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4853                              uint32_t insn, uint32_t ext)
4854 {
4855     int mask = (ext >> 10) & 7;
4856     int is_write = (ext >> 13) & 1;
4857     int mode = extract32(insn, 3, 3);
4858     int i;
4859     TCGv addr, tmp;
4860 
4861     switch (mode) {
4862     case 0: /* Dn */
4863         if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4864             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4865             return;
4866         }
4867         if (is_write) {
4868             gen_load_fcr(s, DREG(insn, 0), mask);
4869         } else {
4870             gen_store_fcr(s, DREG(insn, 0), mask);
4871         }
4872         return;
4873     case 1: /* An, only with FPIAR */
4874         if (mask != M68K_FPIAR) {
4875             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4876             return;
4877         }
4878         if (is_write) {
4879             gen_load_fcr(s, AREG(insn, 0), mask);
4880         } else {
4881             gen_store_fcr(s, AREG(insn, 0), mask);
4882         }
4883         return;
4884     default:
4885         break;
4886     }
4887 
4888     tmp = gen_lea(env, s, insn, OS_LONG);
4889     if (IS_NULL_QREG(tmp)) {
4890         gen_addr_fault(s);
4891         return;
4892     }
4893 
4894     addr = tcg_temp_new();
4895     tcg_gen_mov_i32(addr, tmp);
4896 
4897     /* mask:
4898      *
4899      * 0b100 Floating-Point Control Register
4900      * 0b010 Floating-Point Status Register
4901      * 0b001 Floating-Point Instruction Address Register
4902      *
4903      */
4904 
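    /*
     * Several control registers can be transferred in one instruction,
     * MOVEM-style.  With the predecrement mode, stores walk from FPCR
     * down to FPIAR and write the final address back to An; everything
     * else walks upwards, and only the postincrement mode writes the
     * address back.
     */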
4905     if (is_write && mode == 4) {
4906         for (i = 2; i >= 0; i--, mask >>= 1) {
4907             if (mask & 1) {
4908                 gen_qemu_store_fcr(s, addr, 1 << i);
4909                 if (mask != 1) {
4910                     tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4911                 }
4912             }
4913         }
4914         tcg_gen_mov_i32(AREG(insn, 0), addr);
4915     } else {
4916         for (i = 0; i < 3; i++, mask >>= 1) {
4917             if (mask & 1) {
4918                 if (is_write) {
4919                     gen_qemu_store_fcr(s, addr, 1 << i);
4920                 } else {
4921                     gen_qemu_load_fcr(s, addr, 1 << i);
4922                 }
4923                 if (mask != 1 || mode == 3) {
4924                     tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4925                 }
4926             }
4927         }
4928         if (mode == 3) {
4929             tcg_gen_mov_i32(AREG(insn, 0), addr);
4930         }
4931     }
4932     tcg_temp_free_i32(addr);
4933 }
4934 
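/*
 * FMOVEM of the floating-point data registers.  The register list is
 * either an immediate mask or taken from a data register, and the
 * transfers themselves are done by helpers that return the final
 * address so the predecrement and postincrement forms can update An.
 */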
4935 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4936                           uint32_t insn, uint32_t ext)
4937 {
4938     int opsize;
4939     TCGv addr, tmp;
4940     int mode = (ext >> 11) & 0x3;
4941     int is_load = ((ext & 0x2000) == 0);
4942 
4943     if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4944         opsize = OS_EXTENDED;
4945     } else {
4946         opsize = OS_DOUBLE;  /* FIXME */
4947     }
4948 
4949     addr = gen_lea(env, s, insn, opsize);
4950     if (IS_NULL_QREG(addr)) {
4951         gen_addr_fault(s);
4952         return;
4953     }
4954 
4955     tmp = tcg_temp_new();
4956     if (mode & 0x1) {
4957         /* Dynamic register list */
4958         tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4959     } else {
4960         /* Static register list */
4961         tcg_gen_movi_i32(tmp, ext & 0xff);
4962     }
4963 
4964     if (!is_load && (mode & 2) == 0) {
4965         /* the predecrement addressing mode is only available
4966          * when storing registers to memory
4967          */
4968         if (opsize == OS_EXTENDED) {
4969             gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4970         } else {
4971             gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4972         }
4973     } else {
4974         /* postincrement addressing mode */
4975         if (opsize == OS_EXTENDED) {
4976             if (is_load) {
4977                 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4978             } else {
4979                 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4980             }
4981         } else {
4982             if (is_load) {
4983                 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4984             } else {
4985                 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
4986             }
4987         }
4988     }
4989     if ((insn & 070) == 030 || (insn & 070) == 040) {
4990         tcg_gen_mov_i32(AREG(insn, 0), tmp);
4991     }
4992     tcg_temp_free(tmp);
4993 }
4994 
4995 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4996    immediately before the next FP instruction is executed.  */
4997 DISAS_INSN(fpu)
4998 {
4999     uint16_t ext;
5000     int opmode;
5001     int opsize;
5002     TCGv_ptr cpu_src, cpu_dest;
5003 
5004     ext = read_im16(env, s);
5005     opmode = ext & 0x7f;
5006     switch ((ext >> 13) & 7) {
5007     case 0:
5008         break;
5009     case 1:
5010         goto undef;
5011     case 2:
5012         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
5013             /* fmovecr */
5014             TCGv rom_offset = tcg_const_i32(opmode);
5015             cpu_dest = gen_fp_ptr(REG(ext, 7));
5016             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
5017             tcg_temp_free_ptr(cpu_dest);
5018             tcg_temp_free(rom_offset);
5019             return;
5020         }
5021         break;
5022     case 3: /* fmove out */
5023         cpu_src = gen_fp_ptr(REG(ext, 7));
5024         opsize = ext_opsize(ext, 10);
5025         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5026                       EA_STORE, IS_USER(s)) == -1) {
5027             gen_addr_fault(s);
5028         }
5029         gen_helper_ftst(cpu_env, cpu_src);
5030         tcg_temp_free_ptr(cpu_src);
5031         return;
5032     case 4: /* fmove to control register.  */
5033     case 5: /* fmove from control register.  */
5034         gen_op_fmove_fcr(env, s, insn, ext);
5035         return;
5036     case 6: /* fmovem */
5037     case 7:
5038         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
5039             goto undef;
5040         }
5041         gen_op_fmovem(env, s, insn, ext);
5042         return;
5043     }
5044     if (ext & (1 << 14)) {
5045         /* Source effective address.  */
5046         opsize = ext_opsize(ext, 10);
5047         cpu_src = gen_fp_result_ptr();
5048         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5049                       EA_LOADS, IS_USER(s)) == -1) {
5050             gen_addr_fault(s);
5051             return;
5052         }
5053     } else {
5054         /* Source register.  */
5055         opsize = OS_EXTENDED;
5056         cpu_src = gen_fp_ptr(REG(ext, 10));
5057     }
5058     cpu_dest = gen_fp_ptr(REG(ext, 7));
5059     switch (opmode) {
5060     case 0: /* fmove */
5061         gen_fp_move(cpu_dest, cpu_src);
5062         break;
5063     case 0x40: /* fsmove */
5064         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
5065         break;
5066     case 0x44: /* fdmove */
5067         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
5068         break;
5069     case 1: /* fint */
5070         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
5071         break;
5072     case 2: /* fsinh */
5073         gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
5074         break;
5075     case 3: /* fintrz */
5076         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
5077         break;
5078     case 4: /* fsqrt */
5079         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
5080         break;
5081     case 0x41: /* fssqrt */
5082         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
5083         break;
5084     case 0x45: /* fdsqrt */
5085         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
5086         break;
5087     case 0x06: /* flognp1 */
5088         gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
5089         break;
5090     case 0x09: /* ftanh */
5091         gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5092         break;
5093     case 0x0a: /* fatan */
5094         gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5095         break;
5096     case 0x0c: /* fasin */
5097         gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5098         break;
5099     case 0x0d: /* fatanh */
5100         gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5101         break;
5102     case 0x0e: /* fsin */
5103         gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5104         break;
5105     case 0x0f: /* ftan */
5106         gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5107         break;
5108     case 0x10: /* fetox */
5109         gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5110         break;
5111     case 0x11: /* ftwotox */
5112         gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5113         break;
5114     case 0x12: /* ftentox */
5115         gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5116         break;
5117     case 0x14: /* flogn */
5118         gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5119         break;
5120     case 0x15: /* flog10 */
5121         gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5122         break;
5123     case 0x16: /* flog2 */
5124         gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5125         break;
5126     case 0x18: /* fabs */
5127         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5128         break;
5129     case 0x58: /* fsabs */
5130         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5131         break;
5132     case 0x5c: /* fdabs */
5133         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5134         break;
5135     case 0x19: /* fcosh */
5136         gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5137         break;
5138     case 0x1a: /* fneg */
5139         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5140         break;
5141     case 0x5a: /* fsneg */
5142         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5143         break;
5144     case 0x5e: /* fdneg */
5145         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5146         break;
5147     case 0x1c: /* facos */
5148         gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5149         break;
5150     case 0x1d: /* fcos */
5151         gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5152         break;
5153     case 0x1e: /* fgetexp */
5154         gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5155         break;
5156     case 0x1f: /* fgetman */
5157         gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5158         break;
5159     case 0x20: /* fdiv */
5160         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5161         break;
5162     case 0x60: /* fsdiv */
5163         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5164         break;
5165     case 0x64: /* fddiv */
5166         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5167         break;
5168     case 0x21: /* fmod */
5169         gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5170         break;
5171     case 0x22: /* fadd */
5172         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5173         break;
5174     case 0x62: /* fsadd */
5175         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5176         break;
5177     case 0x66: /* fdadd */
5178         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5179         break;
5180     case 0x23: /* fmul */
5181         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5182         break;
5183     case 0x63: /* fsmul */
5184         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5185         break;
5186     case 0x67: /* fdmul */
5187         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5188         break;
5189     case 0x24: /* fsgldiv */
5190         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5191         break;
5192     case 0x25: /* frem */
5193         gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5194         break;
5195     case 0x26: /* fscale */
5196         gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5197         break;
5198     case 0x27: /* fsglmul */
5199         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5200         break;
5201     case 0x28: /* fsub */
5202         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5203         break;
5204     case 0x68: /* fssub */
5205         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5206         break;
5207     case 0x6c: /* fdsub */
5208         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5209         break;
5210     case 0x30: case 0x31: case 0x32:
5211     case 0x33: case 0x34: case 0x35:
5212     case 0x36: case 0x37: {
5213             TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5214             gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5215             tcg_temp_free_ptr(cpu_dest2);
5216         }
5217         break;
5218     case 0x38: /* fcmp */
5219         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5220         return;
5221     case 0x3a: /* ftst */
5222         gen_helper_ftst(cpu_env, cpu_src);
5223         return;
5224     default:
5225         goto undef;
5226     }
5227     tcg_temp_free_ptr(cpu_src);
5228     gen_helper_ftst(cpu_env, cpu_dest);
5229     tcg_temp_free_ptr(cpu_dest);
5230     return;
5231 undef:
5232     /* FIXME: Is this right for offset addressing modes?  */
5233     s->pc -= 2;
5234     disas_undef_fpu(env, s, insn);
5235 }
5236 
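/*
 * Build a DisasCompare for one of the 32 FPU conditional predicates.
 * Each predicate is a boolean combination of the FPSR condition-code
 * bits (FPSR_CC_N, FPSR_CC_Z, FPSR_CC_A), so it is evaluated by
 * masking and combining those bits into v1 and comparing against
 * zero.  Conditions 16..31 are the "signaling" variants; they generate
 * the same code as 0..15 because BSUN is not raised (see the TODO
 * below).
 */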
5237 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5238 {
5239     TCGv fpsr;
5240 
5241     c->g1 = 1;
5242     c->v2 = tcg_const_i32(0);
5243     c->g2 = 0;
5244     /* TODO: Raise BSUN exception.  */
5245     fpsr = tcg_temp_new();
5246     gen_load_fcr(s, fpsr, M68K_FPSR);
5247     switch (cond) {
5248     case 0:  /* False */
5249     case 16: /* Signaling False */
5250         c->v1 = c->v2;
5251         c->tcond = TCG_COND_NEVER;
5252         break;
5253     case 1:  /* EQual Z */
5254     case 17: /* Signaling EQual Z */
5255         c->v1 = tcg_temp_new();
5256         c->g1 = 0;
5257         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5258         c->tcond = TCG_COND_NE;
5259         break;
5260     case 2:  /* Ordered Greater Than !(A || Z || N) */
5261     case 18: /* Greater Than !(A || Z || N) */
5262         c->v1 = tcg_temp_new();
5263         c->g1 = 0;
5264         tcg_gen_andi_i32(c->v1, fpsr,
5265                          FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5266         c->tcond = TCG_COND_EQ;
5267         break;
5268     case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
5269     case 19: /* Greater than or Equal Z || !(A || N) */
5270         c->v1 = tcg_temp_new();
5271         c->g1 = 0;
5272         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5273         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5274         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5275         tcg_gen_or_i32(c->v1, c->v1, fpsr);
5276         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5277         c->tcond = TCG_COND_NE;
5278         break;
5279     case 4:  /* Ordered Less Than !(!N || A || Z) */
5280     case 20: /* Less Than !(!N || A || Z) */
5281         c->v1 = tcg_temp_new();
5282         c->g1 = 0;
5283         tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5284         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5285         c->tcond = TCG_COND_EQ;
5286         break;
5287     case 5:  /* Ordered Less than or Equal Z || (N && !A) */
5288     case 21: /* Less than or Equal Z || (N && !A) */
5289         c->v1 = tcg_temp_new();
5290         c->g1 = 0;
5291         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5292         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5293         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5294         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5295         c->tcond = TCG_COND_NE;
5296         break;
5297     case 6:  /* Ordered Greater or Less than !(A || Z) */
5298     case 22: /* Greater or Less than !(A || Z) */
5299         c->v1 = tcg_temp_new();
5300         c->g1 = 0;
5301         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5302         c->tcond = TCG_COND_EQ;
5303         break;
5304     case 7:  /* Ordered !A */
5305     case 23: /* Greater, Less or Equal !A */
5306         c->v1 = tcg_temp_new();
5307         c->g1 = 0;
5308         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5309         c->tcond = TCG_COND_EQ;
5310         break;
5311     case 8:  /* Unordered A */
5312     case 24: /* Not Greater, Less or Equal A */
5313         c->v1 = tcg_temp_new();
5314         c->g1 = 0;
5315         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5316         c->tcond = TCG_COND_NE;
5317         break;
5318     case 9:  /* Unordered or Equal A || Z */
5319     case 25: /* Not Greater or Less than A || Z */
5320         c->v1 = tcg_temp_new();
5321         c->g1 = 0;
5322         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5323         c->tcond = TCG_COND_NE;
5324         break;
5325     case 10: /* Unordered or Greater Than A || !(N || Z) */
5326     case 26: /* Not Less or Equal A || !(N || Z) */
5327         c->v1 = tcg_temp_new();
5328         c->g1 = 0;
5329         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5330         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5331         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5332         tcg_gen_or_i32(c->v1, c->v1, fpsr);
5333         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5334         c->tcond = TCG_COND_NE;
5335         break;
5336     case 11: /* Unordered or Greater or Equal A || Z || !N */
5337     case 27: /* Not Less Than A || Z || !N */
5338         c->v1 = tcg_temp_new();
5339         c->g1 = 0;
5340         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5341         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5342         c->tcond = TCG_COND_NE;
5343         break;
5344     case 12: /* Unordered or Less Than A || (N && !Z) */
5345     case 28: /* Not Greater than or Equal A || (N && !Z) */
5346         c->v1 = tcg_temp_new();
5347         c->g1 = 0;
5348         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5349         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5350         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5351         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5352         c->tcond = TCG_COND_NE;
5353         break;
5354     case 13: /* Unordered or Less or Equal A || Z || N */
5355     case 29: /* Not Greater Than A || Z || N */
5356         c->v1 = tcg_temp_new();
5357         c->g1 = 0;
5358         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5359         c->tcond = TCG_COND_NE;
5360         break;
5361     case 14: /* Not Equal !Z */
5362     case 30: /* Signaling Not Equal !Z */
5363         c->v1 = tcg_temp_new();
5364         c->g1 = 0;
5365         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5366         c->tcond = TCG_COND_EQ;
5367         break;
5368     case 15: /* True */
5369     case 31: /* Signaling True */
5370         c->v1 = c->v2;
5371         c->tcond = TCG_COND_ALWAYS;
5372         break;
5373     }
5374     tcg_temp_free(fpsr);
5375 }
5376 
5377 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5378 {
5379     DisasCompare c;
5380 
5381     gen_fcc_cond(&c, s, cond);
5382     update_cc_op(s);
5383     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5384     free_cond(&c);
5385 }
5386 
5387 DISAS_INSN(fbcc)
5388 {
5389     uint32_t offset;
5390     uint32_t base;
5391     TCGLabel *l1;
5392 
5393     base = s->pc;
5394     offset = (int16_t)read_im16(env, s);
5395     if (insn & (1 << 6)) {
5396         offset = (offset << 16) | read_im16(env, s);
5397     }
5398 
5399     l1 = gen_new_label();
5400     update_cc_op(s);
5401     gen_fjmpcc(s, insn & 0x3f, l1);
5402     gen_jmp_tb(s, 0, s->pc);
5403     gen_set_label(l1);
5404     gen_jmp_tb(s, 1, base + offset);
5405 }
5406 
5407 DISAS_INSN(fscc)
5408 {
5409     DisasCompare c;
5410     int cond;
5411     TCGv tmp;
5412     uint16_t ext;
5413 
5414     ext = read_im16(env, s);
5415     cond = ext & 0x3f;
5416     gen_fcc_cond(&c, s, cond);
5417 
5418     tmp = tcg_temp_new();
5419     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5420     free_cond(&c);
5421 
5422     tcg_gen_neg_i32(tmp, tmp);
5423     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5424     tcg_temp_free(tmp);
5425 }
5426 
5427 #if defined(CONFIG_SOFTMMU)
5428 DISAS_INSN(frestore)
5429 {
5430     TCGv addr;
5431 
5432     if (IS_USER(s)) {
5433         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5434         return;
5435     }
5436     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5437         SRC_EA(env, addr, OS_LONG, 0, NULL);
5438         /* FIXME: check the state frame */
5439     } else {
5440         disas_undef(env, s, insn);
5441     }
5442 }
5443 
5444 DISAS_INSN(fsave)
5445 {
5446     if (IS_USER(s)) {
5447         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5448         return;
5449     }
5450 
5451     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5452         /* always write IDLE */
5453         TCGv idle = tcg_const_i32(0x41000000);
5454         DEST_EA(env, insn, OS_LONG, idle, NULL);
5455         tcg_temp_free(idle);
5456     } else {
5457         disas_undef(env, s, insn);
5458     }
5459 }
5460 #endif
5461 
5462 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5463 {
5464     TCGv tmp = tcg_temp_new();
5465     if (s->env->macsr & MACSR_FI) {
5466         if (upper)
5467             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5468         else
5469             tcg_gen_shli_i32(tmp, val, 16);
5470     } else if (s->env->macsr & MACSR_SU) {
5471         if (upper)
5472             tcg_gen_sari_i32(tmp, val, 16);
5473         else
5474             tcg_gen_ext16s_i32(tmp, val);
5475     } else {
5476         if (upper)
5477             tcg_gen_shri_i32(tmp, val, 16);
5478         else
5479             tcg_gen_ext16u_i32(tmp, val);
5480     }
5481     return tmp;
5482 }
5483 
5484 static void gen_mac_clear_flags(void)
5485 {
5486     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5487                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5488 }
5489 
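/*
 * ColdFire MAC/EMAC multiply-accumulate.  An optional parallel load is
 * issued first (the "MAC with load" form), the two operands are taken
 * either as full 32-bit registers or as selected 16-bit halves, the
 * product is formed according to the MACSR mode (fractional, signed or
 * unsigned), optionally shifted by one bit, and finally added to or
 * subtracted from the selected accumulator with saturation handled by
 * helpers.
 */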
5490 DISAS_INSN(mac)
5491 {
5492     TCGv rx;
5493     TCGv ry;
5494     uint16_t ext;
5495     int acc;
5496     TCGv tmp;
5497     TCGv addr;
5498     TCGv loadval;
5499     int dual;
5500     TCGv saved_flags;
5501 
5502     if (!s->done_mac) {
5503         s->mactmp = tcg_temp_new_i64();
5504         s->done_mac = 1;
5505     }
5506 
5507     ext = read_im16(env, s);
5508 
5509     acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
5510     dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
5511     if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
5512         disas_undef(env, s, insn);
5513         return;
5514     }
5515     if (insn & 0x30) {
5516         /* MAC with load.  */
5517         tmp = gen_lea(env, s, insn, OS_LONG);
5518         addr = tcg_temp_new();
5519         tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5520         /* Load the value now to ensure correct exception behavior.
5521            Perform writeback after reading the MAC inputs.  */
5522         loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
5523 
5524         acc ^= 1;
5525         rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5526         ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5527     } else {
5528         loadval = addr = NULL_QREG;
5529         rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5530         ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5531     }
5532 
5533     gen_mac_clear_flags();
5534 #if 0
5535     l1 = -1;
5536     /* Disabled because conditional branches clobber temporary vars.  */
5537     if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5538         /* Skip the multiply if we know we will ignore it.  */
5539         l1 = gen_new_label();
5540         tmp = tcg_temp_new();
5541         tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5542         gen_op_jmp_nz32(tmp, l1);
5543     }
5544 #endif
5545 
5546     if ((ext & 0x0800) == 0) {
5547         /* Word.  */
5548         rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5549         ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5550     }
5551     if (s->env->macsr & MACSR_FI) {
5552         gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5553     } else {
5554         if (s->env->macsr & MACSR_SU)
5555             gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5556         else
5557             gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
5558         switch ((ext >> 9) & 3) {
5559         case 1:
5560             tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5561             break;
5562         case 3:
5563             tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5564             break;
5565         }
5566     }
5567 
5568     if (dual) {
5569         /* Save the overflow flag from the multiply.  */
5570         saved_flags = tcg_temp_new();
5571         tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5572     } else {
5573         saved_flags = NULL_QREG;
5574     }
5575 
5576 #if 0
5577     /* Disabled because conditional branches clobber temporary vars.  */
5578     if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5579         /* Skip the accumulate if the value is already saturated.  */
5580         l1 = gen_new_label();
5581         tmp = tcg_temp_new();
5582         gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5583         gen_op_jmp_nz32(tmp, l1);
5584     }
5585 #endif
5586 
5587     if (insn & 0x100)
5588         tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5589     else
5590         tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5591 
5592     if (s->env->macsr & MACSR_FI)
5593         gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5594     else if (s->env->macsr & MACSR_SU)
5595         gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5596     else
5597         gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5598 
5599 #if 0
5600     /* Disabled because conditional branches clobber temporary vars.  */
5601     if (l1 != -1)
5602         gen_set_label(l1);
5603 #endif
5604 
5605     if (dual) {
5606         /* Dual accumulate variant.  */
5607         acc = (ext >> 2) & 3;
5608         /* Restore the overflow flag from the multiplier.  */
5609         tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5610 #if 0
5611         /* Disabled because conditional branches clobber temporary vars.  */
5612         if ((s->env->macsr & MACSR_OMC) != 0) {
5613             /* Skip the accumulate if the value is already saturated.  */
5614             l1 = gen_new_label();
5615             tmp = tcg_temp_new();
5616             gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5617             gen_op_jmp_nz32(tmp, l1);
5618         }
5619 #endif
5620         if (ext & 2)
5621             tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5622         else
5623             tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5624         if (s->env->macsr & MACSR_FI)
5625             gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5626         else if (s->env->macsr & MACSR_SU)
5627             gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5628         else
5629             gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5630 #if 0
5631         /* Disabled because conditional branches clobber temporary vars.  */
5632         if (l1 != -1)
5633             gen_set_label(l1);
5634 #endif
5635     }
5636     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
5637 
5638     if (insn & 0x30) {
5639         TCGv rw;
5640         rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5641         tcg_gen_mov_i32(rw, loadval);
5642         /* FIXME: Should address writeback happen with the masked or
5643            unmasked value?  */
5644         switch ((insn >> 3) & 7) {
5645         case 3: /* Post-increment.  */
5646             tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5647             break;
5648         case 4: /* Pre-decrement.  */
5649             tcg_gen_mov_i32(AREG(insn, 0), addr);
5650         }
5651         tcg_temp_free(loadval);
5652     }
5653 }
5654 
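     /* MOVE from ACCx to a data or address register, converting the 64-bit
        accumulator according to the current MACSR mode; bit 6 of the opcode
        additionally clears the accumulator and its overflow (PAV) flag.  */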
5655 DISAS_INSN(from_mac)
5656 {
5657     TCGv rx;
5658     TCGv_i64 acc;
5659     int accnum;
5660 
5661     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5662     accnum = (insn >> 9) & 3;
5663     acc = MACREG(accnum);
5664     if (s->env->macsr & MACSR_FI) {
5665         gen_helper_get_macf(rx, cpu_env, acc);
5666     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5667         tcg_gen_extrl_i64_i32(rx, acc);
5668     } else if (s->env->macsr & MACSR_SU) {
5669         gen_helper_get_macs(rx, acc);
5670     } else {
5671         gen_helper_get_macu(rx, acc);
5672     }
5673     if (insn & 0x40) {
5674         tcg_gen_movi_i64(acc, 0);
5675         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5676     }
5677 }
5678 
5679 DISAS_INSN(move_mac)
5680 {
5681     /* FIXME: This can be done without a helper.  */
5682     int src;
5683     TCGv dest;
5684     src = insn & 3;
5685     dest = tcg_const_i32((insn >> 9) & 3);
5686     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5687     gen_mac_clear_flags();
5688     gen_helper_mac_set_flags(cpu_env, dest);
5689 }
5690 
5691 DISAS_INSN(from_macsr)
5692 {
5693     TCGv reg;
5694 
5695     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5696     tcg_gen_mov_i32(reg, QREG_MACSR);
5697 }
5698 
5699 DISAS_INSN(from_mask)
5700 {
5701     TCGv reg;
5702     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5703     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5704 }
5705 
5706 DISAS_INSN(from_mext)
5707 {
5708     TCGv reg;
5709     TCGv acc;
5710     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5711     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5712     if (s->env->macsr & MACSR_FI)
5713         gen_helper_get_mac_extf(reg, cpu_env, acc);
5714     else
5715         gen_helper_get_mac_exti(reg, cpu_env, acc);
5716 }
5717 
5718 DISAS_INSN(macsr_to_ccr)
5719 {
5720     TCGv tmp = tcg_temp_new();
5721     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5722     gen_helper_set_sr(cpu_env, tmp);
5723     tcg_temp_free(tmp);
5724     set_cc_op(s, CC_OP_FLAGS);
5725 }
5726 
5727 DISAS_INSN(to_mac)
5728 {
5729     TCGv_i64 acc;
5730     TCGv val;
5731     int accnum;
5732     accnum = (insn >> 9) & 3;
5733     acc = MACREG(accnum);
5734     SRC_EA(env, val, OS_LONG, 0, NULL);
5735     if (s->env->macsr & MACSR_FI) {
5736         tcg_gen_ext_i32_i64(acc, val);
5737         tcg_gen_shli_i64(acc, acc, 8);
5738     } else if (s->env->macsr & MACSR_SU) {
5739         tcg_gen_ext_i32_i64(acc, val);
5740     } else {
5741         tcg_gen_extu_i32_i64(acc, val);
5742     }
5743     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5744     gen_mac_clear_flags();
5745     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5746 }
5747 
5748 DISAS_INSN(to_macsr)
5749 {
5750     TCGv val;
5751     SRC_EA(env, val, OS_LONG, 0, NULL);
5752     gen_helper_set_macsr(cpu_env, val);
5753     gen_lookup_tb(s);
5754 }
5755 
5756 DISAS_INSN(to_mask)
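     /* MOVE to MASK: writes force the upper 16 bits of the MAC address mask
        register to ones.  */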
5757 {
5758     TCGv val;
5759     SRC_EA(env, val, OS_LONG, 0, NULL);
5760     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5761 }
5762 
5763 DISAS_INSN(to_mext)
5764 {
5765     TCGv val;
5766     TCGv acc;
5767     SRC_EA(env, val, OS_LONG, 0, NULL);
5768     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5769     if (s->env->macsr & MACSR_FI)
5770         gen_helper_set_mac_extf(cpu_env, val, acc);
5771     else if (s->env->macsr & MACSR_SU)
5772         gen_helper_set_mac_exts(cpu_env, val, acc);
5773     else
5774         gen_helper_set_mac_extu(cpu_env, val, acc);
5775 }
5776 
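     /* Dispatch table with one entry per 16-bit opcode, filled in once by
        register_m68k_insns() below.  */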
5777 static disas_proc opcode_table[65536];
5778 
5779 static void
5780 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5781 {
5782   int i;
5783   int from;
5784   int to;
5785 
5786   /* Sanity check.  All set bits must be included in the mask.  */
5787   if (opcode & ~mask) {
5788       fprintf(stderr,
5789               "qemu internal error: bogus opcode definition %04x/%04x\n",
5790               opcode, mask);
5791       abort();
5792   }
5793   /* This could probably be cleverer.  For now just optimize the case where
5794      the top bits are known.  */
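       /* For example, opcode 0x41c0 with mask 0xf1c0 scans indexes
          0x4000..0x4fff and installs the handler at every index whose
          masked bits equal the opcode.  */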
5795   /* Find the first zero bit in the mask.  */
5796   i = 0x8000;
5797   while ((i & mask) != 0)
5798       i >>= 1;
5799   /* Iterate over all combinations of this and lower bits.  */
5800   if (i == 0)
5801       i = 1;
5802   else
5803       i <<= 1;
5804   from = opcode & ~(i - 1);
5805   to = from + i;
5806   for (i = from; i < to; i++) {
5807       if ((i & mask) == opcode)
5808           opcode_table[i] = proc;
5809   }
5810 }
5811 
5812 /* Register m68k opcode handlers.  Order is important:
5813    later insns override earlier ones.  */
5814 void register_m68k_insns (CPUM68KState *env)
5815 {
5816     /* Build the opcode table only once to avoid
5817        multithreading issues. */
5818     if (opcode_table[0] != NULL) {
5819         return;
5820     }
5821 
5822     /* Use BASE() for instructions available
5823      * on both CF_ISA_A and M68000.
5824      */
5825 #define BASE(name, opcode, mask) \
5826     register_opcode(disas_##name, 0x##opcode, 0x##mask)
5827 #define INSN(name, opcode, mask, feature) do { \
5828     if (m68k_feature(env, M68K_FEATURE_##feature)) \
5829         BASE(name, opcode, mask); \
5830     } while(0)
5831     BASE(undef,     0000, 0000);
5832     INSN(arith_im,  0080, fff8, CF_ISA_A);
5833     INSN(arith_im,  0000, ff00, M68000);
5834     INSN(chk2,      00c0, f9c0, CHK2);
5835     INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
5836     BASE(bitop_reg, 0100, f1c0);
5837     BASE(bitop_reg, 0140, f1c0);
5838     BASE(bitop_reg, 0180, f1c0);
5839     BASE(bitop_reg, 01c0, f1c0);
5840     INSN(movep,     0108, f138, MOVEP);
5841     INSN(arith_im,  0280, fff8, CF_ISA_A);
5842     INSN(arith_im,  0200, ff00, M68000);
5843     INSN(undef,     02c0, ffc0, M68000);
5844     INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
5845     INSN(arith_im,  0480, fff8, CF_ISA_A);
5846     INSN(arith_im,  0400, ff00, M68000);
5847     INSN(undef,     04c0, ffc0, M68000);
5848     INSN(arith_im,  0600, ff00, M68000);
5849     INSN(undef,     06c0, ffc0, M68000);
5850     INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
5851     INSN(arith_im,  0680, fff8, CF_ISA_A);
5852     INSN(arith_im,  0c00, ff38, CF_ISA_A);
5853     INSN(arith_im,  0c00, ff00, M68000);
5854     BASE(bitop_im,  0800, ffc0);
5855     BASE(bitop_im,  0840, ffc0);
5856     BASE(bitop_im,  0880, ffc0);
5857     BASE(bitop_im,  08c0, ffc0);
5858     INSN(arith_im,  0a80, fff8, CF_ISA_A);
5859     INSN(arith_im,  0a00, ff00, M68000);
5860 #if defined(CONFIG_SOFTMMU)
5861     INSN(moves,     0e00, ff00, M68000);
5862 #endif
5863     INSN(cas,       0ac0, ffc0, CAS);
5864     INSN(cas,       0cc0, ffc0, CAS);
5865     INSN(cas,       0ec0, ffc0, CAS);
5866     INSN(cas2w,     0cfc, ffff, CAS);
5867     INSN(cas2l,     0efc, ffff, CAS);
5868     BASE(move,      1000, f000);
5869     BASE(move,      2000, f000);
5870     BASE(move,      3000, f000);
5871     INSN(chk,       4000, f040, M68000);
5872     INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
5873     INSN(negx,      4080, fff8, CF_ISA_A);
5874     INSN(negx,      4000, ff00, M68000);
5875     INSN(undef,     40c0, ffc0, M68000);
5876     INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5877     INSN(move_from_sr, 40c0, ffc0, M68000);
5878     BASE(lea,       41c0, f1c0);
5879     BASE(clr,       4200, ff00);
5880     BASE(undef,     42c0, ffc0);
5881     INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
5882     INSN(move_from_ccr, 42c0, ffc0, M68000);
5883     INSN(neg,       4480, fff8, CF_ISA_A);
5884     INSN(neg,       4400, ff00, M68000);
5885     INSN(undef,     44c0, ffc0, M68000);
5886     BASE(move_to_ccr, 44c0, ffc0);
5887     INSN(not,       4680, fff8, CF_ISA_A);
5888     INSN(not,       4600, ff00, M68000);
5889 #if defined(CONFIG_SOFTMMU)
5890     BASE(move_to_sr, 46c0, ffc0);
5891 #endif
5892     INSN(nbcd,      4800, ffc0, M68000);
5893     INSN(linkl,     4808, fff8, M68000);
5894     BASE(pea,       4840, ffc0);
5895     BASE(swap,      4840, fff8);
5896     INSN(bkpt,      4848, fff8, BKPT);
5897     INSN(movem,     48d0, fbf8, CF_ISA_A);
5898     INSN(movem,     48e8, fbf8, CF_ISA_A);
5899     INSN(movem,     4880, fb80, M68000);
5900     BASE(ext,       4880, fff8);
5901     BASE(ext,       48c0, fff8);
5902     BASE(ext,       49c0, fff8);
5903     BASE(tst,       4a00, ff00);
5904     INSN(tas,       4ac0, ffc0, CF_ISA_B);
5905     INSN(tas,       4ac0, ffc0, M68000);
5906 #if defined(CONFIG_SOFTMMU)
5907     INSN(halt,      4ac8, ffff, CF_ISA_A);
5908 #endif
5909     INSN(pulse,     4acc, ffff, CF_ISA_A);
5910     BASE(illegal,   4afc, ffff);
5911     INSN(mull,      4c00, ffc0, CF_ISA_A);
5912     INSN(mull,      4c00, ffc0, LONG_MULDIV);
5913     INSN(divl,      4c40, ffc0, CF_ISA_A);
5914     INSN(divl,      4c40, ffc0, LONG_MULDIV);
5915     INSN(sats,      4c80, fff8, CF_ISA_B);
5916     BASE(trap,      4e40, fff0);
5917     BASE(link,      4e50, fff8);
5918     BASE(unlk,      4e58, fff8);
5919 #if defined(CONFIG_SOFTMMU)
5920     INSN(move_to_usp, 4e60, fff8, USP);
5921     INSN(move_from_usp, 4e68, fff8, USP);
5922     INSN(reset,     4e70, ffff, M68000);
5923     BASE(stop,      4e72, ffff);
5924     BASE(rte,       4e73, ffff);
5925     INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
5926     INSN(m68k_movec, 4e7a, fffe, M68000);
5927 #endif
5928     BASE(nop,       4e71, ffff);
5929     INSN(rtd,       4e74, ffff, RTD);
5930     BASE(rts,       4e75, ffff);
5931     BASE(jump,      4e80, ffc0);
5932     BASE(jump,      4ec0, ffc0);
5933     INSN(addsubq,   5000, f080, M68000);
5934     BASE(addsubq,   5080, f0c0);
5935     INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
5936     INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
5937     INSN(dbcc,      50c8, f0f8, M68000);
5938     INSN(tpf,       51f8, fff8, CF_ISA_A);
5939 
5940     /* Branch instructions.  */
5941     BASE(branch,    6000, f000);
5942     /* Disable long branch instructions, then add back the ones we want.  */
5943     BASE(undef,     60ff, f0ff); /* All long branches.  */
5944     INSN(branch,    60ff, f0ff, CF_ISA_B);
5945     INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
5946     INSN(branch,    60ff, ffff, BRAL);
5947     INSN(branch,    60ff, f0ff, BCCL);
5948 
5949     BASE(moveq,     7000, f100);
5950     INSN(mvzs,      7100, f100, CF_ISA_B);
5951     BASE(or,        8000, f000);
5952     BASE(divw,      80c0, f0c0);
5953     INSN(sbcd_reg,  8100, f1f8, M68000);
5954     INSN(sbcd_mem,  8108, f1f8, M68000);
5955     BASE(addsub,    9000, f000);
5956     INSN(undef,     90c0, f0c0, CF_ISA_A);
5957     INSN(subx_reg,  9180, f1f8, CF_ISA_A);
5958     INSN(subx_reg,  9100, f138, M68000);
5959     INSN(subx_mem,  9108, f138, M68000);
5960     INSN(suba,      91c0, f1c0, CF_ISA_A);
5961     INSN(suba,      90c0, f0c0, M68000);
5962 
5963     BASE(undef_mac, a000, f000);
5964     INSN(mac,       a000, f100, CF_EMAC);
5965     INSN(from_mac,  a180, f9b0, CF_EMAC);
5966     INSN(move_mac,  a110, f9fc, CF_EMAC);
5967     INSN(from_macsr,a980, f9f0, CF_EMAC);
5968     INSN(from_mask, ad80, fff0, CF_EMAC);
5969     INSN(from_mext, ab80, fbf0, CF_EMAC);
5970     INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
5971     INSN(to_mac,    a100, f9c0, CF_EMAC);
5972     INSN(to_macsr,  a900, ffc0, CF_EMAC);
5973     INSN(to_mext,   ab00, fbc0, CF_EMAC);
5974     INSN(to_mask,   ad00, ffc0, CF_EMAC);
5975 
5976     INSN(mov3q,     a140, f1c0, CF_ISA_B);
5977     INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
5978     INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
5979     INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
5980     INSN(cmp,       b080, f1c0, CF_ISA_A);
5981     INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
5982     INSN(cmp,       b000, f100, M68000);
5983     INSN(eor,       b100, f100, M68000);
5984     INSN(cmpm,      b108, f138, M68000);
5985     INSN(cmpa,      b0c0, f0c0, M68000);
5986     INSN(eor,       b180, f1c0, CF_ISA_A);
5987     BASE(and,       c000, f000);
5988     INSN(exg_dd,    c140, f1f8, M68000);
5989     INSN(exg_aa,    c148, f1f8, M68000);
5990     INSN(exg_da,    c188, f1f8, M68000);
5991     BASE(mulw,      c0c0, f0c0);
5992     INSN(abcd_reg,  c100, f1f8, M68000);
5993     INSN(abcd_mem,  c108, f1f8, M68000);
5994     BASE(addsub,    d000, f000);
5995     INSN(undef,     d0c0, f0c0, CF_ISA_A);
5996     INSN(addx_reg,      d180, f1f8, CF_ISA_A);
5997     INSN(addx_reg,  d100, f138, M68000);
5998     INSN(addx_mem,  d108, f138, M68000);
5999     INSN(adda,      d1c0, f1c0, CF_ISA_A);
6000     INSN(adda,      d0c0, f0c0, M68000);
6001     INSN(shift_im,  e080, f0f0, CF_ISA_A);
6002     INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
6003     INSN(shift8_im, e000, f0f0, M68000);
6004     INSN(shift16_im, e040, f0f0, M68000);
6005     INSN(shift_im,  e080, f0f0, M68000);
6006     INSN(shift8_reg, e020, f0f0, M68000);
6007     INSN(shift16_reg, e060, f0f0, M68000);
6008     INSN(shift_reg, e0a0, f0f0, M68000);
6009     INSN(shift_mem, e0c0, fcc0, M68000);
6010     INSN(rotate_im, e090, f0f0, M68000);
6011     INSN(rotate8_im, e010, f0f0, M68000);
6012     INSN(rotate16_im, e050, f0f0, M68000);
6013     INSN(rotate_reg, e0b0, f0f0, M68000);
6014     INSN(rotate8_reg, e030, f0f0, M68000);
6015     INSN(rotate16_reg, e070, f0f0, M68000);
6016     INSN(rotate_mem, e4c0, fcc0, M68000);
6017     INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
6018     INSN(bfext_reg, e9c0, fdf8, BITFIELD);
6019     INSN(bfins_mem, efc0, ffc0, BITFIELD);
6020     INSN(bfins_reg, efc0, fff8, BITFIELD);
6021     INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
6022     INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
6023     INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
6024     INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
6025     INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
6026     INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
6027     INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
6028     INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
6029     INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
6030     INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
6031     BASE(undef_fpu, f000, f000);
6032     INSN(fpu,       f200, ffc0, CF_FPU);
6033     INSN(fbcc,      f280, ffc0, CF_FPU);
6034     INSN(fpu,       f200, ffc0, FPU);
6035     INSN(fscc,      f240, ffc0, FPU);
6036     INSN(fbcc,      f280, ff80, FPU);
6037 #if defined(CONFIG_SOFTMMU)
6038     INSN(frestore,  f340, ffc0, CF_FPU);
6039     INSN(fsave,     f300, ffc0, CF_FPU);
6040     INSN(frestore,  f340, ffc0, FPU);
6041     INSN(fsave,     f300, ffc0, FPU);
6042     INSN(intouch,   f340, ffc0, CF_ISA_A);
6043     INSN(cpushl,    f428, ff38, CF_ISA_A);
6044     INSN(cpush,     f420, ff20, M68040);
6045     INSN(cinv,      f400, ff20, M68040);
6046     INSN(pflush,    f500, ffe0, M68040);
6047     INSN(ptest,     f548, ffd8, M68040);
6048     INSN(wddata,    fb00, ff00, CF_ISA_A);
6049     INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
6050 #endif
6051     INSN(move16_mem, f600, ffe0, M68040);
6052     INSN(move16_reg, f620, fff8, M68040);
6053 #undef INSN
6054 }
6055 
6056 /* ??? Some of this implementation is not exception safe.  We should always
6057    write back the result to memory before setting the condition codes.  */
6058 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
6059 {
6060     uint16_t insn = read_im16(env, s);
6061     opcode_table[insn](env, s, insn);
6062     do_writebacks(s);
6063     do_release(s);
6064 }
6065 
6066 /* generate intermediate code for basic block 'tb'.  */
6067 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
6068 {
6069     CPUM68KState *env = cs->env_ptr;
6070     DisasContext dc1, *dc = &dc1;
6071     target_ulong pc_start;
6072     int pc_offset;
6073     int num_insns;
6074     int max_insns;
6075 
6076     /* generate intermediate code */
6077     pc_start = tb->pc;
6078 
6079     dc->tb = tb;
6080 
6081     dc->env = env;
6082     dc->is_jmp = DISAS_NEXT;
6083     dc->pc = pc_start;
6084     dc->cc_op = CC_OP_DYNAMIC;
6085     dc->cc_op_synced = 1;
6086     dc->singlestep_enabled = cs->singlestep_enabled;
6087     dc->done_mac = 0;
6088     dc->writeback_mask = 0;
6089     num_insns = 0;
6090     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6091     if (max_insns == 0) {
6092         max_insns = CF_COUNT_MASK;
6093     }
6094     if (max_insns > TCG_MAX_INSNS) {
6095         max_insns = TCG_MAX_INSNS;
6096     }
6097 
6098     init_release_array(dc);
6099 
6100     gen_tb_start(tb);
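         /* Translate one instruction at a time until a jump is generated,
            the op buffer fills up, the TB approaches a page in size, the
            insn limit is reached, or single-stepping is in effect.  */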
6101     do {
6102         pc_offset = dc->pc - pc_start;
6103         tcg_gen_insn_start(dc->pc, dc->cc_op);
6104         num_insns++;
6105 
6106         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
6107             gen_exception(dc, dc->pc, EXCP_DEBUG);
6108             dc->is_jmp = DISAS_JUMP;
6109             /* The address covered by the breakpoint must be included in
6110                [tb->pc, tb->pc + tb->size) in order for it to be
6111                properly cleared -- thus we increment the PC here so that
6112                the logic setting tb->size below does the right thing.  */
6113             dc->pc += 2;
6114             break;
6115         }
6116 
6117         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6118             gen_io_start();
6119         }
6120 
6121         dc->insn_pc = dc->pc;
6122         disas_m68k_insn(env, dc);
6123     } while (!dc->is_jmp && !tcg_op_buf_full() &&
6124              !cs->singlestep_enabled &&
6125              !singlestep &&
6126              (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
6127              num_insns < max_insns);
6128 
6129     if (tb_cflags(tb) & CF_LAST_IO)
6130         gen_io_end();
6131     if (unlikely(cs->singlestep_enabled)) {
6132         /* Make sure the pc is updated, and raise a debug exception.  */
6133         if (!dc->is_jmp) {
6134             update_cc_op(dc);
6135             tcg_gen_movi_i32(QREG_PC, dc->pc);
6136         }
6137         gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
6138     } else {
6139         switch(dc->is_jmp) {
6140         case DISAS_NEXT:
6141             update_cc_op(dc);
6142             gen_jmp_tb(dc, 0, dc->pc);
6143             break;
6144         default:
6145         case DISAS_JUMP:
6146         case DISAS_UPDATE:
6147             update_cc_op(dc);
6148             /* Indicate that the hash table must be used to find the next TB.  */
6149             tcg_gen_exit_tb(0);
6150             break;
6151         case DISAS_TB_JUMP:
6152             /* nothing more to generate */
6153             break;
6154         }
6155     }
6156     gen_tb_end(tb, num_insns);
6157 
6158 #ifdef DEBUG_DISAS
6159     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6160         && qemu_log_in_addr_range(pc_start)) {
6161         qemu_log_lock();
6162         qemu_log("----------------\n");
6163         qemu_log("IN: %s\n", lookup_symbol(pc_start));
6164         log_target_disas(cs, pc_start, dc->pc - pc_start);
6165         qemu_log("\n");
6166         qemu_log_unlock();
6167     }
6168 #endif
6169     tb->size = dc->pc - pc_start;
6170     tb->icount = num_insns;
6171 }
6172 
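     /* Convert an 80-bit extended-precision register value to a host double,
        used only by the register dump below.  */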
6173 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6174 {
6175     floatx80 a = { .high = high, .low = low };
6176     union {
6177         float64 f64;
6178         double d;
6179     } u;
6180 
6181     u.f64 = floatx80_to_float64(a, &env->fp_status);
6182     return u.d;
6183 }
6184 
6185 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
6186                          int flags)
6187 {
6188     M68kCPU *cpu = M68K_CPU(cs);
6189     CPUM68KState *env = &cpu->env;
6190     int i;
6191     uint16_t sr;
6192     for (i = 0; i < 8; i++) {
6193         cpu_fprintf(f, "D%d = %08x   A%d = %08x   "
6194                     "F%d = %04x %016"PRIx64"  (%12g)\n",
6195                     i, env->dregs[i], i, env->aregs[i],
6196                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
6197                     floatx80_to_double(env, env->fregs[i].l.upper,
6198                                        env->fregs[i].l.lower));
6199     }
6200     cpu_fprintf (f, "PC = %08x   ", env->pc);
6201     sr = env->sr | cpu_m68k_get_ccr(env);
6202     cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
6203                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
6204                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
6205                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
6206                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
6207                 (sr & CCF_C) ? 'C' : '-');
6208     cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
6209                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
6210                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
6211                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
6212                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
6213     cpu_fprintf(f, "\n                                "
6214                    "FPCR =     %04x ", env->fpcr);
6215     switch (env->fpcr & FPCR_PREC_MASK) {
6216     case FPCR_PREC_X:
6217         cpu_fprintf(f, "X ");
6218         break;
6219     case FPCR_PREC_S:
6220         cpu_fprintf(f, "S ");
6221         break;
6222     case FPCR_PREC_D:
6223         cpu_fprintf(f, "D ");
6224         break;
6225     }
6226     switch (env->fpcr & FPCR_RND_MASK) {
6227     case FPCR_RND_N:
6228         cpu_fprintf(f, "RN ");
6229         break;
6230     case FPCR_RND_Z:
6231         cpu_fprintf(f, "RZ ");
6232         break;
6233     case FPCR_RND_M:
6234         cpu_fprintf(f, "RM ");
6235         break;
6236     case FPCR_RND_P:
6237         cpu_fprintf(f, "RP ");
6238         break;
6239     }
6240     cpu_fprintf(f, "\n");
6241 #ifdef CONFIG_SOFTMMU
6242     cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
6243                env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
6244                env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
6245                env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
6246     cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
6247     cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
6248     cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
6249                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
6250     cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
6251                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
6252                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
6253     cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
6254                 env->mmu.mmusr, env->mmu.ar);
6255 #endif
6256 }
6257 
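     /* The data[] layout matches the tcg_gen_insn_start(pc, cc_op) call in
        gen_intermediate_code(): data[0] is the PC, data[1] the CC op.  */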
6258 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6259                           target_ulong *data)
6260 {
6261     int cc_op = data[1];
6262     env->pc = data[0];
6263     if (cc_op != CC_OP_DYNAMIC) {
6264         env->cc_op = cc_op;
6265     }
6266 }
6267