xref: /openbmc/qemu/target/m68k/translate.c (revision ba632924)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 
/* Uncomment to log every instruction dispatch (see DISAS_INSN below). */
//#define DEBUG_DISPATCH 1

/* Declare one TCG global per fixed CPU state field listed in qregs.def. */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

/* Mirrors of CPUState fields; bound relative to env in m68k_tcg_init(). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing storage for register names: 8 "Dn" + 8 "An" at 3 bytes each
   (NUL included) plus 4 "ACCn" at 5 bytes each.  */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];    /* data registers D0-D7 */
static TCGv cpu_aregs[8];    /* address registers A0-A7 */
static TCGv_i64 cpu_macc[4]; /* MAC accumulators */

/* Extract a 3-bit register field from an instruction word at POS. */
#define REG(insn, pos)  (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() to honor delayed writebacks. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         get_areg(s, 7)

/* Sentinel returned by EA generation to signal an invalid addressing mode. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "exec/gen-icount.h"
66 
/* Allocate all fixed TCG globals backing the m68k CPU state.
   Called once at CPU creation; must run before any translation.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Bind each QREG_* global declared above to its CPUM68KState field. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /* CPUState fields sit before env inside M68kCPU, hence the negative
       offset back from env.  */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve register names out of cpu_reg_names: "Dn"/"An" take 3 bytes,
       "ACCn" takes 5 (NUL included) — must match the array's size.  */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels at impossible (negative) offsets; only ever compared
       against, never loaded or stored.  */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
111 
112 /* internal defines */
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc; /* address of the next instruction word to fetch */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced; /* nonzero when QREG_CC_OP matches cc_op */
    TCGv_i64 mactmp; /* scratch used by MAC instructions */
    int done_mac;
    int writeback_mask; /* bit n set: An has a pending delayed write */
    TCGv writeback[8]; /* delayed values for address registers */
#define MAX_TO_RELEASE 8
    int release_count; /* temps queued for freeing by do_release() */
    TCGv release[MAX_TO_RELEASE];
} DisasContext;
127 
128 static void init_release_array(DisasContext *s)
129 {
130 #ifdef CONFIG_DEBUG_TCG
131     memset(s->release, 0, sizeof(s->release));
132 #endif
133     s->release_count = 0;
134 }
135 
136 static void do_release(DisasContext *s)
137 {
138     int i;
139     for (i = 0; i < s->release_count; i++) {
140         tcg_temp_free(s->release[i]);
141     }
142     init_release_array(s);
143 }
144 
145 static TCGv mark_to_release(DisasContext *s, TCGv tmp)
146 {
147     g_assert(s->release_count < MAX_TO_RELEASE);
148     return s->release[s->release_count++] = tmp;
149 }
150 
151 static TCGv get_areg(DisasContext *s, unsigned regno)
152 {
153     if (s->writeback_mask & (1 << regno)) {
154         return s->writeback[regno];
155     } else {
156         return cpu_aregs[regno];
157     }
158 }
159 
160 static void delay_set_areg(DisasContext *s, unsigned regno,
161                            TCGv val, bool give_temp)
162 {
163     if (s->writeback_mask & (1 << regno)) {
164         if (give_temp) {
165             tcg_temp_free(s->writeback[regno]);
166             s->writeback[regno] = val;
167         } else {
168             tcg_gen_mov_i32(s->writeback[regno], val);
169         }
170     } else {
171         s->writeback_mask |= 1 << regno;
172         if (give_temp) {
173             s->writeback[regno] = val;
174         } else {
175             TCGv tmp = tcg_temp_new();
176             s->writeback[regno] = tmp;
177             tcg_gen_mov_i32(tmp, val);
178         }
179     }
180 }
181 
182 static void do_writebacks(DisasContext *s)
183 {
184     unsigned mask = s->writeback_mask;
185     if (mask) {
186         s->writeback_mask = 0;
187         do {
188             unsigned regno = ctz32(mask);
189             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
190             tcg_temp_free(s->writeback[regno]);
191             mask &= mask - 1;
192         } while (mask);
193     }
194 }
195 
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
/* Privilege and source/destination function-code MMU indexes, derived
   from the translation block's flags.  */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature shared by every disas_* instruction handler. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug variant: wrap each handler so every dispatch is logged. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
229 
/* For each lazy CC op, the flag bits whose QREG_CC_* registers hold live
   data; set_cc_op() discards the rest when transitioning between ops.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
238 
239 static void set_cc_op(DisasContext *s, CCOp op)
240 {
241     CCOp old_op = s->cc_op;
242     int dead;
243 
244     if (old_op == op) {
245         return;
246     }
247     s->cc_op = op;
248     s->cc_op_synced = 0;
249 
250     /* Discard CC computation that will no longer be used.
251        Note that X and N are never dead.  */
252     dead = cc_op_live[old_op] & ~cc_op_live[op];
253     if (dead & CCF_C) {
254         tcg_gen_discard_i32(QREG_CC_C);
255     }
256     if (dead & CCF_Z) {
257         tcg_gen_discard_i32(QREG_CC_Z);
258     }
259     if (dead & CCF_V) {
260         tcg_gen_discard_i32(QREG_CC_V);
261     }
262 }
263 
264 /* Update the CPU env CC_OP state.  */
265 static void update_cc_op(DisasContext *s)
266 {
267     if (!s->cc_op_synced) {
268         s->cc_op_synced = 1;
269         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
270     }
271 }
272 
273 /* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
280 
281 /* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
288 
289 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
290 {
291     TCGv_i32 tmp;
292 
293     update_cc_op(s);
294     tcg_gen_movi_i32(QREG_PC, dest);
295 
296     tmp = tcg_const_i32(nr);
297     gen_helper_raise_exception(cpu_env, tmp);
298     tcg_temp_free_i32(tmp);
299 
300     s->base.is_jmp = DISAS_NORETURN;
301 }
302 
/* Raise an address-error exception at the current instruction. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
307 
308 /* Generate a load from the specified address.  Narrow values are
309    sign extended to full register width.  */
310 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
311                             int sign, int index)
312 {
313     TCGv tmp;
314     tmp = tcg_temp_new_i32();
315     switch(opsize) {
316     case OS_BYTE:
317         if (sign)
318             tcg_gen_qemu_ld8s(tmp, addr, index);
319         else
320             tcg_gen_qemu_ld8u(tmp, addr, index);
321         break;
322     case OS_WORD:
323         if (sign)
324             tcg_gen_qemu_ld16s(tmp, addr, index);
325         else
326             tcg_gen_qemu_ld16u(tmp, addr, index);
327         break;
328     case OS_LONG:
329         tcg_gen_qemu_ld32u(tmp, addr, index);
330         break;
331     default:
332         g_assert_not_reached();
333     }
334     return tmp;
335 }
336 
337 /* Generate a store.  */
338 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
339                              int index)
340 {
341     switch(opsize) {
342     case OS_BYTE:
343         tcg_gen_qemu_st8(val, addr, index);
344         break;
345     case OS_WORD:
346         tcg_gen_qemu_st16(val, addr, index);
347         break;
348     case OS_LONG:
349         tcg_gen_qemu_st32(val, addr, index);
350         break;
351     default:
352         g_assert_not_reached();
353     }
354 }
355 
/* Access direction for gen_ldst()/gen_ea(): store, or load with
   zero (LOADU) or sign (LOADS) extension.  */
typedef enum {
    EA_STORE,
    EA_LOADU,
    EA_LOADS
} ea_what;
361 
362 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
363    otherwise generate a store.  */
364 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
365                      ea_what what, int index)
366 {
367     if (what == EA_STORE) {
368         gen_store(s, opsize, addr, val, index);
369         return store_dummy;
370     } else {
371         return mark_to_release(s, gen_load(s, opsize, addr,
372                                            what == EA_LOADS, index));
373     }
374 }
375 
376 /* Read a 16-bit immediate constant */
377 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
378 {
379     uint16_t im;
380     im = cpu_lduw_code(env, s->pc);
381     s->pc += 2;
382     return im;
383 }
384 
385 /* Read an 8-bit immediate constant */
386 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
387 {
388     return read_im16(env, s);
389 }
390 
391 /* Read a 32-bit immediate constant.  */
392 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
393 {
394     uint32_t im;
395     im = read_im16(env, s) << 16;
396     im |= 0xffff & read_im16(env, s);
397     return im;
398 }
399 
400 /* Read a 64-bit immediate constant.  */
401 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
402 {
403     uint64_t im;
404     im = (uint64_t)read_im32(env, s) << 32;
405     im |= (uint64_t)read_im32(env, s);
406     return im;
407 }
408 
409 /* Calculate and address index.  */
410 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
411 {
412     TCGv add;
413     int scale;
414 
415     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
416     if ((ext & 0x800) == 0) {
417         tcg_gen_ext16s_i32(tmp, add);
418         add = tmp;
419     }
420     scale = (ext >> 9) & 3;
421     if (scale != 0) {
422         tcg_gen_shli_i32(tmp, add, scale);
423         add = tmp;
424     }
425     return add;
426 }
427 
428 /* Handle a base + index + displacement effective addresss.
429    A NULL_QREG base means pc-relative.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the address of the extension word for pc-relative modes. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* A word-sized index requires the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* The scale field reads as zero without scaled indexing. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = mark_to_release(s, tcg_temp_new());
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into a constant. */
                base = mark_to_release(s, tcg_const_i32(offset + bd));
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* Both base and index suppressed: address is just bd. */
            add = mark_to_release(s, tcg_const_i32(bd));
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
            if ((ext & 0x44) == 4) {
                /* post-index: the index is added after the fetch. */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = mark_to_release(s, tcg_temp_new());
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative with an 8-bit displacement. */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
532 
533 /* Sign or zero extend a value.  */
534 
535 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
536 {
537     switch (opsize) {
538     case OS_BYTE:
539         if (sign) {
540             tcg_gen_ext8s_i32(res, val);
541         } else {
542             tcg_gen_ext8u_i32(res, val);
543         }
544         break;
545     case OS_WORD:
546         if (sign) {
547             tcg_gen_ext16s_i32(res, val);
548         } else {
549             tcg_gen_ext16u_i32(res, val);
550         }
551         break;
552     case OS_LONG:
553         tcg_gen_mov_i32(res, val);
554         break;
555     default:
556         g_assert_not_reached();
557     }
558 }
559 
560 /* Evaluate all the CC flags.  */
561 
/* Materialize C, V, Z, N, X into QREG_CC_* from the lazy cc_op state,
   leaving the context in CC_OP_FLAGS.  */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialized. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* Here CC_N holds the result, CC_V the second operand, and
           CC_X the carry out of the addition.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V); /* recover first operand */
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* Here CC_N holds the result, CC_V the subtrahend, and CC_X the
           borrow out of the subtraction.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V); /* recover first operand */
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* Here CC_N holds the first operand and CC_V the second; the
           subtraction itself still has to be performed.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* CC_N holds the result; C and V are always clear.  */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* The op is only known at runtime: flush via helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
641 
642 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
643 {
644     TCGv tmp;
645 
646     if (opsize == OS_LONG) {
647         tmp = val;
648     } else {
649         tmp = mark_to_release(s, tcg_temp_new());
650         gen_ext(tmp, val, opsize, sign);
651     }
652 
653     return tmp;
654 }
655 
/* Set the flags for a logical result: CC_N holds the sign-extended
   result and the cc mode switches to CC_OP_LOGIC.  */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
661 
/* Record a compare lazily: CC_N <- first operand, CC_V <- second; the
   actual subtraction happens in gen_flush_flags().  */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
668 
/* Record an add/sub lazily: CC_N <- sign-extended result, CC_V <- the
   second operand; caller sets the matching CC_OP_* mode.  */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
674 
675 static inline int opsize_bytes(int opsize)
676 {
677     switch (opsize) {
678     case OS_BYTE: return 1;
679     case OS_WORD: return 2;
680     case OS_LONG: return 4;
681     case OS_SINGLE: return 4;
682     case OS_DOUBLE: return 8;
683     case OS_EXTENDED: return 12;
684     case OS_PACKED: return 12;
685     default:
686         g_assert_not_reached();
687     }
688 }
689 
690 static inline int insn_opsize(int insn)
691 {
692     switch ((insn >> 6) & 3) {
693     case 0: return OS_BYTE;
694     case 1: return OS_WORD;
695     case 2: return OS_LONG;
696     default:
697         g_assert_not_reached();
698     }
699 }
700 
701 static inline int ext_opsize(int ext, int pos)
702 {
703     switch ((ext >> pos) & 7) {
704     case 0: return OS_LONG;
705     case 1: return OS_SINGLE;
706     case 2: return OS_EXTENDED;
707     case 3: return OS_PACKED;
708     case 4: return OS_WORD;
709     case 5: return OS_DOUBLE;
710     case 6: return OS_BYTE;
711     default:
712         g_assert_not_reached();
713     }
714 }
715 
716 /* Assign value to a register.  If the width is less than the register width
717    only the low part of the register is set.  */
718 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
719 {
720     TCGv tmp;
721     switch (opsize) {
722     case OS_BYTE:
723         tcg_gen_andi_i32(reg, reg, 0xffffff00);
724         tmp = tcg_temp_new();
725         tcg_gen_ext8u_i32(tmp, val);
726         tcg_gen_or_i32(reg, reg, tmp);
727         tcg_temp_free(tmp);
728         break;
729     case OS_WORD:
730         tcg_gen_andi_i32(reg, reg, 0xffff0000);
731         tmp = tcg_temp_new();
732         tcg_gen_ext16u_i32(tmp, val);
733         tcg_gen_or_i32(reg, reg, tmp);
734         tcg_temp_free(tmp);
735         break;
736     case OS_LONG:
737     case OS_SINGLE:
738         tcg_gen_mov_i32(reg, val);
739         break;
740     default:
741         g_assert_not_reached();
742     }
743 }
744 
745 /* Generate code for an "effective address".  Does not adjust the base
746    register for autoincrement addressing modes.  */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        /* Register operands have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte pushes keep the stack pointer word-aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            /* Immediates have no address. */
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
810 
811 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
812                     int opsize)
813 {
814     int mode = extract32(insn, 3, 3);
815     int reg0 = REG(insn, 0);
816     return gen_lea_mode(env, s, mode, reg0, opsize);
817 }
818 
819 /* Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
820    a write otherwise it is a read (0 == sign extend, -1 == zero extend).
821    ADDRP is non-null for readwrite operands.  */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        if (what == EA_STORE || !addrp) {
            /* Advance An; byte accesses through SP stay word-aligned on
               68000-family CPUs.  */
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt.  */
        if (addrp && what == EA_STORE) {
            /* Write half of a read-modify-write: reuse the address. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            /* Write half of a read-modify-write: reuse the address. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return mark_to_release(s, tcg_const_i32(offset));
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
933 
934 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
935                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
936 {
937     int mode = extract32(insn, 3, 3);
938     int reg0 = REG(insn, 0);
939     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
940 }
941 
/* Return a host pointer temp addressing FP register FREG in env. */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
948 
/* Return a host pointer temp addressing env->fp_result. */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
955 
956 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
957 {
958     TCGv t32;
959     TCGv_i64 t64;
960 
961     t32 = tcg_temp_new();
962     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
963     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
964     tcg_temp_free(t32);
965 
966     t64 = tcg_temp_new_i64();
967     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
968     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
969     tcg_temp_free_i64(t64);
970 }
971 
/* Load a value of size OPSIZE from ADDR into the FP register at FP,
   converting integers and narrower float formats via helpers.  */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* No extended format on ColdFire FPUs. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Sign/exponent in the high 16 bits of the first long, then the
           64-bit mantissa at ADDR + 4.  */
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1025 
/*
 * Convert the FP register addressed by FP to size OPSIZE and store it
 * to memory at ADDR, using mmu index INDEX.  Mirror image of
 * gen_load_fp: integers go through reds32, single/double through
 * redf32/redf64; OS_EXTENDED is stored raw and is not available on
 * ColdFire; OS_PACKED raises EXCP_FP_UNIMP.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Exponent/sign word goes in the upper half of the first long.  */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        /* 64-bit mantissa at ADDR + 4.  */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}
1079 
1080 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1081                         TCGv_ptr fp, ea_what what, int index)
1082 {
1083     if (what == EA_STORE) {
1084         gen_store_fp(s, opsize, addr, fp, index);
1085     } else {
1086         gen_load_fp(s, opsize, addr, fp, index);
1087     }
1088 }
1089 
/*
 * Generate code to load/store an FP operand (per WHAT) for addressing
 * mode MODE / register REG0 with operand size OPSIZE.  FP addresses
 * the FP register involved; INDEX is the mmu index.
 * Returns 0 on success, -1 for an invalid mode/what combination
 * (caller is expected to raise an address fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Advance the address register past the operand.  */
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Write back the decremented address.  */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be sources.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* First long: exponent/sign in the upper 16 bits.  */
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                /* Following two longs: the 64-bit mantissa.  */
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /* unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1229 
1230 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1231                        int opsize, TCGv_ptr fp, ea_what what, int index)
1232 {
1233     int mode = extract32(insn, 3, 3);
1234     int reg0 = REG(insn, 0);
1235     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1236 }
1237 
/*
 * A materialized condition: TCOND applied to (V1, V2).
 * G1/G2 flag whether V1/V2 are global TCG values (not to be freed by
 * free_cond) or temporaries owned by this structure.
 */
typedef struct {
    TCGCond tcond;
    bool g1;      /* v1 is a global, do not free */
    bool g2;      /* v2 is a global, do not free */
    TCGv v1;
    TCGv v2;
} DisasCompare;
1245 
/*
 * Translate m68k condition code COND (0-15) into a host comparison,
 * filling in *C.  Where possible the comparison is derived directly
 * from the lazily-tracked cc_op state without flushing the flags;
 * otherwise the flags are flushed to CC_OP_FLAGS and the condition is
 * computed from the individual flag registers.
 * Conditions come in inverted pairs (even = negation of odd), handled
 * once at "done" by inverting tcond for even COND values.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* CC_N holds the first operand, CC_V the second.  */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* PL/MI test the sign of the (size-extended) difference.  */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    /* Default: compare V1 against constant zero.  */
    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N.  */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* -(Z == 0 ? 0 : 1) | (N ^ V): sign bit set iff Z or (N ^ V).  */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition codes are the negation of the following odd one.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1424 
/* Release any temporaries allocated by gen_cc_cond (globals are kept).  */
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}
1434 
1435 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1436 {
1437   DisasCompare c;
1438 
1439   gen_cc_cond(&c, s, cond);
1440   update_cc_op(s);
1441   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1442   free_cond(&c);
1443 }
1444 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    /* Resume execution at the insn following the current one.  */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1452 
/*
 * Load a source operand of size OPSIZE from the EA encoded in "insn"
 * into RESULT (sign- or zero-extended per OP_SIGN).  On an invalid EA
 * an address fault is raised and the enclosing function returns.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1461 
/*
 * Store VAL (size OPSIZE) to the EA encoded in INSN.  On an invalid
 * EA an address fault is raised and the enclosing function returns.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1470 
/*
 * Whether a direct TB-to-TB jump to DEST may be used.  In system mode
 * this is only safe when DEST lies on the same guest page as the TB,
 * so that remapping cannot invalidate the chained jump; in user mode
 * it is always allowed.
 */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)
        || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
1480 
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
    if (unlikely(s->base.singlestep_enabled)) {
        /* Single-stepping: trap to the debugger instead of jumping.  */
        gen_exception(s, dest, EXCP_DEBUG);
    } else if (use_goto_tb(s, dest)) {
        /* Chain directly to the destination TB (slot N).  */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: set PC and return to the main loop.  */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1496 
/* Scc: set the destination byte to all-ones if the condition holds,
   else to zero.  */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
    free_cond(&c);

    /* Convert the 0/1 setcond result to 0x00/0xff.  */
    tcg_gen_neg_i32(tmp, tmp);
    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
    tcg_temp_free(tmp);
}
1514 
/* DBcc: if the condition is false, decrement Dn.w and branch unless
   the counter has reached -1.  */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    /* The displacement is relative to the extension word's address.  */
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    /* Condition true: fall through to the next instruction.  */
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low word of Dn is updated.  */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc);
}
1538 
/* Unimplemented MAC-unit instruction: raise a line-A exception.  */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1543 
/* Unimplemented FPU instruction: raise a line-F exception.  */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1548 
DISAS_INSN(undef)
{
    /* ??? This is both instructions that are as yet unimplemented
       for the 680x0 series, as well as those that are implemented
       but actually illegal for CPU32 or pre-68020.  */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1558 
1559 DISAS_INSN(mulw)
1560 {
1561     TCGv reg;
1562     TCGv tmp;
1563     TCGv src;
1564     int sign;
1565 
1566     sign = (insn & 0x100) != 0;
1567     reg = DREG(insn, 9);
1568     tmp = tcg_temp_new();
1569     if (sign)
1570         tcg_gen_ext16s_i32(tmp, reg);
1571     else
1572         tcg_gen_ext16u_i32(tmp, reg);
1573     SRC_EA(env, src, OS_WORD, sign, NULL);
1574     tcg_gen_mul_i32(tmp, tmp, src);
1575     tcg_gen_mov_i32(reg, tmp);
1576     gen_logic_cc(s, tmp, OS_LONG);
1577     tcg_temp_free(tmp);
1578 }
1579 
/*
 * DIVS.W / DIVU.W: divide Dn.l by a 16-bit EA operand, producing a
 * 16-bit remainder and quotient packed in Dn.  The actual division
 * (including divide-by-zero/overflow handling — presumably raised by
 * the helper; confirm in the helper implementation) is done in
 * divsw/divuw.
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_const_i32(REG(insn, 9));
    if (sign) {
        gen_helper_divsw(cpu_env, destr, src);
    } else {
        gen_helper_divuw(cpu_env, destr, src);
    }
    tcg_temp_free(destr);

    set_cc_op(s, CC_OP_FLAGS);
}
1603 
/*
 * DIVS.L / DIVU.L / DIVSL.L / DIVULL.L: 32-bit (or 64/32) division.
 * An extension word selects sign (bit 11), 64-bit dividend (bit 10),
 * and the register pair Dr:Dq.  Division is performed by helpers.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit dividend requires the QUAD_MULDIV feature.  */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_const_i32(REG(ext, 12));
        reg = tcg_const_i32(REG(ext, 0));
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den);
        } else {
            gen_helper_divull(cpu_env, num, reg, den);
        }
        tcg_temp_free(reg);
        tcg_temp_free(num);
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_const_i32(REG(ext, 12));
    reg = tcg_const_i32(REG(ext, 0));
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den);
    } else {
        gen_helper_divul(cpu_env, num, reg, den);
    }
    tcg_temp_free(reg);
    tcg_temp_free(num);

    set_cc_op(s, CC_OP_FLAGS);
}
1652 
/*
 * Emit code for a two-digit BCD addition with extend:
 * dest = dest10 + src10 + X.  The result includes the carry in bit 8
 * (consumed later by bcd_flags).
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*  dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /* t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_const_i32(0x066);
    tcg_gen_add_i32(t0, t0, src);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /* t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /* extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /* generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = t0 * 3, i.e. 0x6 per digit without carry */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);
    tcg_temp_free(t0);

    /* remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
    tcg_temp_free(t1);
}
1713 
/*
 * Emit code for a two-digit BCD subtraction with extend:
 * dest = dest10 - src10 - X, implemented via the bcd_add identity
 * below.  The borrow ends up in bit 8 (consumed by bcd_flags).
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /* t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);
    tcg_temp_free(t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
1766 
/*
 * Set flags after a BCD operation on VAL: Z accumulates (sticky, only
 * ever cleared by a nonzero result), C and X are taken from the carry
 * in bit 8.
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    /* Carry out of the low byte becomes C.  */
    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1776 
/* ABCD Dx,Dy: BCD add of the low bytes of two data registers.  */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    /* Only the low byte of the destination register is written.  */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1791 
/* ABCD -(Ax),-(Ay): BCD add of two bytes via predecrement addressing.  */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* Keep the destination address for the store-back below.  */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1812 
/* SBCD Dx,Dy: BCD subtract of the low bytes of two data registers.  */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    /* Only the low byte of the destination register is written.  */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1828 
/* SBCD -(Ax),-(Ay): BCD subtract of two bytes via predecrement addressing. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    /* Keep the destination address for the store-back below.  */
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1849 
/* NBCD <ea>: BCD negate (0 - operand - X) of a byte operand.  */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    /* Negation is subtraction from zero.  */
    dest = tcg_const_i32(0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}
1868 
/*
 * ADD/SUB: bit 14 of the insn selects add vs sub, bit 8 selects the
 * direction (EA as destination vs Dn as destination).  Computes the
 * X flag as the carry/borrow via an unsigned compare, and leaves
 * N/Z/V to the lazily evaluated CC_OP_ADDx/SUBx state.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination, Dn the source.  */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* Dn is the destination, <ea> the source.  */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: unsigned result wrapped below the addend.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow: minuend unsigned-less-than subtrahend.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}
1907 
/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1915 
1916 DISAS_INSN(bitop_reg)
1917 {
1918     int opsize;
1919     int op;
1920     TCGv src1;
1921     TCGv src2;
1922     TCGv tmp;
1923     TCGv addr;
1924     TCGv dest;
1925 
1926     if ((insn & 0x38) != 0)
1927         opsize = OS_BYTE;
1928     else
1929         opsize = OS_LONG;
1930     op = (insn >> 6) & 3;
1931     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1932 
1933     gen_flush_flags(s);
1934     src2 = tcg_temp_new();
1935     if (opsize == OS_BYTE)
1936         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1937     else
1938         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1939 
1940     tmp = tcg_const_i32(1);
1941     tcg_gen_shl_i32(tmp, tmp, src2);
1942     tcg_temp_free(src2);
1943 
1944     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1945 
1946     dest = tcg_temp_new();
1947     switch (op) {
1948     case 1: /* bchg */
1949         tcg_gen_xor_i32(dest, src1, tmp);
1950         break;
1951     case 2: /* bclr */
1952         tcg_gen_andc_i32(dest, src1, tmp);
1953         break;
1954     case 3: /* bset */
1955         tcg_gen_or_i32(dest, src1, tmp);
1956         break;
1957     default: /* btst */
1958         break;
1959     }
1960     tcg_temp_free(tmp);
1961     if (op) {
1962         DEST_EA(env, insn, opsize, dest, &addr);
1963     }
1964     tcg_temp_free(dest);
1965 }
1966 
/* SATS: saturate a data register based on the overflow flag (V).  */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1975 
/* Push the 32-bit value VAL onto the stack (predecrement SP by 4).  */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    /* Store first, then commit SP, so a store fault leaves SP intact.  */
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    tcg_gen_mov_i32(QREG_SP, tmp);
    tcg_temp_free(tmp);
}
1986 
1987 static TCGv mreg(int reg)
1988 {
1989     if (reg < 8) {
1990         /* Dx */
1991         return cpu_dregs[reg];
1992     }
1993     /* Ax */
1994     return cpu_aregs[reg & 7];
1995 }
1996 
/* movem: move multiple registers to/from memory.  A 16-bit mask word
 * follows the opcode; bit i normally selects D0-D7 (bits 0-7) and
 * A0-A7 (bits 8-15).  In predecrement mode (see the (mask << i)
 * scan below) register i is instead selected by mask bit (15 - i).
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide.  */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy of the address so the architectural
       register is only updated where the insn requires it.  */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* All values are loaded into temporaries first and only then
           copied into the architectural registers.  */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An); registers are stored in
               descending order, decrementing the address before each
               store.  Note (mask << i) & 0x8000 tests mask bit (15-i).  */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}
2104 
/* movep: transfer a word or long between a data register and
 * alternate bytes of memory (every second byte starting at
 * An + 16-bit displacement).  Bit 6 of the insn selects long (4
 * bytes) vs word (2 bytes); bit 7 selects register-to-memory vs
 * memory-to-register.  The most significant byte is transferred
 * first, at the lowest address.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    /* abuf walks the alternate-byte addresses; dbuf stages each byte.  */
    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* register to memory: store byte (i-1) of reg, high byte first.  */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* memory to register: deposit each loaded byte into its slot;
           untouched bytes of reg are preserved.  */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
    tcg_temp_free(abuf);
    tcg_temp_free(dbuf);
}
2149 
2150 DISAS_INSN(bitop_im)
2151 {
2152     int opsize;
2153     int op;
2154     TCGv src1;
2155     uint32_t mask;
2156     int bitnum;
2157     TCGv tmp;
2158     TCGv addr;
2159 
2160     if ((insn & 0x38) != 0)
2161         opsize = OS_BYTE;
2162     else
2163         opsize = OS_LONG;
2164     op = (insn >> 6) & 3;
2165 
2166     bitnum = read_im16(env, s);
2167     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2168         if (bitnum & 0xfe00) {
2169             disas_undef(env, s, insn);
2170             return;
2171         }
2172     } else {
2173         if (bitnum & 0xff00) {
2174             disas_undef(env, s, insn);
2175             return;
2176         }
2177     }
2178 
2179     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2180 
2181     gen_flush_flags(s);
2182     if (opsize == OS_BYTE)
2183         bitnum &= 7;
2184     else
2185         bitnum &= 31;
2186     mask = 1 << bitnum;
2187 
2188    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2189 
2190     if (op) {
2191         tmp = tcg_temp_new();
2192         switch (op) {
2193         case 1: /* bchg */
2194             tcg_gen_xori_i32(tmp, src1, mask);
2195             break;
2196         case 2: /* bclr */
2197             tcg_gen_andi_i32(tmp, src1, ~mask);
2198             break;
2199         case 3: /* bset */
2200             tcg_gen_ori_i32(tmp, src1, mask);
2201             break;
2202         default: /* btst */
2203             break;
2204         }
2205         DEST_EA(env, insn, opsize, tmp, &addr);
2206         tcg_temp_free(tmp);
2207     }
2208 }
2209 
2210 static TCGv gen_get_ccr(DisasContext *s)
2211 {
2212     TCGv dest;
2213 
2214     update_cc_op(s);
2215     dest = tcg_temp_new();
2216     gen_helper_get_ccr(dest, cpu_env);
2217     return dest;
2218 }
2219 
2220 static TCGv gen_get_sr(DisasContext *s)
2221 {
2222     TCGv ccr;
2223     TCGv sr;
2224 
2225     ccr = gen_get_ccr(s);
2226     sr = tcg_temp_new();
2227     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2228     tcg_gen_or_i32(sr, sr, ccr);
2229     return sr;
2230 }
2231 
2232 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2233 {
2234     if (ccr_only) {
2235         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2236         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2237         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2238         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2239         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2240     } else {
2241         TCGv sr = tcg_const_i32(val);
2242         gen_helper_set_sr(cpu_env, sr);
2243         tcg_temp_free(sr);
2244     }
2245     set_cc_op(s, CC_OP_FLAGS);
2246 }
2247 
2248 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2249 {
2250     if (ccr_only) {
2251         gen_helper_set_ccr(cpu_env, val);
2252     } else {
2253         gen_helper_set_sr(cpu_env, val);
2254     }
2255     set_cc_op(s, CC_OP_FLAGS);
2256 }
2257 
2258 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2259                            bool ccr_only)
2260 {
2261     if ((insn & 0x3f) == 0x3c) {
2262         uint16_t val;
2263         val = read_im16(env, s);
2264         gen_set_sr_im(s, val, ccr_only);
2265     } else {
2266         TCGv src;
2267         SRC_EA(env, src, OS_WORD, 0, NULL);
2268         gen_set_sr(s, src, ccr_only);
2269     }
2270 }
2271 
/* Immediate arithmetic/logic group: ori/andi/subi/addi/eori/cmpi,
 * selected by bits 11-9 (op), including the ori/andi/eori to CCR/SR
 * forms (EA field == 0x3c).
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Fetch the sign-extended immediate operand.  */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            /* byte size selects CCR */
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* word size selects SR; privileged */
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi (op == 6) never writes back, so needs no addr.  */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* X/C is set on borrow: src1 < im unsigned.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X/C is set on carry: dest < im unsigned after the add.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}
2375 
/* cas: compare and swap.  Compare Dc against <EA>; if equal, store
 * Du to <EA>; otherwise load <EA> into Dc.  Implemented with an
 * atomic cmpxchg TCG op.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    /* Bits 10-9 encode the size.  */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /* if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* Write back the address register for the auto-modify modes.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2439 
/* cas2 (word): double compare-and-swap on two addresses, each taken
 * from a data or address register named in an extension word.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* No parallel-safe helper exists for the word form; bail out
           to a serial execution context instead.  */
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2490 
/* cas2 (long): double compare-and-swap on two addresses, each taken
 * from a data or address register named in an extension word.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate for the helper.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2540 
2541 DISAS_INSN(byterev)
2542 {
2543     TCGv reg;
2544 
2545     reg = DREG(insn, 0);
2546     tcg_gen_bswap32_i32(reg, reg);
2547 }
2548 
2549 DISAS_INSN(move)
2550 {
2551     TCGv src;
2552     TCGv dest;
2553     int op;
2554     int opsize;
2555 
2556     switch (insn >> 12) {
2557     case 1: /* move.b */
2558         opsize = OS_BYTE;
2559         break;
2560     case 2: /* move.l */
2561         opsize = OS_LONG;
2562         break;
2563     case 3: /* move.w */
2564         opsize = OS_WORD;
2565         break;
2566     default:
2567         abort();
2568     }
2569     SRC_EA(env, src, opsize, 1, NULL);
2570     op = (insn >> 6) & 7;
2571     if (op == 1) {
2572         /* movea */
2573         /* The value will already have been sign extended.  */
2574         dest = AREG(insn, 9);
2575         tcg_gen_mov_i32(dest, src);
2576     } else {
2577         /* normal move */
2578         uint16_t dest_ea;
2579         dest_ea = ((insn >> 9) & 7) | (op << 3);
2580         DEST_EA(env, dest_ea, opsize, src, NULL);
2581         /* This will be correct because loads sign extend.  */
2582         gen_logic_cc(s, src, opsize);
2583     }
2584 }
2585 
/* negx: negate with extend.  Computes 0 - (src + X), setting all
 * condition codes; Z is only ever cleared, never set ("sticky", for
 * multi-precision negation sequences).
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Keep only the borrow bit of the double-word subtract.  */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2627 
2628 DISAS_INSN(lea)
2629 {
2630     TCGv reg;
2631     TCGv tmp;
2632 
2633     reg = AREG(insn, 9);
2634     tmp = gen_lea(env, s, insn, OS_LONG);
2635     if (IS_NULL_QREG(tmp)) {
2636         gen_addr_fault(s);
2637         return;
2638     }
2639     tcg_gen_mov_i32(reg, tmp);
2640 }
2641 
2642 DISAS_INSN(clr)
2643 {
2644     int opsize;
2645     TCGv zero;
2646 
2647     zero = tcg_const_i32(0);
2648 
2649     opsize = insn_opsize(insn);
2650     DEST_EA(env, insn, opsize, zero, NULL);
2651     gen_logic_cc(s, zero, opsize);
2652     tcg_temp_free(zero);
2653 }
2654 
2655 DISAS_INSN(move_from_ccr)
2656 {
2657     TCGv ccr;
2658 
2659     ccr = gen_get_ccr(s);
2660     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2661 }
2662 
2663 DISAS_INSN(neg)
2664 {
2665     TCGv src1;
2666     TCGv dest;
2667     TCGv addr;
2668     int opsize;
2669 
2670     opsize = insn_opsize(insn);
2671     SRC_EA(env, src1, opsize, 1, &addr);
2672     dest = tcg_temp_new();
2673     tcg_gen_neg_i32(dest, src1);
2674     set_cc_op(s, CC_OP_SUBB + opsize);
2675     gen_update_cc_add(dest, src1, opsize);
2676     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2677     DEST_EA(env, insn, opsize, dest, &addr);
2678     tcg_temp_free(dest);
2679 }
2680 
DISAS_INSN(move_to_ccr)
{
    /* move to CCR: shared path with move-to-SR, restricted to CCR.  */
    gen_move_to_sr(env, s, insn, true);
}
2685 
2686 DISAS_INSN(not)
2687 {
2688     TCGv src1;
2689     TCGv dest;
2690     TCGv addr;
2691     int opsize;
2692 
2693     opsize = insn_opsize(insn);
2694     SRC_EA(env, src1, opsize, 1, &addr);
2695     dest = tcg_temp_new();
2696     tcg_gen_not_i32(dest, src1);
2697     DEST_EA(env, insn, opsize, dest, &addr);
2698     gen_logic_cc(s, dest, opsize);
2699 }
2700 
2701 DISAS_INSN(swap)
2702 {
2703     TCGv src1;
2704     TCGv src2;
2705     TCGv reg;
2706 
2707     src1 = tcg_temp_new();
2708     src2 = tcg_temp_new();
2709     reg = DREG(insn, 0);
2710     tcg_gen_shli_i32(src1, reg, 16);
2711     tcg_gen_shri_i32(src2, reg, 16);
2712     tcg_gen_or_i32(reg, src1, src2);
2713     tcg_temp_free(src2);
2714     tcg_temp_free(src1);
2715     gen_logic_cc(s, reg, OS_LONG);
2716 }
2717 
DISAS_INSN(bkpt)
{
    /* bkpt: raise a debug exception at the current instruction.  */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
}
2722 
2723 DISAS_INSN(pea)
2724 {
2725     TCGv tmp;
2726 
2727     tmp = gen_lea(env, s, insn, OS_LONG);
2728     if (IS_NULL_QREG(tmp)) {
2729         gen_addr_fault(s);
2730         return;
2731     }
2732     gen_push(s, tmp);
2733 }
2734 
2735 DISAS_INSN(ext)
2736 {
2737     int op;
2738     TCGv reg;
2739     TCGv tmp;
2740 
2741     reg = DREG(insn, 0);
2742     op = (insn >> 6) & 7;
2743     tmp = tcg_temp_new();
2744     if (op == 3)
2745         tcg_gen_ext16s_i32(tmp, reg);
2746     else
2747         tcg_gen_ext8s_i32(tmp, reg);
2748     if (op == 2)
2749         gen_partset_reg(OS_WORD, reg, tmp);
2750     else
2751         tcg_gen_mov_i32(reg, tmp);
2752     gen_logic_cc(s, tmp, OS_LONG);
2753     tcg_temp_free(tmp);
2754 }
2755 
2756 DISAS_INSN(tst)
2757 {
2758     int opsize;
2759     TCGv tmp;
2760 
2761     opsize = insn_opsize(insn);
2762     SRC_EA(env, tmp, opsize, 1, NULL);
2763     gen_logic_cc(s, tmp, opsize);
2764 }
2765 
DISAS_INSN(pulse)
{
  /* Implemented as a NOP.  */
  /* NOTE(review): no architectural side effects are modelled here.  */
}
2770 
DISAS_INSN(illegal)
{
    /* Raise an illegal-instruction exception for this opcode.  */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2775 
/* ??? This should be atomic.  */
/* tas: test-and-set.  Reads the byte operand, sets NZ from it, then
 * writes it back with bit 7 set.  The read-modify-write is done as a
 * separate load and store, hence the non-atomicity note above.
 */
DISAS_INSN(tas)
{
    TCGv dest;
    TCGv src1;
    TCGv addr;

    dest = tcg_temp_new();
    SRC_EA(env, src1, OS_BYTE, 1, &addr);
    gen_logic_cc(s, src1, OS_BYTE);
    tcg_gen_ori_i32(dest, src1, 0x80);
    DEST_EA(env, insn, OS_BYTE, dest, &addr);
    tcg_temp_free(dest);
}
2790 
/* mulu.l/muls.l: 32-bit multiply.  Ext bit 11 selects signed; ext
 * bit 10 selects the 64-bit (quad) form with a second destination
 * register.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form: Dh:Dl = src * Dl.  */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is set iff the full 64-bit product is zero.  */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        /* 32-bit result with a real overflow flag.  */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /* The upper 32 bits of the product are discarded, so
           muls.l and mulu.l are functionally equivalent.  */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2851 
2852 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2853 {
2854     TCGv reg;
2855     TCGv tmp;
2856 
2857     reg = AREG(insn, 0);
2858     tmp = tcg_temp_new();
2859     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2860     gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2861     if ((insn & 7) != 7) {
2862         tcg_gen_mov_i32(reg, tmp);
2863     }
2864     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2865     tcg_temp_free(tmp);
2866 }
2867 
2868 DISAS_INSN(link)
2869 {
2870     int16_t offset;
2871 
2872     offset = read_im16(env, s);
2873     gen_link(s, insn, offset);
2874 }
2875 
2876 DISAS_INSN(linkl)
2877 {
2878     int32_t offset;
2879 
2880     offset = read_im32(env, s);
2881     gen_link(s, insn, offset);
2882 }
2883 
2884 DISAS_INSN(unlk)
2885 {
2886     TCGv src;
2887     TCGv reg;
2888     TCGv tmp;
2889 
2890     src = tcg_temp_new();
2891     reg = AREG(insn, 0);
2892     tcg_gen_mov_i32(src, reg);
2893     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2894     tcg_gen_mov_i32(reg, tmp);
2895     tcg_gen_addi_i32(QREG_SP, src, 4);
2896     tcg_temp_free(src);
2897     tcg_temp_free(tmp);
2898 }
2899 
#if defined(CONFIG_SOFTMMU)
DISAS_INSN(reset)
{
    /* RESET is privileged.  */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
2911 
DISAS_INSN(nop)
{
    /* No operation.  */
}
2915 
2916 DISAS_INSN(rtd)
2917 {
2918     TCGv tmp;
2919     int16_t offset = read_im16(env, s);
2920 
2921     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2922     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2923     gen_jmp(s, tmp);
2924 }
2925 
2926 DISAS_INSN(rts)
2927 {
2928     TCGv tmp;
2929 
2930     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2931     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2932     gen_jmp(s, tmp);
2933 }
2934 
2935 DISAS_INSN(jump)
2936 {
2937     TCGv tmp;
2938 
2939     /* Load the target address first to ensure correct exception
2940        behavior.  */
2941     tmp = gen_lea(env, s, insn, OS_LONG);
2942     if (IS_NULL_QREG(tmp)) {
2943         gen_addr_fault(s);
2944         return;
2945     }
2946     if ((insn & 0x40) == 0) {
2947         /* jsr */
2948         gen_push(s, tcg_const_i32(s->pc));
2949     }
2950     gen_jmp(s, tmp);
2951 }
2952 
2953 DISAS_INSN(addsubq)
2954 {
2955     TCGv src;
2956     TCGv dest;
2957     TCGv val;
2958     int imm;
2959     TCGv addr;
2960     int opsize;
2961 
2962     if ((insn & 070) == 010) {
2963         /* Operation on address register is always long.  */
2964         opsize = OS_LONG;
2965     } else {
2966         opsize = insn_opsize(insn);
2967     }
2968     SRC_EA(env, src, opsize, 1, &addr);
2969     imm = (insn >> 9) & 7;
2970     if (imm == 0) {
2971         imm = 8;
2972     }
2973     val = tcg_const_i32(imm);
2974     dest = tcg_temp_new();
2975     tcg_gen_mov_i32(dest, src);
2976     if ((insn & 0x38) == 0x08) {
2977         /* Don't update condition codes if the destination is an
2978            address register.  */
2979         if (insn & 0x0100) {
2980             tcg_gen_sub_i32(dest, dest, val);
2981         } else {
2982             tcg_gen_add_i32(dest, dest, val);
2983         }
2984     } else {
2985         if (insn & 0x0100) {
2986             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2987             tcg_gen_sub_i32(dest, dest, val);
2988             set_cc_op(s, CC_OP_SUBB + opsize);
2989         } else {
2990             tcg_gen_add_i32(dest, dest, val);
2991             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2992             set_cc_op(s, CC_OP_ADDB + opsize);
2993         }
2994         gen_update_cc_add(dest, val, opsize);
2995     }
2996     tcg_temp_free(val);
2997     DEST_EA(env, insn, opsize, dest, &addr);
2998     tcg_temp_free(dest);
2999 }
3000 
/* trapf: never traps; just skip over any extension words.  The low
 * three bits of the opcode select the operand format.
 */
DISAS_INSN(tpf)
{
    switch (insn & 7) {
    case 2: /* One extension word.  */
        s->pc += 2;
        break;
    case 3: /* Two extension words.  */
        s->pc += 4;
        break;
    case 4: /* No extension words.  */
        break;
    default:
        /* Any other format is an undefined instruction.  */
        disas_undef(env, s, insn);
    }
}
3016 
/* bra/bsr/Bcc: an 8-bit displacement is embedded in the opcode;
 * the value 0 means a 16-bit displacement word follows, and -1
 * (0xff) means a 32-bit displacement follows.  Displacements are
 * relative to the address after the 16-bit opcode (base).
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;
    TCGLabel *l1;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_const_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        /* Branch to the fall-through path on the inverted condition,
           otherwise take the displacement.  */
        l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset);
    }
}
3049 
3050 DISAS_INSN(moveq)
3051 {
3052     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3053     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3054 }
3055 
3056 DISAS_INSN(mvzs)
3057 {
3058     int opsize;
3059     TCGv src;
3060     TCGv reg;
3061 
3062     if (insn & 0x40)
3063         opsize = OS_WORD;
3064     else
3065         opsize = OS_BYTE;
3066     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3067     reg = DREG(insn, 9);
3068     tcg_gen_mov_i32(reg, src);
3069     gen_logic_cc(s, src, opsize);
3070 }
3071 
/* or: inclusive OR between a data register and an EA.  Bit 8 of the
 * insn selects the direction: set means Dn OR <ea> -> <ea>, clear
 * means <ea> OR Dn -> Dn.
 */
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn OR <ea> -> <ea> */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* <ea> OR Dn -> Dn; only the sized part of Dn is written.  */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3095 
3096 DISAS_INSN(suba)
3097 {
3098     TCGv src;
3099     TCGv reg;
3100 
3101     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3102     reg = AREG(insn, 9);
3103     tcg_gen_sub_i32(reg, reg, src);
3104 }
3105 
/* Generate code for dest - (src + X), leaving the result in QREG_CC_N
 * and setting all condition codes.  Z is only ever cleared, never
 * set ("sticky", for multi-precision subtraction sequences).
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) = dest - (src + X);
     */

    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Keep only the borrow bit of the double-word subtract.  */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for substract.  */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3137 
3138 DISAS_INSN(subx_reg)
3139 {
3140     TCGv dest;
3141     TCGv src;
3142     int opsize;
3143 
3144     opsize = insn_opsize(insn);
3145 
3146     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3147     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3148 
3149     gen_subx(s, src, dest, opsize);
3150 
3151     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3152 }
3153 
DISAS_INSN(subx_mem)
{
    /* SUBX -(Ay),-(Ax): subtract with extend between two predecremented
       memory operands; the result is stored back at the Ax address.  */
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay and load the source operand.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax and load the destination operand.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N.  */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
3179 
3180 DISAS_INSN(mov3q)
3181 {
3182     TCGv src;
3183     int val;
3184 
3185     val = (insn >> 9) & 7;
3186     if (val == 0)
3187         val = -1;
3188     src = tcg_const_i32(val);
3189     gen_logic_cc(s, src, OS_LONG);
3190     DEST_EA(env, insn, OS_LONG, src, NULL);
3191     tcg_temp_free(src);
3192 }
3193 
3194 DISAS_INSN(cmp)
3195 {
3196     TCGv src;
3197     TCGv reg;
3198     int opsize;
3199 
3200     opsize = insn_opsize(insn);
3201     SRC_EA(env, src, opsize, 1, NULL);
3202     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3203     gen_update_cc_cmp(s, reg, src, opsize);
3204 }
3205 
3206 DISAS_INSN(cmpa)
3207 {
3208     int opsize;
3209     TCGv src;
3210     TCGv reg;
3211 
3212     if (insn & 0x100) {
3213         opsize = OS_LONG;
3214     } else {
3215         opsize = OS_WORD;
3216     }
3217     SRC_EA(env, src, opsize, 1, NULL);
3218     reg = AREG(insn, 9);
3219     gen_update_cc_cmp(s, reg, src, OS_LONG);
3220 }
3221 
DISAS_INSN(cmpm)
{
    /* CMPM (Ay)+,(Ax)+: compare two memory operands, both fetched with
       post-increment addressing, and set the flags from dst - src.  */
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3236 
3237 DISAS_INSN(eor)
3238 {
3239     TCGv src;
3240     TCGv dest;
3241     TCGv addr;
3242     int opsize;
3243 
3244     opsize = insn_opsize(insn);
3245 
3246     SRC_EA(env, src, opsize, 0, &addr);
3247     dest = tcg_temp_new();
3248     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3249     gen_logic_cc(s, dest, opsize);
3250     DEST_EA(env, insn, opsize, dest, &addr);
3251     tcg_temp_free(dest);
3252 }
3253 
3254 static void do_exg(TCGv reg1, TCGv reg2)
3255 {
3256     TCGv temp = tcg_temp_new();
3257     tcg_gen_mov_i32(temp, reg1);
3258     tcg_gen_mov_i32(reg1, reg2);
3259     tcg_gen_mov_i32(reg2, temp);
3260     tcg_temp_free(temp);
3261 }
3262 
3263 DISAS_INSN(exg_dd)
3264 {
3265     /* exchange Dx and Dy */
3266     do_exg(DREG(insn, 9), DREG(insn, 0));
3267 }
3268 
3269 DISAS_INSN(exg_aa)
3270 {
3271     /* exchange Ax and Ay */
3272     do_exg(AREG(insn, 9), AREG(insn, 0));
3273 }
3274 
3275 DISAS_INSN(exg_da)
3276 {
3277     /* exchange Dx and Ay */
3278     do_exg(DREG(insn, 9), AREG(insn, 0));
3279 }
3280 
DISAS_INSN(and)
{
    /* AND: bitwise-AND between Dn and <ea>.  Bit 8 of the opcode selects
     * the direction: set means Dn is the source and <ea> the destination,
     * clear means <ea> is the source and Dn the destination.
     */
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* Dn & <ea> -> <ea> */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* <ea> & Dn -> Dn; only the opsize-wide part of Dn is written */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}
3305 
3306 DISAS_INSN(adda)
3307 {
3308     TCGv src;
3309     TCGv reg;
3310 
3311     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3312     reg = AREG(insn, 9);
3313     tcg_gen_add_i32(reg, reg, src);
3314 }
3315 
/* Emit an add-with-extend: (X, N) = src + dest + X, setting all of CCR.
 * The opsize-wide, sign-extended result is left in QREG_CC_N for the
 * caller to write back.  src and dest must already be sign-extended to
 * 32 bits (see addx_reg/addx_mem).
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp;

    gen_flush_flags(s); /* compute old Z */

    /* Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    /* Two double-width adds so the carry out accumulates in QREG_CC_X;
       tmp is a zero source throughout. */
    tmp = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition:
       V = (result ^ src) & ~(dest ^ src), tested in the sign bit. */

    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
    tcg_temp_free(tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3346 
3347 DISAS_INSN(addx_reg)
3348 {
3349     TCGv dest;
3350     TCGv src;
3351     int opsize;
3352 
3353     opsize = insn_opsize(insn);
3354 
3355     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3356     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3357 
3358     gen_addx(s, src, dest, opsize);
3359 
3360     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3361 }
3362 
DISAS_INSN(addx_mem)
{
    /* ADDX -(Ay),-(Ax): add with extend between two predecremented
       memory operands; the result is stored back at the Ax address.  */
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay and load the source operand.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax and load the destination operand.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in QREG_CC_N.  */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}
3388 
/* Emit an immediate-count shift (asl/asr/lsl/lsr #n,Dx) of the given
 * opsize.  The 3-bit count field encodes 1..8, with 0 meaning 8.
 * Sets all of CCR and leaves the cc state as CC_OP_FLAGS.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;      /* logical (lsl/lsr) vs arithmetic */
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C gets the last bit shifted out.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V is set iff any of the top count+1 bits differ from
                   the sign bit, i.e. the sign changed during the shift. */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: C is the last bit shifted out.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3439 
/* Emit a register-count shift (asl/asr/lsl/lsr Dy,Dx) of the given
 * opsize.  The count is taken from Dy modulo 64.  Sets all of CCR and
 * leaves the cc state as CC_OP_FLAGS.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;      /* logical (lsl/lsr) vs arithmetic */
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag.  */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* The high half of the 64-bit result holds the bits shifted
               out; its lsb is the carry. */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* For byte/word the last bit out is at position 'bits';
               force C=0 when the count is zero. */
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            /* Turn the 0/1 setcond result into the 0/-1 flag form.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: position the value in the high half so the last
           bit shifted out ends up in bit 31 of the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3534 
3535 DISAS_INSN(shift8_im)
3536 {
3537     shift_im(s, insn, OS_BYTE);
3538 }
3539 
3540 DISAS_INSN(shift16_im)
3541 {
3542     shift_im(s, insn, OS_WORD);
3543 }
3544 
3545 DISAS_INSN(shift_im)
3546 {
3547     shift_im(s, insn, OS_LONG);
3548 }
3549 
3550 DISAS_INSN(shift8_reg)
3551 {
3552     shift_reg(s, insn, OS_BYTE);
3553 }
3554 
3555 DISAS_INSN(shift16_reg)
3556 {
3557     shift_reg(s, insn, OS_WORD);
3558 }
3559 
3560 DISAS_INSN(shift_reg)
3561 {
3562     shift_reg(s, insn, OS_LONG);
3563 }
3564 
DISAS_INSN(shift_mem)
{
    /* Memory shift: a word in memory shifted by exactly one bit
       (asl/asr/lsl/lsr <ea>).  Sets all of CCR.  */
    int logical = insn & 8;      /* logical (lsl/lsr) vs arithmetic */
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C gets the bit shifted out (bit 15); N gets the result.  */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /* Note that ColdFire always clears V,
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* For a one-bit shift, V is set iff the top two bits of the
               source differ; the xor puts that into the sign bit. */
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* Right shift by one: C is bit 0 of the source.  */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3602 
/* Emit a plain rotate (rol/ror) of reg by shift bits at the given size
 * (8, 16 or 32), updating reg in place and setting N, Z, C and V.
 * X is not affected by plain rotates.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    /* Sign-extend the rotated result back to 32 bits for the flags.  */
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the last bit rotated: bit 0 for a left rotate, the sign bit
       for a right rotate.  */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3650 
3651 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3652 {
3653     switch (size) {
3654     case 8:
3655         tcg_gen_ext8s_i32(reg, reg);
3656         break;
3657     case 16:
3658         tcg_gen_ext16s_i32(reg, reg);
3659         break;
3660     default:
3661         break;
3662     }
3663     tcg_gen_mov_i32(QREG_CC_N, reg);
3664     tcg_gen_mov_i32(QREG_CC_Z, reg);
3665     tcg_gen_mov_i32(QREG_CC_X, X);
3666     tcg_gen_mov_i32(QREG_CC_C, X);
3667     tcg_gen_movi_i32(QREG_CC_V, 0);
3668 }
3669 
3670 /* Result of rotate_x() is valid if 0 <= shift <= size */
3671 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3672 {
3673     TCGv X, shl, shr, shx, sz, zero;
3674 
3675     sz = tcg_const_i32(size);
3676 
3677     shr = tcg_temp_new();
3678     shl = tcg_temp_new();
3679     shx = tcg_temp_new();
3680     if (left) {
3681         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3682         tcg_gen_movi_i32(shr, size + 1);
3683         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3684         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3685         /* shx = shx < 0 ? size : shx; */
3686         zero = tcg_const_i32(0);
3687         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3688         tcg_temp_free(zero);
3689     } else {
3690         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3691         tcg_gen_movi_i32(shl, size + 1);
3692         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3693         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3694     }
3695 
3696     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3697 
3698     tcg_gen_shl_i32(shl, reg, shl);
3699     tcg_gen_shr_i32(shr, reg, shr);
3700     tcg_gen_or_i32(reg, shl, shr);
3701     tcg_temp_free(shl);
3702     tcg_temp_free(shr);
3703     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3704     tcg_gen_or_i32(reg, reg, shx);
3705     tcg_temp_free(shx);
3706 
3707     /* X = (reg >> size) & 1 */
3708 
3709     X = tcg_temp_new();
3710     tcg_gen_shr_i32(X, reg, sz);
3711     tcg_gen_andi_i32(X, X, 1);
3712     tcg_temp_free(sz);
3713 
3714     return X;
3715 }
3716 
3717 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3718 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3719 {
3720     TCGv_i64 t0, shift64;
3721     TCGv X, lo, hi, zero;
3722 
3723     shift64 = tcg_temp_new_i64();
3724     tcg_gen_extu_i32_i64(shift64, shift);
3725 
3726     t0 = tcg_temp_new_i64();
3727 
3728     X = tcg_temp_new();
3729     lo = tcg_temp_new();
3730     hi = tcg_temp_new();
3731 
3732     if (left) {
3733         /* create [reg:X:..] */
3734 
3735         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3736         tcg_gen_concat_i32_i64(t0, lo, reg);
3737 
3738         /* rotate */
3739 
3740         tcg_gen_rotl_i64(t0, t0, shift64);
3741         tcg_temp_free_i64(shift64);
3742 
3743         /* result is [reg:..:reg:X] */
3744 
3745         tcg_gen_extr_i64_i32(lo, hi, t0);
3746         tcg_gen_andi_i32(X, lo, 1);
3747 
3748         tcg_gen_shri_i32(lo, lo, 1);
3749     } else {
3750         /* create [..:X:reg] */
3751 
3752         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3753 
3754         tcg_gen_rotr_i64(t0, t0, shift64);
3755         tcg_temp_free_i64(shift64);
3756 
3757         /* result is value: [X:reg:..:reg] */
3758 
3759         tcg_gen_extr_i64_i32(lo, hi, t0);
3760 
3761         /* extract X */
3762 
3763         tcg_gen_shri_i32(X, hi, 31);
3764 
3765         /* extract result */
3766 
3767         tcg_gen_shli_i32(hi, hi, 1);
3768     }
3769     tcg_temp_free_i64(t0);
3770     tcg_gen_or_i32(lo, lo, hi);
3771     tcg_temp_free(hi);
3772 
3773     /* if shift == 0, register and X are not affected */
3774 
3775     zero = tcg_const_i32(0);
3776     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3777     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3778     tcg_temp_free(zero);
3779     tcg_temp_free(lo);
3780 
3781     return X;
3782 }
3783 
3784 DISAS_INSN(rotate_im)
3785 {
3786     TCGv shift;
3787     int tmp;
3788     int left = (insn & 0x100);
3789 
3790     tmp = (insn >> 9) & 7;
3791     if (tmp == 0) {
3792         tmp = 8;
3793     }
3794 
3795     shift = tcg_const_i32(tmp);
3796     if (insn & 8) {
3797         rotate(DREG(insn, 0), shift, left, 32);
3798     } else {
3799         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3800         rotate_x_flags(DREG(insn, 0), X, 32);
3801         tcg_temp_free(X);
3802     }
3803     tcg_temp_free(shift);
3804 
3805     set_cc_op(s, CC_OP_FLAGS);
3806 }
3807 
3808 DISAS_INSN(rotate8_im)
3809 {
3810     int left = (insn & 0x100);
3811     TCGv reg;
3812     TCGv shift;
3813     int tmp;
3814 
3815     reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3816 
3817     tmp = (insn >> 9) & 7;
3818     if (tmp == 0) {
3819         tmp = 8;
3820     }
3821 
3822     shift = tcg_const_i32(tmp);
3823     if (insn & 8) {
3824         rotate(reg, shift, left, 8);
3825     } else {
3826         TCGv X = rotate_x(reg, shift, left, 8);
3827         rotate_x_flags(reg, X, 8);
3828         tcg_temp_free(X);
3829     }
3830     tcg_temp_free(shift);
3831     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3832     set_cc_op(s, CC_OP_FLAGS);
3833 }
3834 
3835 DISAS_INSN(rotate16_im)
3836 {
3837     int left = (insn & 0x100);
3838     TCGv reg;
3839     TCGv shift;
3840     int tmp;
3841 
3842     reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3843     tmp = (insn >> 9) & 7;
3844     if (tmp == 0) {
3845         tmp = 8;
3846     }
3847 
3848     shift = tcg_const_i32(tmp);
3849     if (insn & 8) {
3850         rotate(reg, shift, left, 16);
3851     } else {
3852         TCGv X = rotate_x(reg, shift, left, 16);
3853         rotate_x_flags(reg, X, 16);
3854         tcg_temp_free(X);
3855     }
3856     tcg_temp_free(shift);
3857     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3858     set_cc_op(s, CC_OP_FLAGS);
3859 }
3860 
DISAS_INSN(rotate_reg)
{
    /* 32-bit rotate with the count in Dy: ROL/ROR (bit 3 set) or
       ROXL/ROXR (bit 3 clear).  */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate uses the count modulo 32.  */
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C (QREG_CC_V was set to 0 by rotate()) */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33: rotate-through-X cycles over 33 bit positions */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}
3894 
DISAS_INSN(rotate8_reg)
{
    /* 8-bit rotate with the count in Dy: ROL/ROR (bit 3 set) or
       ROXL/ROXR (bit 3 clear).  */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate uses the count modulo 8.  */
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C (QREG_CC_V was set to 0 by rotate()) */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9: rotate-through-X cycles over 9 bit positions */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3929 
DISAS_INSN(rotate16_reg)
{
    /* 16-bit rotate with the count in Dy: ROL/ROR (bit 3 set) or
       ROXL/ROXR (bit 3 clear).  */
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate uses the count modulo 16.  */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C (QREG_CC_V was set to 0 by rotate()) */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17: rotate-through-X cycles over 17 bit positions */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3964 
3965 DISAS_INSN(rotate_mem)
3966 {
3967     TCGv src;
3968     TCGv addr;
3969     TCGv shift;
3970     int left = (insn & 0x100);
3971 
3972     SRC_EA(env, src, OS_WORD, 0, &addr);
3973 
3974     shift = tcg_const_i32(1);
3975     if (insn & 0x0200) {
3976         rotate(src, shift, left, 16);
3977     } else {
3978         TCGv X = rotate_x(src, shift, left, 16);
3979         rotate_x_flags(src, X, 16);
3980         tcg_temp_free(X);
3981     }
3982     tcg_temp_free(shift);
3983     DEST_EA(env, insn, OS_WORD, src, &addr);
3984     set_cc_op(s, CC_OP_FLAGS);
3985 }
3986 
DISAS_INSN(bfext_reg)
{
    /* BFEXTU/BFEXTS Dn{offset:width},Dm: extract a bit-field from a data
       register into Dm, zero- (BFEXTU) or sign- (BFEXTS) extended.
       Offset and width each come either from the extension word or from
       a register.  N and Z are set from the sign-extended field.  */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; /* width 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the complement of the
       width to extend the field.  */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32); a width of 0 encodes 32.  */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        /* The arithmetic shift always feeds the flags via QREG_CC_N.  */
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely.  */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}
4051 
DISAS_INSN(bfext_mem)
{
    /* BFEXTU/BFEXTS <ea>{offset:width},Dn: extract a bit-field from
       memory via helper; the helpers also produce the value used for
       the N/Z flags.  */
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset: register if the D bit is set, else immediate.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* The unsigned helper returns a 64-bit pair: the low half is the
           extracted value, the high half the flag value.  */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free len/ofs when they are constants we allocated above.  */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4094 
DISAS_INSN(bfop_reg)
{
    /* Register forms of BFCHG/BFCLR/BFFFO/BFSET/BFTST (selected by bits
       11-8 of the opcode).  The field value is rotated into QREG_CC_N
       for the flags, and 'mask' holds the field mask rotated into
       position (with the field bits CLEAR, all others set).  */
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; /* width 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs, tlen;

    /* bfffo additionally needs the offset and length as values.  */
    tofs = NULL;
    tlen = NULL;
    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
        tofs = tcg_temp_new();
        tlen = tcg_temp_new();
    }

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0: rotate instead of shift.  */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
        mask = tcg_const_i32(ror32(maski, ofs));
        if (tofs) {
            tcg_gen_movi_i32(tofs, ofs);
            tcg_gen_movi_i32(tlen, len);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            mask = tcg_const_i32(0x7fffffffu);
            tcg_gen_shr_i32(mask, mask, tmp);
            if (tlen) {
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
            if (tlen) {
                tcg_gen_movi_i32(tlen, len);
            }
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (tofs) {
                tcg_gen_mov_i32(tofs, tmp);
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (tofs) {
                tcg_gen_movi_i32(tofs, ofs);
            }
        }
        tcg_temp_free(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Apply the operation to the field in place.  mask has the field
       bits clear, so and/or/eqv/orc express clear/…/set directly.  */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        tcg_temp_free(tlen);
        tcg_temp_free(tofs);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(mask);
}
4187 
/*
 * Bitfield operations with a memory operand.  The (possibly multi-byte,
 * unaligned) field access is delegated to per-op helpers; offset and
 * length are passed either as immediates wrapped in constant temps or
 * directly as data registers.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* The helper returns the field in one half of an i64 and the
           bit number in the other; split into Dn and CC_N.  */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        tcg_temp_free_i64(t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    /* Helpers leave the field value in QREG_CC_N for the flags.  */
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated; registers stay live.  */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4242 
/*
 * BFINS with a data-register destination: insert the low "len" bits of
 * Dn(ext,12) at the given bitfield position.  Flags are computed from
 * the inserted value left-justified in QREG_CC_N.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* A width encoding of 0 means 32 bits.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width: shift count (32 - width) mod 32 == -width mod 32.  */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps around bit 0: build it by mask and rotate.  */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): 1s above the field.  */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and value into position and merge into dst.  */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);

        tcg_temp_free(rot);
        tcg_temp_free(mask);
    }
    tcg_temp_free(tmp);
}
4316 
/*
 * BFINS with a memory destination: the read-modify-write of the
 * (possibly unaligned) field is done entirely in the helper.
 */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    /* Helper returns the inserted value left-justified for the flags.  */
    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temps we allocated; registers stay live.  */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}
4350 
/*
 * FF1 (ColdFire): find first one in Dx.  Flags are set from the
 * original register value, so gen_logic_cc must precede the helper
 * that overwrites the register.
 */
DISAS_INSN(ff1)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg, OS_LONG);
    gen_helper_ff1(reg, reg);
}
4358 
/*
 * CHK: bounds-check Dn against a source operand; the helper raises the
 * CHK exception on failure.  Size field 3 is word; size 2 (long) is
 * only valid on CPUs with the CHK2 feature.
 */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* The helper may raise an exception; flags must be up to date.  */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}
4384 
4385 DISAS_INSN(chk2)
4386 {
4387     uint16_t ext;
4388     TCGv addr1, addr2, bound1, bound2, reg;
4389     int opsize;
4390 
4391     switch ((insn >> 9) & 3) {
4392     case 0:
4393         opsize = OS_BYTE;
4394         break;
4395     case 1:
4396         opsize = OS_WORD;
4397         break;
4398     case 2:
4399         opsize = OS_LONG;
4400         break;
4401     default:
4402         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4403         return;
4404     }
4405 
4406     ext = read_im16(env, s);
4407     if ((ext & 0x0800) == 0) {
4408         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4409         return;
4410     }
4411 
4412     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4413     addr2 = tcg_temp_new();
4414     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4415 
4416     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4417     tcg_temp_free(addr1);
4418     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4419     tcg_temp_free(addr2);
4420 
4421     reg = tcg_temp_new();
4422     if (ext & 0x8000) {
4423         tcg_gen_mov_i32(reg, AREG(ext, 12));
4424     } else {
4425         gen_ext(reg, DREG(ext, 12), opsize, 1);
4426     }
4427 
4428     gen_flush_flags(s);
4429     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4430     tcg_temp_free(reg);
4431     tcg_temp_free(bound1);
4432     tcg_temp_free(bound2);
4433 }
4434 
/*
 * Copy one 16-byte cache line for MOVE16.  Both src and dst addresses
 * are masked to 16-byte alignment; the line is transferred as two
 * 64-bit loads followed by two 64-bit stores using MMU index "index".
 */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Read the whole source line before writing the destination.  */
    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld64(t1, addr, index);

    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st64(t1, addr, index);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free(addr);
}
4459 
4460 DISAS_INSN(move16_reg)
4461 {
4462     int index = IS_USER(s);
4463     TCGv tmp;
4464     uint16_t ext;
4465 
4466     ext = read_im16(env, s);
4467     if ((ext & (1 << 15)) == 0) {
4468         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4469     }
4470 
4471     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4472 
4473     /* Ax can be Ay, so save Ay before incrementing Ax */
4474     tmp = tcg_temp_new();
4475     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4476     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4477     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4478     tcg_temp_free(tmp);
4479 }
4480 
4481 DISAS_INSN(move16_mem)
4482 {
4483     int index = IS_USER(s);
4484     TCGv reg, addr;
4485 
4486     reg = AREG(insn, 0);
4487     addr = tcg_const_i32(read_im32(env, s));
4488 
4489     if ((insn >> 3) & 1) {
4490         /* MOVE16 (xxx).L, (Ay) */
4491         m68k_copy_line(reg, addr, index);
4492     } else {
4493         /* MOVE16 (Ay), (xxx).L */
4494         m68k_copy_line(addr, reg, index);
4495     }
4496 
4497     tcg_temp_free(addr);
4498 
4499     if (((insn >> 3) & 2) == 0) {
4500         /* (Ay)+ */
4501         tcg_gen_addi_i32(reg, reg, 16);
4502     }
4503 }
4504 
/*
 * STRLDSR (ColdFire): a two-word sequence that pushes SR and then
 * loads it.  The second word must literally be 0x46FC (MOVE #imm,SR);
 * the immediate must have the S bit set and the CPU must be in
 * supervisor mode.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* Fault address is the start of the instruction pair.  */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}
4524 
/*
 * MOVE from SR.  Privileged on all CPUs except the original 68000,
 * where user mode may read SR.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4536 
4537 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES: privileged move using the alternate (SFC/DFC) address
 * spaces.  Bit 11 of the extension word selects the direction; the
 * postincrement/predecrement writeback is applied by hand since
 * gen_lea() does not update the address register.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Address registers take the full sign-extended value.  */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Data registers only have the low part replaced.  */
            gen_partset_reg(opsize, reg, tmp);
        }
        tcg_temp_free(tmp);
    }
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through A7 keep the stack word-aligned.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4596 
/*
 * MOVE to SR.  Privileged; ends the TB since the SR write can change
 * interrupt state and translation flags.
 */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
4606 
/* MOVE USP,An: read the saved user stack pointer.  Privileged.  */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4616 
/* MOVE An,USP: write the saved user stack pointer.  Privileged.  */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4626 
/* HALT: privileged; implemented by raising a halt pseudo-exception.  */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4636 
/*
 * STOP #imm: load SR from the immediate, mark the CPU halted, and
 * raise EXCP_HLT so execution waits for an interrupt.  Privileged.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4652 
/* RTE: privileged; the actual frame pop is done by the EXCP_RTE path.  */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4661 
4662 DISAS_INSN(cf_movec)
4663 {
4664     uint16_t ext;
4665     TCGv reg;
4666 
4667     if (IS_USER(s)) {
4668         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4669         return;
4670     }
4671 
4672     ext = read_im16(env, s);
4673 
4674     if (ext & 0x8000) {
4675         reg = AREG(ext, 12);
4676     } else {
4677         reg = DREG(ext, 12);
4678     }
4679     gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4680     gen_exit_tb(s);
4681 }
4682 
4683 DISAS_INSN(m68k_movec)
4684 {
4685     uint16_t ext;
4686     TCGv reg;
4687 
4688     if (IS_USER(s)) {
4689         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4690         return;
4691     }
4692 
4693     ext = read_im16(env, s);
4694 
4695     if (ext & 0x8000) {
4696         reg = AREG(ext, 12);
4697     } else {
4698         reg = DREG(ext, 12);
4699     }
4700     if (insn & 1) {
4701         gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4702     } else {
4703         gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4704     }
4705     gen_exit_tb(s);
4706 }
4707 
/* INTOUCH (ColdFire): privileged instruction-cache touch; no-op here.  */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4716 
/* CPUSHL: privileged cache line push/invalidate; no-op here.  */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4725 
/* CPUSH: privileged cache push/invalidate; no-op here.  */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4734 
/* CINV: privileged cache line invalidate; no-op here.  */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.  */
}
4743 
4744 #if defined(CONFIG_SOFTMMU)
/*
 * PFLUSH: flush ATC (TLB) entries.  Privileged; the opmode field in
 * bits 3-4 selects the variant, handled in the helper.
 */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_const_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
    tcg_temp_free(opmode);
}
4758 
/*
 * PTEST: probe an MMU translation for (An).  Privileged; bit 5 of the
 * opcode selects a read (vs write) probe.
 */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_const_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
    tcg_temp_free(is_read);
}
4771 #endif
4772 
/* WDDATA (ColdFire debug): always privileged-faults here.  */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4777 
/* WDEBUG (ColdFire debug): privileged; not implemented, aborts.  */
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
4789 #endif
4790 
/* TRAP #n: raise trap vector 0-15 from the low opcode bits.  */
DISAS_INSN(trap)
{
    gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
}
4795 
/*
 * Read FP control register "reg" (M68K_FPIAR/FPSR/FPCR) into res.
 * FPIAR is not implemented and always reads as zero.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}
4810 
/*
 * Write val to FP control register "reg".  FPIAR writes are ignored;
 * FPCR goes through a helper so rounding/precision state is updated.
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}
4824 
/* Store FP control register "reg" to memory at addr (32-bit).  */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    gen_load_fcr(s, tmp, reg);
    tcg_gen_qemu_st32(tmp, addr, index);
    tcg_temp_free(tmp);
}
4835 
/* Load FP control register "reg" from memory at addr (32-bit).  */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    gen_store_fcr(s, tmp, reg);
    tcg_temp_free(tmp);
}
4846 
4847 
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * "mask" selects which registers take part; "is_write" means moving a
 * control register out to the EA.  Dn is only legal for a single
 * register, An only for FPIAR.  Memory forms iterate over the mask,
 * with special handling for -(An) stores and (An)+ writeback.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* A data register can hold exactly one control register.  */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    default:
        break;
    }

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    /* Work on a private copy so writeback can use the final value.  */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /* mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An) store: walk the mask from FPCR down, decrementing.  */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other modes: walk the mask from FPIAR up, incrementing.  */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                /* (An)+ always advances so writeback sees the end.  */
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
    tcg_temp_free_i32(addr);
}
4930 
/*
 * FMOVEM: move multiple FP data registers to/from memory.  The
 * register list is either static (in the extension word) or dynamic
 * (in a data register).  The helpers return the final address in tmp,
 * used for (An)+/-(An) writeback.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the register mask on input, final address on output.  */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /* predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* Write back the final address for (An)+ and -(An) modes.  */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
    tcg_temp_free(tmp);
}
4990 
4991 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4992    immediately before the next FP instruction is executed.  */
/*
 * Main FPU dispatch (F-line, coprocessor 1).  Bits 13-15 of the
 * extension word select the instruction class (register op, fmovecr,
 * fmove out, FCR moves, fmovem); otherwise "opmode" selects the
 * arithmetic operation.  All arithmetic results pass through ftst so
 * the FP condition codes are updated.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_const_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            tcg_temp_free_ptr(cpu_dest);
            tcg_temp_free(rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        /* fmove out also sets the FP condition codes.  */
        gen_helper_ftst(cpu_env, cpu_src);
        tcg_temp_free_ptr(cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: low 3 bits of opmode select the cosine register.  */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
            tcg_temp_free_ptr(cpu_dest2);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    tcg_temp_free_ptr(cpu_src);
    /* Set the FP condition codes from the result.  */
    gen_helper_ftst(cpu_env, cpu_dest);
    tcg_temp_free_ptr(cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5232 
/*
 * Decode FPU condition code @cond (low 6 bits of the opcode or extension
 * word) into a DisasCompare such that (v1 <tcond> v2) holds iff the
 * condition is true.  Conditions 16-31 are the IEEE "signaling" variants;
 * they are decoded identically to 0-15 here (BSUN is not raised -- see
 * the TODO below).  The tests are built from the FPSR condition-code
 * bits: N (negative), Z (zero), and A (NaN/unordered).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    /* v2 is always the constant zero; v1 defaults to a non-owned value.  */
    c->g1 = 1;
    c->v2 = tcg_const_i32(0);
    c->g2 = 0;
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Shift the A bit into the N bit position so A and N combine.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Invert N, then require all of N', A, Z clear.  */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Align A with N, clear N when A is set, then test Z || N.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Align Z with N so N and Z combine, then test A || !N'.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        /* Align Z with N, clear N when Z is set, then test A || N.  */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    tcg_temp_free(fpsr);
}
5372 
5373 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5374 {
5375     DisasCompare c;
5376 
5377     gen_fcc_cond(&c, s, cond);
5378     update_cc_op(s);
5379     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5380     free_cond(&c);
5381 }
5382 
5383 DISAS_INSN(fbcc)
5384 {
5385     uint32_t offset;
5386     uint32_t base;
5387     TCGLabel *l1;
5388 
5389     base = s->pc;
5390     offset = (int16_t)read_im16(env, s);
5391     if (insn & (1 << 6)) {
5392         offset = (offset << 16) | read_im16(env, s);
5393     }
5394 
5395     l1 = gen_new_label();
5396     update_cc_op(s);
5397     gen_fjmpcc(s, insn & 0x3f, l1);
5398     gen_jmp_tb(s, 0, s->pc);
5399     gen_set_label(l1);
5400     gen_jmp_tb(s, 1, base + offset);
5401 }
5402 
5403 DISAS_INSN(fscc)
5404 {
5405     DisasCompare c;
5406     int cond;
5407     TCGv tmp;
5408     uint16_t ext;
5409 
5410     ext = read_im16(env, s);
5411     cond = ext & 0x3f;
5412     gen_fcc_cond(&c, s, cond);
5413 
5414     tmp = tcg_temp_new();
5415     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5416     free_cond(&c);
5417 
5418     tcg_gen_neg_i32(tmp, tmp);
5419     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5420     tcg_temp_free(tmp);
5421 }
5422 
5423 #if defined(CONFIG_SOFTMMU)
5424 DISAS_INSN(frestore)
5425 {
5426     TCGv addr;
5427 
5428     if (IS_USER(s)) {
5429         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5430         return;
5431     }
5432     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5433         SRC_EA(env, addr, OS_LONG, 0, NULL);
5434         /* FIXME: check the state frame */
5435     } else {
5436         disas_undef(env, s, insn);
5437     }
5438 }
5439 
5440 DISAS_INSN(fsave)
5441 {
5442     if (IS_USER(s)) {
5443         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5444         return;
5445     }
5446 
5447     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5448         /* always write IDLE */
5449         TCGv idle = tcg_const_i32(0x41000000);
5450         DEST_EA(env, insn, OS_LONG, idle, NULL);
5451         tcg_temp_free(idle);
5452     } else {
5453         disas_undef(env, s, insn);
5454     }
5455 }
5456 #endif
5457 
5458 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5459 {
5460     TCGv tmp = tcg_temp_new();
5461     if (s->env->macsr & MACSR_FI) {
5462         if (upper)
5463             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5464         else
5465             tcg_gen_shli_i32(tmp, val, 16);
5466     } else if (s->env->macsr & MACSR_SU) {
5467         if (upper)
5468             tcg_gen_sari_i32(tmp, val, 16);
5469         else
5470             tcg_gen_ext16s_i32(tmp, val);
5471     } else {
5472         if (upper)
5473             tcg_gen_shri_i32(tmp, val, 16);
5474         else
5475             tcg_gen_ext16u_i32(tmp, val);
5476     }
5477     return tmp;
5478 }
5479 
5480 static void gen_mac_clear_flags(void)
5481 {
5482     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5483                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5484 }
5485 
/*
 * ColdFire EMAC multiply-accumulate, optionally with a parallel memory
 * load ("MAC with load" forms, insn bits 4-5 nonzero).  Bit 8 of the
 * opcode selects subtract-from-accumulator instead of add; the
 * extension word selects word/long operands, shift, and (on EMAC_B)
 * a second, dual accumulation.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Allocate the 64-bit multiply temporary once per translation.  */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high.  */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Optional post-multiply shift selected by ext bits 9-10.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Insn bit 8 selects subtract (MSAC) rather than add.  */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Writeback for the parallel-load forms.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
        tcg_temp_free(loadval);
    }
}
5650 
5651 DISAS_INSN(from_mac)
5652 {
5653     TCGv rx;
5654     TCGv_i64 acc;
5655     int accnum;
5656 
5657     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5658     accnum = (insn >> 9) & 3;
5659     acc = MACREG(accnum);
5660     if (s->env->macsr & MACSR_FI) {
5661         gen_helper_get_macf(rx, cpu_env, acc);
5662     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5663         tcg_gen_extrl_i64_i32(rx, acc);
5664     } else if (s->env->macsr & MACSR_SU) {
5665         gen_helper_get_macs(rx, acc);
5666     } else {
5667         gen_helper_get_macu(rx, acc);
5668     }
5669     if (insn & 0x40) {
5670         tcg_gen_movi_i64(acc, 0);
5671         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5672     }
5673 }
5674 
5675 DISAS_INSN(move_mac)
5676 {
5677     /* FIXME: This can be done without a helper.  */
5678     int src;
5679     TCGv dest;
5680     src = insn & 3;
5681     dest = tcg_const_i32((insn >> 9) & 3);
5682     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5683     gen_mac_clear_flags();
5684     gen_helper_mac_set_flags(cpu_env, dest);
5685 }
5686 
5687 DISAS_INSN(from_macsr)
5688 {
5689     TCGv reg;
5690 
5691     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5692     tcg_gen_mov_i32(reg, QREG_MACSR);
5693 }
5694 
5695 DISAS_INSN(from_mask)
5696 {
5697     TCGv reg;
5698     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5699     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5700 }
5701 
5702 DISAS_INSN(from_mext)
5703 {
5704     TCGv reg;
5705     TCGv acc;
5706     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5707     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5708     if (s->env->macsr & MACSR_FI)
5709         gen_helper_get_mac_extf(reg, cpu_env, acc);
5710     else
5711         gen_helper_get_mac_exti(reg, cpu_env, acc);
5712 }
5713 
5714 DISAS_INSN(macsr_to_ccr)
5715 {
5716     TCGv tmp = tcg_temp_new();
5717     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5718     gen_helper_set_sr(cpu_env, tmp);
5719     tcg_temp_free(tmp);
5720     set_cc_op(s, CC_OP_FLAGS);
5721 }
5722 
5723 DISAS_INSN(to_mac)
5724 {
5725     TCGv_i64 acc;
5726     TCGv val;
5727     int accnum;
5728     accnum = (insn >> 9) & 3;
5729     acc = MACREG(accnum);
5730     SRC_EA(env, val, OS_LONG, 0, NULL);
5731     if (s->env->macsr & MACSR_FI) {
5732         tcg_gen_ext_i32_i64(acc, val);
5733         tcg_gen_shli_i64(acc, acc, 8);
5734     } else if (s->env->macsr & MACSR_SU) {
5735         tcg_gen_ext_i32_i64(acc, val);
5736     } else {
5737         tcg_gen_extu_i32_i64(acc, val);
5738     }
5739     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5740     gen_mac_clear_flags();
5741     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5742 }
5743 
5744 DISAS_INSN(to_macsr)
5745 {
5746     TCGv val;
5747     SRC_EA(env, val, OS_LONG, 0, NULL);
5748     gen_helper_set_macsr(cpu_env, val);
5749     gen_exit_tb(s);
5750 }
5751 
5752 DISAS_INSN(to_mask)
5753 {
5754     TCGv val;
5755     SRC_EA(env, val, OS_LONG, 0, NULL);
5756     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5757 }
5758 
5759 DISAS_INSN(to_mext)
5760 {
5761     TCGv val;
5762     TCGv acc;
5763     SRC_EA(env, val, OS_LONG, 0, NULL);
5764     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5765     if (s->env->macsr & MACSR_FI)
5766         gen_helper_set_mac_extf(cpu_env, val, acc);
5767     else if (s->env->macsr & MACSR_SU)
5768         gen_helper_set_mac_exts(cpu_env, val, acc);
5769     else
5770         gen_helper_set_mac_extu(cpu_env, val, acc);
5771 }
5772 
5773 static disas_proc opcode_table[65536];
5774 
5775 static void
5776 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5777 {
5778   int i;
5779   int from;
5780   int to;
5781 
5782   /* Sanity check.  All set bits must be included in the mask.  */
5783   if (opcode & ~mask) {
5784       fprintf(stderr,
5785               "qemu internal error: bogus opcode definition %04x/%04x\n",
5786               opcode, mask);
5787       abort();
5788   }
5789   /* This could probably be cleverer.  For now just optimize the case where
5790      the top bits are known.  */
5791   /* Find the first zero bit in the mask.  */
5792   i = 0x8000;
5793   while ((i & mask) != 0)
5794       i >>= 1;
5795   /* Iterate over all combinations of this and lower bits.  */
5796   if (i == 0)
5797       i = 1;
5798   else
5799       i <<= 1;
5800   from = opcode & ~(i - 1);
5801   to = from + i;
5802   for (i = from; i < to; i++) {
5803       if ((i & mask) == opcode)
5804           opcode_table[i] = proc;
5805   }
5806 }
5807 
5808 /* Register m68k opcode handlers.  Order is important.
5809    Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues. */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* BASE(undef, 0000, 0000) covers the whole table as a fallback;
     * every later registration overrides part of it.  */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68000);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68000);
    INSN(undef,     02c0, ffc0, M68000);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68000);
    INSN(undef,     04c0, ffc0, M68000);
    INSN(arith_im,  0600, ff00, M68000);
    INSN(undef,     06c0, ffc0, M68000);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68000);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(moves,     0e00, ff00, M68000);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    /* All three MOVE size encodings share one handler.  */
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68000);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68000);
    INSN(undef,     40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68000);
    INSN(undef,     44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68000);
    INSN(linkl,     4808, fff8, M68000);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68000);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68000);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, M68000);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68000);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68000);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68000);
    INSN(sbcd_mem,  8108, f1f8, M68000);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68000);
    INSN(subx_mem,  9108, f138, M68000);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68000);

    /* Line A: ColdFire EMAC instructions.  */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68000);
    INSN(eor,       b100, f100, M68000);
    INSN(cmpm,      b108, f138, M68000);
    INSN(cmpa,      b0c0, f0c0, M68000);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68000);
    INSN(exg_aa,    c148, f1f8, M68000);
    INSN(exg_da,    c188, f1f8, M68000);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68000);
    INSN(abcd_mem,  c108, f1f8, M68000);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68000);
    INSN(addx_mem,  d108, f138, M68000);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68000);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im,  e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* Line F: FPU and system control instructions.  */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(fbcc,      f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6051 
6052 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6053 {
6054     DisasContext *dc = container_of(dcbase, DisasContext, base);
6055     CPUM68KState *env = cpu->env_ptr;
6056 
6057     dc->env = env;
6058     dc->pc = dc->base.pc_first;
6059     dc->cc_op = CC_OP_DYNAMIC;
6060     dc->cc_op_synced = 1;
6061     dc->done_mac = 0;
6062     dc->writeback_mask = 0;
6063     init_release_array(dc);
6064 }
6065 
/* Translator hook: nothing to do at the start of a translation block.  */
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
}
6069 
/*
 * Record per-insn unwind data: the PC and the current cc_op, which
 * restore_state_to_opc() uses to recover state when an exception
 * unwinds in the middle of a TB.
 */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6075 
/*
 * Emit a debug exception when a guest breakpoint is hit.  Returning
 * true tells the translator loop to stop translating this TB.
 */
static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                     const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    dc->base.pc_next += 2;

    return true;
}
6090 
/*
 * Translate a single guest instruction: fetch the 16-bit opcode,
 * dispatch through the opcode table, then flush any deferred address
 * register writebacks and release TCG temporaries accumulated by the
 * handler.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    /* Writebacks must follow the dispatch: the handler only records them. */
    do_writebacks(dc);
    do_release(dc);

    /* dc->pc was advanced past the insn's extension words by read_im16. */
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /* Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6124 
/*
 * Emit the code that ends the translation block, according to how the
 * last translated insn left dc->base.is_jmp.
 */
static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* An exception or trap already terminated the TB.  */
        return;
    }
    if (dc->base.singlestep_enabled) {
        /* gdbstub single-step: trap to the debugger instead of chaining.  */
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
        return;
    }

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Translation stopped mid-stream; continue at the next insn.  */
        update_cc_op(dc);
        gen_jmp_tb(dc, 0, dc->pc);
        break;
    case DISAS_JUMP:
        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        /* We updated CC_OP and PC in gen_exit_tb, but also modified
           other state that may require returning to the main loop.  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
6155 
/* Log the guest instructions covered by this TB (-d in_asm).  */
static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
6161 
/* Hooks through which the generic translator_loop drives m68k translation. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .breakpoint_check   = m68k_tr_breakpoint_check,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6171 
/* Translate one guest translation block into TCG ops.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    translator_loop(&m68k_tr_ops, &dc.base, cpu, tb);
}
6177 
6178 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6179 {
6180     floatx80 a = { .high = high, .low = low };
6181     union {
6182         float64 f64;
6183         double d;
6184     } u;
6185 
6186     u.f64 = floatx80_to_float64(a, &env->fp_status);
6187     return u.d;
6188 }
6189 
/*
 * Print the CPU register state to F for 'info registers' and -d cpu
 * logging: data/address/FP registers, PC, SR with decoded flags, FPSR
 * and FPCR, plus (softmmu only) the alternate stack pointers and MMU
 * registers.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* Dn/An/FPn registers; FP values also shown as an approximate double. */
    for (i = 0; i < 8; i++) {
        cpu_fprintf(f, "D%d = %08x   A%d = %08x   "
                    "F%d = %04x %016"PRIx64"  (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                    floatx80_to_double(env, env->fregs[i].l.upper,
                                       env->fregs[i].l.lower));
    }
    cpu_fprintf (f, "PC = %08x   ", env->pc);
    /* env->sr holds only the non-CC bits; merge in the live CCR.  */
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    cpu_fprintf(f, "\n                                "
                   "FPCR =     %04x ", env->fpcr);
    /* Decode the FPCR rounding-precision field.  */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        cpu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        cpu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        cpu_fprintf(f, "D ");
        break;
    }
    /* Decode the FPCR rounding-mode field.  */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        cpu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        cpu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        cpu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        cpu_fprintf(f, "RP ");
        break;
    }
    cpu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* Show all three A7 banks, marking the one currently mapped to A7.  */
    cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
               env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
               env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
               env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                env->mmu.mmusr, env->mmu.ar);
#endif
}
6262 
6263 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6264                           target_ulong *data)
6265 {
6266     int cc_op = data[1];
6267     env->pc = data[0];
6268     if (cc_op != CC_OP_DYNAMIC) {
6269         env->cc_op = cc_op;
6270     }
6271 }
6272