xref: /openbmc/qemu/target/m68k/translate.c (revision 646c5478)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34 
35 
36 //#define DEBUG_DISPATCH 1
37 
38 /* Fake floating point.  */
39 #define tcg_gen_mov_f64 tcg_gen_mov_i64
40 #define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
41 #define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
42 
43 #define DEFO32(name, offset) static TCGv QREG_##name;
44 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
45 #define DEFF64(name, offset) static TCGv_i64 QREG_##name;
46 #include "qregs.def"
47 #undef DEFO32
48 #undef DEFO64
49 #undef DEFF64
50 
51 static TCGv_i32 cpu_halted;
52 static TCGv_i32 cpu_exception_index;
53 
54 static TCGv_env cpu_env;
55 
56 static char cpu_reg_names[3*8*3 + 5*4];
57 static TCGv cpu_dregs[8];
58 static TCGv cpu_aregs[8];
59 static TCGv_i64 cpu_fregs[8];
60 static TCGv_i64 cpu_macc[4];
61 
62 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
63 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
64 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
65 #define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
66 #define MACREG(acc)     cpu_macc[acc]
67 #define QREG_SP         get_areg(s, 7)
68 
69 static TCGv NULL_QREG;
70 #define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
71 /* Used to distinguish stores from bad addressing modes.  */
72 static TCGv store_dummy;
73 
74 #include "exec/gen-icount.h"
75 
76 void m68k_tcg_init(void)
77 {
78     char *p;
79     int i;
80 
81     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
82     tcg_ctx.tcg_env = cpu_env;
83 
84 #define DEFO32(name, offset) \
85     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
86         offsetof(CPUM68KState, offset), #name);
87 #define DEFO64(name, offset) \
88     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
89         offsetof(CPUM68KState, offset), #name);
90 #define DEFF64(name, offset) DEFO64(name, offset)
91 #include "qregs.def"
92 #undef DEFO32
93 #undef DEFO64
94 #undef DEFF64
95 
96     cpu_halted = tcg_global_mem_new_i32(cpu_env,
97                                         -offsetof(M68kCPU, env) +
98                                         offsetof(CPUState, halted), "HALTED");
99     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
100                                                  -offsetof(M68kCPU, env) +
101                                                  offsetof(CPUState, exception_index),
102                                                  "EXCEPTION");
103 
104     p = cpu_reg_names;
105     for (i = 0; i < 8; i++) {
106         sprintf(p, "D%d", i);
107         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
108                                           offsetof(CPUM68KState, dregs[i]), p);
109         p += 3;
110         sprintf(p, "A%d", i);
111         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
112                                           offsetof(CPUM68KState, aregs[i]), p);
113         p += 3;
114         sprintf(p, "F%d", i);
115         cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
116                                           offsetof(CPUM68KState, fregs[i]), p);
117         p += 3;
118     }
119     for (i = 0; i < 4; i++) {
120         sprintf(p, "ACC%d", i);
121         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
122                                          offsetof(CPUM68KState, macc[i]), p);
123         p += 5;
124     }
125 
126     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
127     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
128 }
129 
130 /* internal defines */
131 typedef struct DisasContext {
132     CPUM68KState *env;
133     target_ulong insn_pc; /* Start of the current instruction.  */
134     target_ulong pc;
135     int is_jmp;
136     CCOp cc_op; /* Current CC operation */
137     int cc_op_synced;
138     int user;
139     uint32_t fpcr;
140     struct TranslationBlock *tb;
141     int singlestep_enabled;
142     TCGv_i64 mactmp;
143     int done_mac;
144     int writeback_mask;
145     TCGv writeback[8];
146 } DisasContext;
147 
148 static TCGv get_areg(DisasContext *s, unsigned regno)
149 {
150     if (s->writeback_mask & (1 << regno)) {
151         return s->writeback[regno];
152     } else {
153         return cpu_aregs[regno];
154     }
155 }
156 
157 static void delay_set_areg(DisasContext *s, unsigned regno,
158                            TCGv val, bool give_temp)
159 {
160     if (s->writeback_mask & (1 << regno)) {
161         if (give_temp) {
162             tcg_temp_free(s->writeback[regno]);
163             s->writeback[regno] = val;
164         } else {
165             tcg_gen_mov_i32(s->writeback[regno], val);
166         }
167     } else {
168         s->writeback_mask |= 1 << regno;
169         if (give_temp) {
170             s->writeback[regno] = val;
171         } else {
172             TCGv tmp = tcg_temp_new();
173             s->writeback[regno] = tmp;
174             tcg_gen_mov_i32(tmp, val);
175         }
176     }
177 }
178 
179 static void do_writebacks(DisasContext *s)
180 {
181     unsigned mask = s->writeback_mask;
182     if (mask) {
183         s->writeback_mask = 0;
184         do {
185             unsigned regno = ctz32(mask);
186             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
187             tcg_temp_free(s->writeback[regno]);
188             mask &= mask - 1;
189         } while (mask);
190     }
191 }
192 
193 #define DISAS_JUMP_NEXT 4
194 
195 #if defined(CONFIG_USER_ONLY)
196 #define IS_USER(s) 1
197 #else
198 #define IS_USER(s) s->user
199 #endif
200 
201 /* XXX: move that elsewhere */
202 /* ??? Fix exceptions.  */
203 static void *gen_throws_exception;
204 #define gen_last_qop NULL
205 
206 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
207 
208 #ifdef DEBUG_DISPATCH
209 #define DISAS_INSN(name)                                                \
210     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
211                                   uint16_t insn);                       \
212     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
213                              uint16_t insn)                             \
214     {                                                                   \
215         qemu_log("Dispatch " #name "\n");                               \
216         real_disas_##name(env, s, insn);                                \
217     }                                                                   \
218     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
219                                   uint16_t insn)
220 #else
221 #define DISAS_INSN(name)                                                \
222     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
223                              uint16_t insn)
224 #endif
225 
226 static const uint8_t cc_op_live[CC_OP_NB] = {
227     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
228     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
229     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
230     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
231     [CC_OP_LOGIC] = CCF_X | CCF_N
232 };
233 
234 static void set_cc_op(DisasContext *s, CCOp op)
235 {
236     CCOp old_op = s->cc_op;
237     int dead;
238 
239     if (old_op == op) {
240         return;
241     }
242     s->cc_op = op;
243     s->cc_op_synced = 0;
244 
245     /* Discard CC computation that will no longer be used.
246        Note that X and N are never dead.  */
247     dead = cc_op_live[old_op] & ~cc_op_live[op];
248     if (dead & CCF_C) {
249         tcg_gen_discard_i32(QREG_CC_C);
250     }
251     if (dead & CCF_Z) {
252         tcg_gen_discard_i32(QREG_CC_Z);
253     }
254     if (dead & CCF_V) {
255         tcg_gen_discard_i32(QREG_CC_V);
256     }
257 }
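/* Illustrative example (not part of the original comments): switching from
 * CC_OP_ADDL (live: X, N, V) to CC_OP_LOGIC (live: X, N) leaves only V in
 * "dead", so just QREG_CC_V is discarded; X and N are never dead, as noted
 * above.
 */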
258 
259 /* Update the CPU env CC_OP state.  */
260 static void update_cc_op(DisasContext *s)
261 {
262     if (!s->cc_op_synced) {
263         s->cc_op_synced = 1;
264         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
265     }
266 }
267 
268 /* Generate a load from the specified address.  Narrow values are
269    sign or zero extended to full register width, according to SIGN.  */
270 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
271 {
272     TCGv tmp;
273     int index = IS_USER(s);
274     tmp = tcg_temp_new_i32();
275     switch(opsize) {
276     case OS_BYTE:
277         if (sign)
278             tcg_gen_qemu_ld8s(tmp, addr, index);
279         else
280             tcg_gen_qemu_ld8u(tmp, addr, index);
281         break;
282     case OS_WORD:
283         if (sign)
284             tcg_gen_qemu_ld16s(tmp, addr, index);
285         else
286             tcg_gen_qemu_ld16u(tmp, addr, index);
287         break;
288     case OS_LONG:
289     case OS_SINGLE:
290         tcg_gen_qemu_ld32u(tmp, addr, index);
291         break;
292     default:
293         g_assert_not_reached();
294     }
295     gen_throws_exception = gen_last_qop;
296     return tmp;
297 }
298 
299 static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
300 {
301     TCGv_i64 tmp;
302     int index = IS_USER(s);
303     tmp = tcg_temp_new_i64();
304     tcg_gen_qemu_ldf64(tmp, addr, index);
305     gen_throws_exception = gen_last_qop;
306     return tmp;
307 }
308 
309 /* Generate a store.  */
310 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
311 {
312     int index = IS_USER(s);
313     switch(opsize) {
314     case OS_BYTE:
315         tcg_gen_qemu_st8(val, addr, index);
316         break;
317     case OS_WORD:
318         tcg_gen_qemu_st16(val, addr, index);
319         break;
320     case OS_LONG:
321     case OS_SINGLE:
322         tcg_gen_qemu_st32(val, addr, index);
323         break;
324     default:
325         g_assert_not_reached();
326     }
327     gen_throws_exception = gen_last_qop;
328 }
329 
330 static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
331 {
332     int index = IS_USER(s);
333     tcg_gen_qemu_stf64(val, addr, index);
334     gen_throws_exception = gen_last_qop;
335 }
336 
337 typedef enum {
338     EA_STORE,
339     EA_LOADU,
340     EA_LOADS
341 } ea_what;
342 
343 /* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT
344    is EA_LOADS, otherwise generate a store.  */
345 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
346                      ea_what what)
347 {
348     if (what == EA_STORE) {
349         gen_store(s, opsize, addr, val);
350         return store_dummy;
351     } else {
352         return gen_load(s, opsize, addr, what == EA_LOADS);
353     }
354 }
355 
356 /* Read a 16-bit immediate constant */
357 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
358 {
359     uint16_t im;
360     im = cpu_lduw_code(env, s->pc);
361     s->pc += 2;
362     return im;
363 }
364 
365 /* Read an 8-bit immediate constant (low byte of a 16-bit extension word) */
366 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
367 {
368     return read_im16(env, s);
369 }
370 
371 /* Read a 32-bit immediate constant.  */
372 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
373 {
374     uint32_t im;
375     im = read_im16(env, s) << 16;
376     im |= 0xffff & read_im16(env, s);
377     return im;
378 }
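/* For example, consecutive big-endian extension words 0x1234 and 0x5678
 * combine to the 32-bit immediate 0x12345678.
 */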
379 
380 /* Calculate an address index.  */
381 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
382 {
383     TCGv add;
384     int scale;
385 
386     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
387     if ((ext & 0x800) == 0) {
388         tcg_gen_ext16s_i32(tmp, add);
389         add = tmp;
390     }
391     scale = (ext >> 9) & 3;
392     if (scale != 0) {
393         tcg_gen_shli_i32(tmp, add, scale);
394         add = tmp;
395     }
396     return add;
397 }
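/* Illustrative decoding (example values added for clarity): ext = 0xbc00
 * selects A3 as the index register (bit 15 set, bits 14-12 = 3), uses its
 * full 32-bit value (bit 11 set) and scales it by 4 (scale field = 2), so
 * the returned index is A3 * 4.
 */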
398 
399 /* Handle a base + index + displacement effective address.
400    A NULL_QREG base means pc-relative.  */
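/* Illustrative brief-format extension word (example value, assuming a CPU
 * with the word-index feature): ext = 0x2004 encodes base + (int16_t)D2 * 1
 * + 4, i.e. index register D2 sign-extended from 16 bits, scale 1, 8-bit
 * displacement 4.
 */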
401 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
402 {
403     uint32_t offset;
404     uint16_t ext;
405     TCGv add;
406     TCGv tmp;
407     uint32_t bd, od;
408 
409     offset = s->pc;
410     ext = read_im16(env, s);
411 
412     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
413         return NULL_QREG;
414 
415     if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
416         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
417         ext &= ~(3 << 9);
418     }
419 
420     if (ext & 0x100) {
421         /* full extension word format */
422         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
423             return NULL_QREG;
424 
425         if ((ext & 0x30) > 0x10) {
426             /* base displacement */
427             if ((ext & 0x30) == 0x20) {
428                 bd = (int16_t)read_im16(env, s);
429             } else {
430                 bd = read_im32(env, s);
431             }
432         } else {
433             bd = 0;
434         }
435         tmp = tcg_temp_new();
436         if ((ext & 0x44) == 0) {
437             /* pre-index */
438             add = gen_addr_index(s, ext, tmp);
439         } else {
440             add = NULL_QREG;
441         }
442         if ((ext & 0x80) == 0) {
443             /* base not suppressed */
444             if (IS_NULL_QREG(base)) {
445                 base = tcg_const_i32(offset + bd);
446                 bd = 0;
447             }
448             if (!IS_NULL_QREG(add)) {
449                 tcg_gen_add_i32(tmp, add, base);
450                 add = tmp;
451             } else {
452                 add = base;
453             }
454         }
455         if (!IS_NULL_QREG(add)) {
456             if (bd != 0) {
457                 tcg_gen_addi_i32(tmp, add, bd);
458                 add = tmp;
459             }
460         } else {
461             add = tcg_const_i32(bd);
462         }
463         if ((ext & 3) != 0) {
464             /* memory indirect */
465             base = gen_load(s, OS_LONG, add, 0);
466             if ((ext & 0x44) == 4) {
467                 add = gen_addr_index(s, ext, tmp);
468                 tcg_gen_add_i32(tmp, add, base);
469                 add = tmp;
470             } else {
471                 add = base;
472             }
473             if ((ext & 3) > 1) {
474                 /* outer displacement */
475                 if ((ext & 3) == 2) {
476                     od = (int16_t)read_im16(env, s);
477                 } else {
478                     od = read_im32(env, s);
479                 }
480             } else {
481                 od = 0;
482             }
483             if (od != 0) {
484                 tcg_gen_addi_i32(tmp, add, od);
485                 add = tmp;
486             }
487         }
488     } else {
489         /* brief extension word format */
490         tmp = tcg_temp_new();
491         add = gen_addr_index(s, ext, tmp);
492         if (!IS_NULL_QREG(base)) {
493             tcg_gen_add_i32(tmp, add, base);
494             if ((int8_t)ext)
495                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
496         } else {
497             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
498         }
499         add = tmp;
500     }
501     return add;
502 }
503 
504 /* Sign or zero extend a value.  */
505 
506 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
507 {
508     switch (opsize) {
509     case OS_BYTE:
510         if (sign) {
511             tcg_gen_ext8s_i32(res, val);
512         } else {
513             tcg_gen_ext8u_i32(res, val);
514         }
515         break;
516     case OS_WORD:
517         if (sign) {
518             tcg_gen_ext16s_i32(res, val);
519         } else {
520             tcg_gen_ext16u_i32(res, val);
521         }
522         break;
523     case OS_LONG:
524         tcg_gen_mov_i32(res, val);
525         break;
526     default:
527         g_assert_not_reached();
528     }
529 }
530 
531 /* Evaluate all the CC flags.  */
532 
533 static void gen_flush_flags(DisasContext *s)
534 {
535     TCGv t0, t1;
536 
537     switch (s->cc_op) {
538     case CC_OP_FLAGS:
539         return;
540 
541     case CC_OP_ADDB:
542     case CC_OP_ADDW:
543     case CC_OP_ADDL:
544         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
545         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
546         /* Compute signed overflow for addition.  */
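        /* QREG_CC_N holds the result and QREG_CC_V the source operand (see
         * gen_update_cc_add), so N - V recovers the first operand; the V
         * flag becomes the sign of (result ^ src) & ~(src ^ operand), i.e.
         * same-sign inputs producing a different-sign result.
         */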
547         t0 = tcg_temp_new();
548         t1 = tcg_temp_new();
549         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
550         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
551         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
552         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
553         tcg_temp_free(t0);
554         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
555         tcg_temp_free(t1);
556         break;
557 
558     case CC_OP_SUBB:
559     case CC_OP_SUBW:
560     case CC_OP_SUBL:
561         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
562         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
563         /* Compute signed overflow for subtraction.  */
564         t0 = tcg_temp_new();
565         t1 = tcg_temp_new();
566         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
567         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
568         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
569         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
570         tcg_temp_free(t0);
571         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
572         tcg_temp_free(t1);
573         break;
574 
575     case CC_OP_CMPB:
576     case CC_OP_CMPW:
577     case CC_OP_CMPL:
578         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
579         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
580         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
581         /* Compute signed overflow for subtraction.  */
582         t0 = tcg_temp_new();
583         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
584         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
585         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
586         tcg_temp_free(t0);
587         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
588         break;
589 
590     case CC_OP_LOGIC:
591         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
592         tcg_gen_movi_i32(QREG_CC_C, 0);
593         tcg_gen_movi_i32(QREG_CC_V, 0);
594         break;
595 
596     case CC_OP_DYNAMIC:
597         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
598         break;
599 
600     default:
601         t0 = tcg_const_i32(s->cc_op);
602         gen_helper_flush_flags(cpu_env, t0);
603         tcg_temp_free(t0);
604         break;
605     }
606 
607     /* Note that flush_flags also updates env->cc_op.  */
608     s->cc_op = CC_OP_FLAGS;
609     s->cc_op_synced = 1;
610 }
611 
612 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
613 {
614     TCGv tmp;
615 
616     if (opsize == OS_LONG) {
617         tmp = val;
618     } else {
619         tmp = tcg_temp_new();
620         gen_ext(tmp, val, opsize, sign);
621     }
622 
623     return tmp;
624 }
625 
626 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
627 {
628     gen_ext(QREG_CC_N, val, opsize, 1);
629     set_cc_op(s, CC_OP_LOGIC);
630 }
631 
632 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
633 {
634     tcg_gen_mov_i32(QREG_CC_N, dest);
635     tcg_gen_mov_i32(QREG_CC_V, src);
636     set_cc_op(s, CC_OP_CMPB + opsize);
637 }
638 
639 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
640 {
641     gen_ext(QREG_CC_N, dest, opsize, 1);
642     tcg_gen_mov_i32(QREG_CC_V, src);
643 }
644 
645 static inline int opsize_bytes(int opsize)
646 {
647     switch (opsize) {
648     case OS_BYTE: return 1;
649     case OS_WORD: return 2;
650     case OS_LONG: return 4;
651     case OS_SINGLE: return 4;
652     case OS_DOUBLE: return 8;
653     case OS_EXTENDED: return 12;
654     case OS_PACKED: return 12;
655     default:
656         g_assert_not_reached();
657     }
658 }
659 
660 static inline int insn_opsize(int insn)
661 {
662     switch ((insn >> 6) & 3) {
663     case 0: return OS_BYTE;
664     case 1: return OS_WORD;
665     case 2: return OS_LONG;
666     default:
667         g_assert_not_reached();
668     }
669 }
670 
671 /* Assign value to a register.  If the width is less than the register width
672    only the low part of the register is set.  */
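/* For example, with OS_WORD, reg = 0x12345678 and val = 0xabcd the register
 * ends up as 0x1234abcd.
 */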
673 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
674 {
675     TCGv tmp;
676     switch (opsize) {
677     case OS_BYTE:
678         tcg_gen_andi_i32(reg, reg, 0xffffff00);
679         tmp = tcg_temp_new();
680         tcg_gen_ext8u_i32(tmp, val);
681         tcg_gen_or_i32(reg, reg, tmp);
682         tcg_temp_free(tmp);
683         break;
684     case OS_WORD:
685         tcg_gen_andi_i32(reg, reg, 0xffff0000);
686         tmp = tcg_temp_new();
687         tcg_gen_ext16u_i32(tmp, val);
688         tcg_gen_or_i32(reg, reg, tmp);
689         tcg_temp_free(tmp);
690         break;
691     case OS_LONG:
692     case OS_SINGLE:
693         tcg_gen_mov_i32(reg, val);
694         break;
695     default:
696         g_assert_not_reached();
697     }
698 }
699 
700 /* Generate code for an "effective address".  Does not adjust the base
701    register for autoincrement addressing modes.  */
702 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
703                          int mode, int reg0, int opsize)
704 {
705     TCGv reg;
706     TCGv tmp;
707     uint16_t ext;
708     uint32_t offset;
709 
710     switch (mode) {
711     case 0: /* Data register direct.  */
712     case 1: /* Address register direct.  */
713         return NULL_QREG;
714     case 2: /* Indirect register */
715     case 3: /* Indirect postincrement.  */
716         return get_areg(s, reg0);
717     case 4: /* Indirect predecrement.  */
718         reg = get_areg(s, reg0);
719         tmp = tcg_temp_new();
720         tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
721         return tmp;
722     case 5: /* Indirect displacement.  */
723         reg = get_areg(s, reg0);
724         tmp = tcg_temp_new();
725         ext = read_im16(env, s);
726         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
727         return tmp;
728     case 6: /* Indirect index + displacement.  */
729         reg = get_areg(s, reg0);
730         return gen_lea_indexed(env, s, reg);
731     case 7: /* Other */
732         switch (reg0) {
733         case 0: /* Absolute short.  */
734             offset = (int16_t)read_im16(env, s);
735             return tcg_const_i32(offset);
736         case 1: /* Absolute long.  */
737             offset = read_im32(env, s);
738             return tcg_const_i32(offset);
739         case 2: /* pc displacement  */
740             offset = s->pc;
741             offset += (int16_t)read_im16(env, s);
742             return tcg_const_i32(offset);
743         case 3: /* pc index+displacement.  */
744             return gen_lea_indexed(env, s, NULL_QREG);
745         case 4: /* Immediate.  */
746         default:
747             return NULL_QREG;
748         }
749     }
750     /* Should never happen.  */
751     return NULL_QREG;
752 }
753 
754 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
755                     int opsize)
756 {
757     int mode = extract32(insn, 3, 3);
758     int reg0 = REG(insn, 0);
759     return gen_lea_mode(env, s, mode, reg0, opsize);
760 }
761 
762 /* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
763    this is a write, otherwise a read (EA_LOADS == sign extend, EA_LOADU ==
764    zero extend).  ADDRP is non-null for readwrite operands.  */
765 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
766                         int opsize, TCGv val, TCGv *addrp, ea_what what)
767 {
768     TCGv reg, tmp, result;
769     int32_t offset;
770 
771     switch (mode) {
772     case 0: /* Data register direct.  */
773         reg = cpu_dregs[reg0];
774         if (what == EA_STORE) {
775             gen_partset_reg(opsize, reg, val);
776             return store_dummy;
777         } else {
778             return gen_extend(reg, opsize, what == EA_LOADS);
779         }
780     case 1: /* Address register direct.  */
781         reg = get_areg(s, reg0);
782         if (what == EA_STORE) {
783             tcg_gen_mov_i32(reg, val);
784             return store_dummy;
785         } else {
786             return gen_extend(reg, opsize, what == EA_LOADS);
787         }
788     case 2: /* Indirect register */
789         reg = get_areg(s, reg0);
790         return gen_ldst(s, opsize, reg, val, what);
791     case 3: /* Indirect postincrement.  */
792         reg = get_areg(s, reg0);
793         result = gen_ldst(s, opsize, reg, val, what);
794         if (what == EA_STORE || !addrp) {
795             TCGv tmp = tcg_temp_new();
796             tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
797             delay_set_areg(s, reg0, tmp, true);
798         }
799         return result;
800     case 4: /* Indirect predecrement.  */
801         if (addrp && what == EA_STORE) {
802             tmp = *addrp;
803         } else {
804             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
805             if (IS_NULL_QREG(tmp)) {
806                 return tmp;
807             }
808             if (addrp) {
809                 *addrp = tmp;
810             }
811         }
812         result = gen_ldst(s, opsize, tmp, val, what);
813         if (what == EA_STORE || !addrp) {
814             delay_set_areg(s, reg0, tmp, false);
815         }
816         return result;
817     case 5: /* Indirect displacement.  */
818     case 6: /* Indirect index + displacement.  */
819     do_indirect:
820         if (addrp && what == EA_STORE) {
821             tmp = *addrp;
822         } else {
823             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
824             if (IS_NULL_QREG(tmp)) {
825                 return tmp;
826             }
827             if (addrp) {
828                 *addrp = tmp;
829             }
830         }
831         return gen_ldst(s, opsize, tmp, val, what);
832     case 7: /* Other */
833         switch (reg0) {
834         case 0: /* Absolute short.  */
835         case 1: /* Absolute long.  */
836         case 2: /* pc displacement  */
837         case 3: /* pc index+displacement.  */
838             goto do_indirect;
839         case 4: /* Immediate.  */
840             /* Sign extend values for consistency.  */
841             switch (opsize) {
842             case OS_BYTE:
843                 if (what == EA_LOADS) {
844                     offset = (int8_t)read_im8(env, s);
845                 } else {
846                     offset = read_im8(env, s);
847                 }
848                 break;
849             case OS_WORD:
850                 if (what == EA_LOADS) {
851                     offset = (int16_t)read_im16(env, s);
852                 } else {
853                     offset = read_im16(env, s);
854                 }
855                 break;
856             case OS_LONG:
857                 offset = read_im32(env, s);
858                 break;
859             default:
860                 g_assert_not_reached();
861             }
862             return tcg_const_i32(offset);
863         default:
864             return NULL_QREG;
865         }
866     }
867     /* Should never happen.  */
868     return NULL_QREG;
869 }
870 
871 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
872                    int opsize, TCGv val, TCGv *addrp, ea_what what)
873 {
874     int mode = extract32(insn, 3, 3);
875     int reg0 = REG(insn, 0);
876     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
877 }
878 
879 typedef struct {
880     TCGCond tcond;
881     bool g1;
882     bool g2;
883     TCGv v1;
884     TCGv v2;
885 } DisasCompare;
886 
887 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
888 {
889     TCGv tmp, tmp2;
890     TCGCond tcond;
891     CCOp op = s->cc_op;
892 
893     /* The CC_OP_CMP form can handle most normal comparisons directly.  */
894     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
895         c->g1 = c->g2 = 1;
896         c->v1 = QREG_CC_N;
897         c->v2 = QREG_CC_V;
898         switch (cond) {
899         case 2: /* HI */
900         case 3: /* LS */
901             tcond = TCG_COND_LEU;
902             goto done;
903         case 4: /* CC */
904         case 5: /* CS */
905             tcond = TCG_COND_LTU;
906             goto done;
907         case 6: /* NE */
908         case 7: /* EQ */
909             tcond = TCG_COND_EQ;
910             goto done;
911         case 10: /* PL */
912         case 11: /* MI */
913             c->g1 = c->g2 = 0;
914             c->v2 = tcg_const_i32(0);
915             c->v1 = tmp = tcg_temp_new();
916             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
917             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
918             /* fallthru */
919         case 12: /* GE */
920         case 13: /* LT */
921             tcond = TCG_COND_LT;
922             goto done;
923         case 14: /* GT */
924         case 15: /* LE */
925             tcond = TCG_COND_LE;
926             goto done;
927         }
928     }
929 
930     c->g1 = 1;
931     c->g2 = 0;
932     c->v2 = tcg_const_i32(0);
933 
934     switch (cond) {
935     case 0: /* T */
936     case 1: /* F */
937         c->v1 = c->v2;
938         tcond = TCG_COND_NEVER;
939         goto done;
940     case 14: /* GT (!(Z || (N ^ V))) */
941     case 15: /* LE (Z || (N ^ V)) */
942         /* Logic operations clear V, which simplifies LE to (Z || N),
943            and since Z and N are co-located, this becomes a normal
944            comparison vs N.  */
945         if (op == CC_OP_LOGIC) {
946             c->v1 = QREG_CC_N;
947             tcond = TCG_COND_LE;
948             goto done;
949         }
950         break;
951     case 12: /* GE (!(N ^ V)) */
952     case 13: /* LT (N ^ V) */
953         /* Logic operations clear V, which simplifies this to N.  */
954         if (op != CC_OP_LOGIC) {
955             break;
956         }
957         /* fallthru */
958     case 10: /* PL (!N) */
959     case 11: /* MI (N) */
960         /* Several cases represent N normally.  */
961         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
962             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
963             op == CC_OP_LOGIC) {
964             c->v1 = QREG_CC_N;
965             tcond = TCG_COND_LT;
966             goto done;
967         }
968         break;
969     case 6: /* NE (!Z) */
970     case 7: /* EQ (Z) */
971         /* Some cases fold Z into N.  */
972         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
973             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
974             op == CC_OP_LOGIC) {
975             tcond = TCG_COND_EQ;
976             c->v1 = QREG_CC_N;
977             goto done;
978         }
979         break;
980     case 4: /* CC (!C) */
981     case 5: /* CS (C) */
982         /* Some cases fold C into X.  */
983         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
984             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
985             tcond = TCG_COND_NE;
986             c->v1 = QREG_CC_X;
987             goto done;
988         }
989         /* fallthru */
990     case 8: /* VC (!V) */
991     case 9: /* VS (V) */
992         /* Logic operations clear V and C.  */
993         if (op == CC_OP_LOGIC) {
994             tcond = TCG_COND_NEVER;
995             c->v1 = c->v2;
996             goto done;
997         }
998         break;
999     }
1000 
1001     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1002     gen_flush_flags(s);
1003 
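    /* With CC_OP_FLAGS the C flag is "QREG_CC_C != 0", the Z flag is
     * "QREG_CC_Z == 0", and the N and V flags live in the sign bits of
     * QREG_CC_N and QREG_CC_V; that is what the NE/EQ/LT conditions below
     * test against zero.
     */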
1004     switch (cond) {
1005     case 0: /* T */
1006     case 1: /* F */
1007     default:
1008         /* Invalid, or handled above.  */
1009         abort();
1010     case 2: /* HI (!C && !Z) -> !(C || Z) */
1011     case 3: /* LS (C || Z) */
1012         c->v1 = tmp = tcg_temp_new();
1013         c->g1 = 0;
1014         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1015         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1016         tcond = TCG_COND_NE;
1017         break;
1018     case 4: /* CC (!C) */
1019     case 5: /* CS (C) */
1020         c->v1 = QREG_CC_C;
1021         tcond = TCG_COND_NE;
1022         break;
1023     case 6: /* NE (!Z) */
1024     case 7: /* EQ (Z) */
1025         c->v1 = QREG_CC_Z;
1026         tcond = TCG_COND_EQ;
1027         break;
1028     case 8: /* VC (!V) */
1029     case 9: /* VS (V) */
1030         c->v1 = QREG_CC_V;
1031         tcond = TCG_COND_LT;
1032         break;
1033     case 10: /* PL (!N) */
1034     case 11: /* MI (N) */
1035         c->v1 = QREG_CC_N;
1036         tcond = TCG_COND_LT;
1037         break;
1038     case 12: /* GE (!(N ^ V)) */
1039     case 13: /* LT (N ^ V) */
1040         c->v1 = tmp = tcg_temp_new();
1041         c->g1 = 0;
1042         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1043         tcond = TCG_COND_LT;
1044         break;
1045     case 14: /* GT (!(Z || (N ^ V))) */
1046     case 15: /* LE (Z || (N ^ V)) */
1047         c->v1 = tmp = tcg_temp_new();
1048         c->g1 = 0;
1049         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1050         tcg_gen_neg_i32(tmp, tmp);
1051         tmp2 = tcg_temp_new();
1052         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1053         tcg_gen_or_i32(tmp, tmp, tmp2);
1054         tcg_temp_free(tmp2);
1055         tcond = TCG_COND_LT;
1056         break;
1057     }
1058 
1059  done:
1060     if ((cond & 1) == 0) {
1061         tcond = tcg_invert_cond(tcond);
1062     }
1063     c->tcond = tcond;
1064 }
1065 
1066 static void free_cond(DisasCompare *c)
1067 {
1068     if (!c->g1) {
1069         tcg_temp_free(c->v1);
1070     }
1071     if (!c->g2) {
1072         tcg_temp_free(c->v2);
1073     }
1074 }
1075 
1076 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1077 {
1078     DisasCompare c;
1079 
1080     gen_cc_cond(&c, s, cond);
1081     update_cc_op(s);
1082     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1083     free_cond(&c);
1084 }
1085 
1086 /* Force a TB lookup after an instruction that changes the CPU state.  */
1087 static void gen_lookup_tb(DisasContext *s)
1088 {
1089     update_cc_op(s);
1090     tcg_gen_movi_i32(QREG_PC, s->pc);
1091     s->is_jmp = DISAS_UPDATE;
1092 }
1093 
1094 /* Generate a jump to an immediate address.  */
1095 static void gen_jmp_im(DisasContext *s, uint32_t dest)
1096 {
1097     update_cc_op(s);
1098     tcg_gen_movi_i32(QREG_PC, dest);
1099     s->is_jmp = DISAS_JUMP;
1100 }
1101 
1102 /* Generate a jump to the address in qreg DEST.  */
1103 static void gen_jmp(DisasContext *s, TCGv dest)
1104 {
1105     update_cc_op(s);
1106     tcg_gen_mov_i32(QREG_PC, dest);
1107     s->is_jmp = DISAS_JUMP;
1108 }
1109 
1110 static void gen_raise_exception(int nr)
1111 {
1112     TCGv_i32 tmp = tcg_const_i32(nr);
1113 
1114     gen_helper_raise_exception(cpu_env, tmp);
1115     tcg_temp_free_i32(tmp);
1116 }
1117 
1118 static void gen_exception(DisasContext *s, uint32_t where, int nr)
1119 {
1120     update_cc_op(s);
1121     gen_jmp_im(s, where);
1122     gen_raise_exception(nr);
1123 }
1124 
1125 static inline void gen_addr_fault(DisasContext *s)
1126 {
1127     gen_exception(s, s->insn_pc, EXCP_ADDRESS);
1128 }
1129 
1130 #define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1131         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1132                         op_sign ? EA_LOADS : EA_LOADU);                 \
1133         if (IS_NULL_QREG(result)) {                                     \
1134             gen_addr_fault(s);                                          \
1135             return;                                                     \
1136         }                                                               \
1137     } while (0)
1138 
1139 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1140         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1141         if (IS_NULL_QREG(ea_result)) {                                  \
1142             gen_addr_fault(s);                                          \
1143             return;                                                     \
1144         }                                                               \
1145     } while (0)
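/* Both macros bail out of the enclosing handler with an address error
 * exception when gen_ea() returns NULL_QREG, i.e. when the addressing mode
 * is invalid for the access.
 */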
1146 
1147 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1148 {
1149 #ifndef CONFIG_USER_ONLY
1150     return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1151            (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1152 #else
1153     return true;
1154 #endif
1155 }
1156 
1157 /* Generate a jump to an immediate address.  */
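/* When the destination lies on the same page as the TB (see use_goto_tb),
 * the jump is emitted as a goto_tb/exit_tb pair; the chain slot index N is
 * carried in the low bits of the value passed to tcg_gen_exit_tb() so the
 * TB can later be patched to link directly to its successor.
 */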
1158 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1159 {
1160     if (unlikely(s->singlestep_enabled)) {
1161         gen_exception(s, dest, EXCP_DEBUG);
1162     } else if (use_goto_tb(s, dest)) {
1163         tcg_gen_goto_tb(n);
1164         tcg_gen_movi_i32(QREG_PC, dest);
1165         tcg_gen_exit_tb((uintptr_t)s->tb + n);
1166     } else {
1167         gen_jmp_im(s, dest);
1168         tcg_gen_exit_tb(0);
1169     }
1170     s->is_jmp = DISAS_TB_JUMP;
1171 }
1172 
1173 DISAS_INSN(scc)
1174 {
1175     DisasCompare c;
1176     int cond;
1177     TCGv tmp;
1178 
1179     cond = (insn >> 8) & 0xf;
1180     gen_cc_cond(&c, s, cond);
1181 
1182     tmp = tcg_temp_new();
1183     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1184     free_cond(&c);
1185 
1186     tcg_gen_neg_i32(tmp, tmp);
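    /* setcond yields 0 or 1; the negation turns that into 0x00000000 or
     * 0xffffffff, so the byte stored by Scc is 0x00 or 0xff as required.
     */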
1187     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1188     tcg_temp_free(tmp);
1189 }
1190 
1191 DISAS_INSN(dbcc)
1192 {
1193     TCGLabel *l1;
1194     TCGv reg;
1195     TCGv tmp;
1196     int16_t offset;
1197     uint32_t base;
1198 
1199     reg = DREG(insn, 0);
1200     base = s->pc;
1201     offset = (int16_t)read_im16(env, s);
1202     l1 = gen_new_label();
1203     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
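    /* The branch to l1 is taken when the condition holds: in that case DBcc
     * neither decrements Dn nor branches, and execution continues with the
     * next instruction.
     */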
1204 
1205     tmp = tcg_temp_new();
1206     tcg_gen_ext16s_i32(tmp, reg);
1207     tcg_gen_addi_i32(tmp, tmp, -1);
1208     gen_partset_reg(OS_WORD, reg, tmp);
1209     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1210     gen_jmp_tb(s, 1, base + offset);
1211     gen_set_label(l1);
1212     gen_jmp_tb(s, 0, s->pc);
1213 }
1214 
1215 DISAS_INSN(undef_mac)
1216 {
1217     gen_exception(s, s->pc - 2, EXCP_LINEA);
1218 }
1219 
1220 DISAS_INSN(undef_fpu)
1221 {
1222     gen_exception(s, s->pc - 2, EXCP_LINEF);
1223 }
1224 
1225 DISAS_INSN(undef)
1226 {
1227     /* ??? This matches both instructions that are as yet unimplemented
1228        for the 680x0 series, and those that are implemented but are
1229        actually illegal for CPU32 or pre-68020.  */
1230     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1231                   insn, s->pc - 2);
1232     gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1233 }
1234 
1235 DISAS_INSN(mulw)
1236 {
1237     TCGv reg;
1238     TCGv tmp;
1239     TCGv src;
1240     int sign;
1241 
1242     sign = (insn & 0x100) != 0;
1243     reg = DREG(insn, 9);
1244     tmp = tcg_temp_new();
1245     if (sign)
1246         tcg_gen_ext16s_i32(tmp, reg);
1247     else
1248         tcg_gen_ext16u_i32(tmp, reg);
1249     SRC_EA(env, src, OS_WORD, sign, NULL);
1250     tcg_gen_mul_i32(tmp, tmp, src);
1251     tcg_gen_mov_i32(reg, tmp);
1252     gen_logic_cc(s, tmp, OS_LONG);
1253     tcg_temp_free(tmp);
1254 }
1255 
1256 DISAS_INSN(divw)
1257 {
1258     int sign;
1259     TCGv src;
1260     TCGv destr;
1261 
1262     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1263 
1264     sign = (insn & 0x100) != 0;
1265 
1266     /* dest.l / src.w */
1267 
1268     SRC_EA(env, src, OS_WORD, sign, NULL);
1269     destr = tcg_const_i32(REG(insn, 9));
1270     if (sign) {
1271         gen_helper_divsw(cpu_env, destr, src);
1272     } else {
1273         gen_helper_divuw(cpu_env, destr, src);
1274     }
1275     tcg_temp_free(destr);
1276 
1277     set_cc_op(s, CC_OP_FLAGS);
1278 }
1279 
1280 DISAS_INSN(divl)
1281 {
1282     TCGv num, reg, den;
1283     int sign;
1284     uint16_t ext;
1285 
1286     ext = read_im16(env, s);
1287 
1288     sign = (ext & 0x0800) != 0;
1289 
1290     if (ext & 0x400) {
1291         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1292             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1293             return;
1294         }
1295 
1296         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1297 
1298         SRC_EA(env, den, OS_LONG, 0, NULL);
1299         num = tcg_const_i32(REG(ext, 12));
1300         reg = tcg_const_i32(REG(ext, 0));
1301         if (sign) {
1302             gen_helper_divsll(cpu_env, num, reg, den);
1303         } else {
1304             gen_helper_divull(cpu_env, num, reg, den);
1305         }
1306         tcg_temp_free(reg);
1307         tcg_temp_free(num);
1308         set_cc_op(s, CC_OP_FLAGS);
1309         return;
1310     }
1311 
1312     /* divX.l <EA>, Dq        32/32 -> 32q     */
1313     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1314 
1315     SRC_EA(env, den, OS_LONG, 0, NULL);
1316     num = tcg_const_i32(REG(ext, 12));
1317     reg = tcg_const_i32(REG(ext, 0));
1318     if (sign) {
1319         gen_helper_divsl(cpu_env, num, reg, den);
1320     } else {
1321         gen_helper_divul(cpu_env, num, reg, den);
1322     }
1323     tcg_temp_free(reg);
1324     tcg_temp_free(num);
1325 
1326     set_cc_op(s, CC_OP_FLAGS);
1327 }
1328 
1329 static void bcd_add(TCGv dest, TCGv src)
1330 {
1331     TCGv t0, t1;
1332 
1333     /*  dest10 = dest10 + src10 + X
1334      *
1335      *        t1 = src
1336      *        t2 = t1 + 0x066
1337      *        t3 = t2 + dest + X
1338      *        t4 = t2 ^ dest
1339      *        t5 = t3 ^ t4
1340      *        t6 = ~t5 & 0x110
1341      *        t7 = (t6 >> 2) | (t6 >> 3)
1342      *        return t3 - t7
1343      */
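    /* Worked example (illustration only): dest = 0x19, src = 0x28, X = 0:
     *        t2 = 0x28 + 0x066          = 0x08e
     *        t3 = 0x08e + 0x19          = 0x0a7
     *        t4 = 0x08e ^ 0x19          = 0x097
     *        t5 = 0x0a7 ^ 0x097         = 0x030
     *        t6 = ~0x030 & 0x110        = 0x100
     *        t7 = (t6 >> 2) | (t6 >> 3) = 0x060
     *        result = 0x0a7 - 0x060     = 0x47, i.e. BCD 19 + 28 = 47
     */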
1344 
1345     /* t1 = (src + 0x066) + dest + X
1346      *    = result, with a possible excess 0x6 in each digit
1347      */
1348 
1349     t0 = tcg_const_i32(0x066);
1350     tcg_gen_add_i32(t0, t0, src);
1351 
1352     t1 = tcg_temp_new();
1353     tcg_gen_add_i32(t1, t0, dest);
1354     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1355 
1356     /* we will remove the excess 0x6 where there is no carry */
1357 
1358     /* t0 = (src + 0x0066) ^ dest
1359      *    = t1 without carries
1360      */
1361 
1362     tcg_gen_xor_i32(t0, t0, dest);
1363 
1364     /* extract the carries
1365      * t0 = t0 ^ t1
1366      *    = only the carries
1367      */
1368 
1369     tcg_gen_xor_i32(t0, t0, t1);
1370 
1371     /* generate 0x1 where there is no carry
1372      * and for each 0x10, generate a 0x6
1373      */
1374 
1375     tcg_gen_shri_i32(t0, t0, 3);
1376     tcg_gen_not_i32(t0, t0);
1377     tcg_gen_andi_i32(t0, t0, 0x22);
1378     tcg_gen_add_i32(dest, t0, t0);
1379     tcg_gen_add_i32(dest, dest, t0);
1380     tcg_temp_free(t0);
1381 
1382     /* remove the excess 0x6
1383      * for digits that have not generated a carry
1384      */
1385 
1386     tcg_gen_sub_i32(dest, t1, dest);
1387     tcg_temp_free(t1);
1388 }
1389 
1390 static void bcd_sub(TCGv dest, TCGv src)
1391 {
1392     TCGv t0, t1, t2;
1393 
1394     /*  dest10 = dest10 - src10 - X
1395      *         = bcd_add(dest + 1 - X, 0x199 - src)
1396      */
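    /* Worked example (illustration only): dest = 0x47, src = 0x25, X = 0:
     *        t0 = 0x1ff - 0x25          = 0x1da
     *        t1 = 0x1da + 0x47 + 1      = 0x222
     *        t2 = 0x1da ^ 0x47          = 0x19d
     *        t0 = 0x222 ^ 0x19d         = 0x3bf
     *        t2 = ~(0x3bf >> 3) & 0x22  = 0, so t0 = 0
     *        result = 0x222 - 0: low byte 0x22 (BCD 47 - 25 = 22), bit 8
     *        clear, so bcd_flags() reports no borrow.
     */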
1397 
1398     /* t0 = 0x066 + (0x199 - src) */
1399 
1400     t0 = tcg_temp_new();
1401     tcg_gen_subfi_i32(t0, 0x1ff, src);
1402 
1403     /* t1 = t0 + dest + 1 - X */
1404 
1405     t1 = tcg_temp_new();
1406     tcg_gen_add_i32(t1, t0, dest);
1407     tcg_gen_addi_i32(t1, t1, 1);
1408     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1409 
1410     /* t2 = t0 ^ dest */
1411 
1412     t2 = tcg_temp_new();
1413     tcg_gen_xor_i32(t2, t0, dest);
1414 
1415     /* t0 = t1 ^ t2 */
1416 
1417     tcg_gen_xor_i32(t0, t1, t2);
1418 
1419     /* t2 = ~t0 & 0x110
1420      * t0 = (t2 >> 2) | (t2 >> 3)
1421      *
1422      * to fit in 8-bit operands, this is rewritten as:
1423      *
1424      * t2 = ~(t0 >> 3) & 0x22
1425      * t0 = t2 + t2
1426      * t0 = t0 + t2
1427      */
1428 
1429     tcg_gen_shri_i32(t2, t0, 3);
1430     tcg_gen_not_i32(t2, t2);
1431     tcg_gen_andi_i32(t2, t2, 0x22);
1432     tcg_gen_add_i32(t0, t2, t2);
1433     tcg_gen_add_i32(t0, t0, t2);
1434     tcg_temp_free(t2);
1435 
1436     /* return t1 - t0 */
1437 
1438     tcg_gen_sub_i32(dest, t1, t0);
1439     tcg_temp_free(t0);
1440     tcg_temp_free(t1);
1441 }
1442 
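/* Set the flags from the 9-bit BCD result in VAL: C and X come from bit 8,
 * and Z is only ever cleared (made non-zero) when the low byte is non-zero,
 * which gives the sticky !Z behaviour the callers rely on.
 */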
1443 static void bcd_flags(TCGv val)
1444 {
1445     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1446     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1447 
1448     tcg_gen_shri_i32(QREG_CC_C, val, 8);
1449     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
1450 
1451     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1452 }
1453 
1454 DISAS_INSN(abcd_reg)
1455 {
1456     TCGv src;
1457     TCGv dest;
1458 
1459     gen_flush_flags(s); /* !Z is sticky */
1460 
1461     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1462     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1463     bcd_add(dest, src);
1464     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1465 
1466     bcd_flags(dest);
1467 }
1468 
1469 DISAS_INSN(abcd_mem)
1470 {
1471     TCGv src, dest, addr;
1472 
1473     gen_flush_flags(s); /* !Z is sticky */
1474 
1475     /* Indirect pre-decrement load (mode 4) */
1476 
1477     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1478                       NULL_QREG, NULL, EA_LOADU);
1479     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1480                        NULL_QREG, &addr, EA_LOADU);
1481 
1482     bcd_add(dest, src);
1483 
1484     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1485 
1486     bcd_flags(dest);
1487 }
1488 
1489 DISAS_INSN(sbcd_reg)
1490 {
1491     TCGv src, dest;
1492 
1493     gen_flush_flags(s); /* !Z is sticky */
1494 
1495     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1496     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1497 
1498     bcd_sub(dest, src);
1499 
1500     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1501 
1502     bcd_flags(dest);
1503 }
1504 
1505 DISAS_INSN(sbcd_mem)
1506 {
1507     TCGv src, dest, addr;
1508 
1509     gen_flush_flags(s); /* !Z is sticky */
1510 
1511     /* Indirect pre-decrement load (mode 4) */
1512 
1513     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1514                       NULL_QREG, NULL, EA_LOADU);
1515     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1516                        NULL_QREG, &addr, EA_LOADU);
1517 
1518     bcd_sub(dest, src);
1519 
1520     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1521 
1522     bcd_flags(dest);
1523 }
1524 
1525 DISAS_INSN(nbcd)
1526 {
1527     TCGv src, dest;
1528     TCGv addr;
1529 
1530     gen_flush_flags(s); /* !Z is sticky */
1531 
1532     SRC_EA(env, src, OS_BYTE, 0, &addr);
1533 
1534     dest = tcg_const_i32(0);
1535     bcd_sub(dest, src);
1536 
1537     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1538 
1539     bcd_flags(dest);
1540 
1541     tcg_temp_free(dest);
1542 }
1543 
1544 DISAS_INSN(addsub)
1545 {
1546     TCGv reg;
1547     TCGv dest;
1548     TCGv src;
1549     TCGv tmp;
1550     TCGv addr;
1551     int add;
1552     int opsize;
1553 
1554     add = (insn & 0x4000) != 0;
1555     opsize = insn_opsize(insn);
1556     reg = gen_extend(DREG(insn, 9), opsize, 1);
1557     dest = tcg_temp_new();
1558     if (insn & 0x100) {
1559         SRC_EA(env, tmp, opsize, 1, &addr);
1560         src = reg;
1561     } else {
1562         tmp = reg;
1563         SRC_EA(env, src, opsize, 1, NULL);
1564     }
1565     if (add) {
1566         tcg_gen_add_i32(dest, tmp, src);
1567         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1568         set_cc_op(s, CC_OP_ADDB + opsize);
1569     } else {
1570         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1571         tcg_gen_sub_i32(dest, tmp, src);
1572         set_cc_op(s, CC_OP_SUBB + opsize);
1573     }
1574     gen_update_cc_add(dest, src, opsize);
1575     if (insn & 0x100) {
1576         DEST_EA(env, insn, opsize, dest, &addr);
1577     } else {
1578         gen_partset_reg(opsize, DREG(insn, 9), dest);
1579     }
1580     tcg_temp_free(dest);
1581 }
1582 
1583 /* Reverse the order of the bits in REG.  */
1584 DISAS_INSN(bitrev)
1585 {
1586     TCGv reg;
1587     reg = DREG(insn, 0);
1588     gen_helper_bitrev(reg, reg);
1589 }
1590 
1591 DISAS_INSN(bitop_reg)
1592 {
1593     int opsize;
1594     int op;
1595     TCGv src1;
1596     TCGv src2;
1597     TCGv tmp;
1598     TCGv addr;
1599     TCGv dest;
1600 
1601     if ((insn & 0x38) != 0)
1602         opsize = OS_BYTE;
1603     else
1604         opsize = OS_LONG;
1605     op = (insn >> 6) & 3;
1606     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1607 
1608     gen_flush_flags(s);
1609     src2 = tcg_temp_new();
1610     if (opsize == OS_BYTE)
1611         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1612     else
1613         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1614 
1615     tmp = tcg_const_i32(1);
1616     tcg_gen_shl_i32(tmp, tmp, src2);
1617     tcg_temp_free(src2);
1618 
1619     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
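    /* QREG_CC_Z now holds the bit under test, before any modification; with
     * the "Z is set when QREG_CC_Z == 0" convention, Z reflects the original
     * state of that bit, as BTST/BCHG/BCLR/BSET require.
     */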
1620 
1621     dest = tcg_temp_new();
1622     switch (op) {
1623     case 1: /* bchg */
1624         tcg_gen_xor_i32(dest, src1, tmp);
1625         break;
1626     case 2: /* bclr */
1627         tcg_gen_andc_i32(dest, src1, tmp);
1628         break;
1629     case 3: /* bset */
1630         tcg_gen_or_i32(dest, src1, tmp);
1631         break;
1632     default: /* btst */
1633         break;
1634     }
1635     tcg_temp_free(tmp);
1636     if (op) {
1637         DEST_EA(env, insn, opsize, dest, &addr);
1638     }
1639     tcg_temp_free(dest);
1640 }
1641 
1642 DISAS_INSN(sats)
1643 {
1644     TCGv reg;
1645     reg = DREG(insn, 0);
1646     gen_flush_flags(s);
1647     gen_helper_sats(reg, reg, QREG_CC_V);
1648     gen_logic_cc(s, reg, OS_LONG);
1649 }
1650 
1651 static void gen_push(DisasContext *s, TCGv val)
1652 {
1653     TCGv tmp;
1654 
1655     tmp = tcg_temp_new();
1656     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1657     gen_store(s, OS_LONG, tmp, val);
1658     tcg_gen_mov_i32(QREG_SP, tmp);
1659     tcg_temp_free(tmp);
1660 }
1661 
1662 static TCGv mreg(int reg)
1663 {
1664     if (reg < 8) {
1665         /* Dx */
1666         return cpu_dregs[reg];
1667     }
1668     /* Ax */
1669     return cpu_aregs[reg & 7];
1670 }
1671 
1672 DISAS_INSN(movem)
1673 {
1674     TCGv addr, incr, tmp, r[16];
1675     int is_load = (insn & 0x0400) != 0;
1676     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
1677     uint16_t mask = read_im16(env, s);
1678     int mode = extract32(insn, 3, 3);
1679     int reg0 = REG(insn, 0);
1680     int i;
1681 
1682     tmp = cpu_aregs[reg0];
1683 
1684     switch (mode) {
1685     case 0: /* data register direct */
1686     case 1: /* addr register direct */
1687     do_addr_fault:
1688         gen_addr_fault(s);
1689         return;
1690 
1691     case 2: /* indirect */
1692         break;
1693 
1694     case 3: /* indirect post-increment */
1695         if (!is_load) {
1696             /* post-increment is not allowed */
1697             goto do_addr_fault;
1698         }
1699         break;
1700 
1701     case 4: /* indirect pre-decrement */
1702         if (is_load) {
1703             /* pre-decrement is not allowed */
1704             goto do_addr_fault;
1705         }
1706         /* We want a bare copy of the address reg, without the pre-decrement
1707            adjustment that gen_lea would apply.  */
1708         break;
1709 
1710     default:
1711         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
1712         if (IS_NULL_QREG(tmp)) {
1713             goto do_addr_fault;
1714         }
1715         break;
1716     }
1717 
1718     addr = tcg_temp_new();
1719     tcg_gen_mov_i32(addr, tmp);
1720     incr = tcg_const_i32(opsize_bytes(opsize));
1721 
1722     if (is_load) {
1723         /* memory to register */
1724         for (i = 0; i < 16; i++) {
1725             if (mask & (1 << i)) {
1726                 r[i] = gen_load(s, opsize, addr, 1);
1727                 tcg_gen_add_i32(addr, addr, incr);
1728             }
1729         }
1730         for (i = 0; i < 16; i++) {
1731             if (mask & (1 << i)) {
1732                 tcg_gen_mov_i32(mreg(i), r[i]);
1733                 tcg_temp_free(r[i]);
1734             }
1735         }
1736         if (mode == 3) {
1737             /* post-increment: movem (An)+,X */
1738             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1739         }
1740     } else {
1741         /* register to memory */
1742         if (mode == 4) {
1743             /* pre-decrement: movem X,-(An) */
1744             for (i = 15; i >= 0; i--) {
1745                 if ((mask << i) & 0x8000) {
1746                     tcg_gen_sub_i32(addr, addr, incr);
1747                     if (reg0 + 8 == i &&
1748                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
1749                         /* M68020+: if the addressing register is the
1750                          * register moved to memory, the value written
1751                          * is the initial value decremented by the size of
1752                          * the operation, regardless of how many actual
1753                          * stores have been performed until this point.
1754                          * M68000/M68010: the value is the initial value.
1755                          */
1756                         tmp = tcg_temp_new();
1757                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
1758                         gen_store(s, opsize, addr, tmp);
1759                         tcg_temp_free(tmp);
1760                     } else {
1761                         gen_store(s, opsize, addr, mreg(i));
1762                     }
1763                 }
1764             }
1765             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1766         } else {
1767             for (i = 0; i < 16; i++) {
1768                 if (mask & (1 << i)) {
1769                     gen_store(s, opsize, addr, mreg(i));
1770                     tcg_gen_add_i32(addr, addr, incr);
1771                 }
1772             }
1773         }
1774     }
1775 
1776     tcg_temp_free(incr);
1777     tcg_temp_free(addr);
1778 }
1779 
1780 DISAS_INSN(bitop_im)
1781 {
1782     int opsize;
1783     int op;
1784     TCGv src1;
1785     uint32_t mask;
1786     int bitnum;
1787     TCGv tmp;
1788     TCGv addr;
1789 
1790     if ((insn & 0x38) != 0)
1791         opsize = OS_BYTE;
1792     else
1793         opsize = OS_LONG;
1794     op = (insn >> 6) & 3;
1795 
1796     bitnum = read_im16(env, s);
1797     if (bitnum & 0xff00) {
1798         disas_undef(env, s, insn);
1799         return;
1800     }
1801 
1802     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1803 
1804     gen_flush_flags(s);
1805     if (opsize == OS_BYTE)
1806         bitnum &= 7;
1807     else
1808         bitnum &= 31;
1809     mask = 1 << bitnum;
1810 
1811     tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
1812 
1813     if (op) {
1814         tmp = tcg_temp_new();
1815         switch (op) {
1816         case 1: /* bchg */
1817             tcg_gen_xori_i32(tmp, src1, mask);
1818             break;
1819         case 2: /* bclr */
1820             tcg_gen_andi_i32(tmp, src1, ~mask);
1821             break;
1822         case 3: /* bset */
1823             tcg_gen_ori_i32(tmp, src1, mask);
1824             break;
1825         default: /* btst */
1826             break;
1827         }
1828         DEST_EA(env, insn, opsize, tmp, &addr);
1829         tcg_temp_free(tmp);
1830     }
1831 }
1832 
1833 DISAS_INSN(arith_im)
1834 {
1835     int op;
1836     TCGv im;
1837     TCGv src1;
1838     TCGv dest;
1839     TCGv addr;
1840     int opsize;
1841 
1842     op = (insn >> 9) & 7;
1843     opsize = insn_opsize(insn);
1844     switch (opsize) {
1845     case OS_BYTE:
1846         im = tcg_const_i32((int8_t)read_im8(env, s));
1847         break;
1848     case OS_WORD:
1849         im = tcg_const_i32((int16_t)read_im16(env, s));
1850         break;
1851     case OS_LONG:
1852         im = tcg_const_i32(read_im32(env, s));
1853         break;
1854     default:
1855         abort();
1856     }
1857     SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
1858     dest = tcg_temp_new();
1859     switch (op) {
1860     case 0: /* ori */
1861         tcg_gen_or_i32(dest, src1, im);
1862         gen_logic_cc(s, dest, opsize);
1863         break;
1864     case 1: /* andi */
1865         tcg_gen_and_i32(dest, src1, im);
1866         gen_logic_cc(s, dest, opsize);
1867         break;
1868     case 2: /* subi */
1869         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
1870         tcg_gen_sub_i32(dest, src1, im);
1871         gen_update_cc_add(dest, im, opsize);
1872         set_cc_op(s, CC_OP_SUBB + opsize);
1873         break;
1874     case 3: /* addi */
1875         tcg_gen_add_i32(dest, src1, im);
1876         gen_update_cc_add(dest, im, opsize);
1877         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1878         set_cc_op(s, CC_OP_ADDB + opsize);
1879         break;
1880     case 5: /* eori */
1881         tcg_gen_xor_i32(dest, src1, im);
1882         gen_logic_cc(s, dest, opsize);
1883         break;
1884     case 6: /* cmpi */
1885         gen_update_cc_cmp(s, src1, im, opsize);
1886         break;
1887     default:
1888         abort();
1889     }
1890     tcg_temp_free(im);
1891     if (op != 6) {
1892         DEST_EA(env, insn, opsize, dest, &addr);
1893     }
1894     tcg_temp_free(dest);
1895 }
1896 
1897 DISAS_INSN(cas)
1898 {
1899     int opsize;
1900     TCGv addr;
1901     uint16_t ext;
1902     TCGv load;
1903     TCGv cmp;
1904     TCGMemOp opc;
1905 
1906     switch ((insn >> 9) & 3) {
1907     case 1:
1908         opsize = OS_BYTE;
1909         opc = MO_SB;
1910         break;
1911     case 2:
1912         opsize = OS_WORD;
1913         opc = MO_TESW;
1914         break;
1915     case 3:
1916         opsize = OS_LONG;
1917         opc = MO_TESL;
1918         break;
1919     default:
1920         g_assert_not_reached();
1921     }
1922     opc |= MO_ALIGN;
1923 
1924     ext = read_im16(env, s);
1925 
1926     /* cas Dc,Du,<EA> */
1927 
1928     addr = gen_lea(env, s, insn, opsize);
1929     if (IS_NULL_QREG(addr)) {
1930         gen_addr_fault(s);
1931         return;
1932     }
1933 
1934     cmp = gen_extend(DREG(ext, 0), opsize, 1);
1935 
1936     /* if  <EA> == Dc then
1937      *     <EA> = Du
1938      *     Dc = <EA> (because <EA> == Dc)
1939      * else
1940      *     Dc = <EA>
1941      */
1942 
1943     load = tcg_temp_new();
1944     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
1945                                IS_USER(s), opc);
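         /* The cmpxchg leaves the previous memory value in 'load'; copying
          * it into Dc below is right for both outcomes, since on success
          * that old value is equal to Dc anyway.
          */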
1946     /* update flags before setting cmp to load */
1947     gen_update_cc_cmp(s, load, cmp, opsize);
1948     gen_partset_reg(opsize, DREG(ext, 0), load);
1949 
1950     tcg_temp_free(load);
1951 }
1952 
1953 DISAS_INSN(cas2w)
1954 {
1955     uint16_t ext1, ext2;
1956     TCGv addr1, addr2;
1957     TCGv regs;
1958 
1959     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
1960 
1961     ext1 = read_im16(env, s);
1962 
1963     if (ext1 & 0x8000) {
1964         /* Address Register */
1965         addr1 = AREG(ext1, 12);
1966     } else {
1967         /* Data Register */
1968         addr1 = DREG(ext1, 12);
1969     }
1970 
1971     ext2 = read_im16(env, s);
1972     if (ext2 & 0x8000) {
1973         /* Address Register */
1974         addr2 = AREG(ext2, 12);
1975     } else {
1976         /* Data Register */
1977         addr2 = DREG(ext2, 12);
1978     }
1979 
1980     /* if (R1) == Dc1 && (R2) == Dc2 then
1981      *     (R1) = Du1
1982      *     (R2) = Du2
1983      * else
1984      *     Dc1 = (R1)
1985      *     Dc2 = (R2)
1986      */
1987 
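         /* Pack the four data register numbers for the helper:
          * Du2, Du1, Dc2 and Dc1, from the low bits up.
          */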
1988     regs = tcg_const_i32(REG(ext2, 6) |
1989                          (REG(ext1, 6) << 3) |
1990                          (REG(ext2, 0) << 6) |
1991                          (REG(ext1, 0) << 9));
1992     gen_helper_cas2w(cpu_env, regs, addr1, addr2);
1993     tcg_temp_free(regs);
1994 
1995     /* Note that the cas2w helper also assigns env->cc_op.  */
1996     s->cc_op = CC_OP_CMPW;
1997     s->cc_op_synced = 1;
1998 }
1999 
2000 DISAS_INSN(cas2l)
2001 {
2002     uint16_t ext1, ext2;
2003     TCGv addr1, addr2, regs;
2004 
2005     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2006 
2007     ext1 = read_im16(env, s);
2008 
2009     if (ext1 & 0x8000) {
2010         /* Address Register */
2011         addr1 = AREG(ext1, 12);
2012     } else {
2013         /* Data Register */
2014         addr1 = DREG(ext1, 12);
2015     }
2016 
2017     ext2 = read_im16(env, s);
2018     if (ext2 & 0x8000) {
2019         /* Address Register */
2020         addr2 = AREG(ext2, 12);
2021     } else {
2022         /* Data Register */
2023         addr2 = DREG(ext2, 12);
2024     }
2025 
2026     /* if (R1) == Dc1 && (R2) == Dc2 then
2027      *     (R1) = Du1
2028      *     (R2) = Du2
2029      * else
2030      *     Dc1 = (R1)
2031      *     Dc2 = (R2)
2032      */
2033 
2034     regs = tcg_const_i32(REG(ext2, 6) |
2035                          (REG(ext1, 6) << 3) |
2036                          (REG(ext2, 0) << 6) |
2037                          (REG(ext1, 0) << 9));
2038     gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2039     tcg_temp_free(regs);
2040 
2041     /* Note that the cas2l helper also assigns env->cc_op.  */
2042     s->cc_op = CC_OP_CMPL;
2043     s->cc_op_synced = 1;
2044 }
2045 
2046 DISAS_INSN(byterev)
2047 {
2048     TCGv reg;
2049 
2050     reg = DREG(insn, 0);
2051     tcg_gen_bswap32_i32(reg, reg);
2052 }
2053 
2054 DISAS_INSN(move)
2055 {
2056     TCGv src;
2057     TCGv dest;
2058     int op;
2059     int opsize;
2060 
2061     switch (insn >> 12) {
2062     case 1: /* move.b */
2063         opsize = OS_BYTE;
2064         break;
2065     case 2: /* move.l */
2066         opsize = OS_LONG;
2067         break;
2068     case 3: /* move.w */
2069         opsize = OS_WORD;
2070         break;
2071     default:
2072         abort();
2073     }
2074     SRC_EA(env, src, opsize, 1, NULL);
2075     op = (insn >> 6) & 7;
2076     if (op == 1) {
2077         /* movea */
2078         /* The value will already have been sign extended.  */
2079         dest = AREG(insn, 9);
2080         tcg_gen_mov_i32(dest, src);
2081     } else {
2082         /* normal move */
2083         uint16_t dest_ea;
2084         dest_ea = ((insn >> 9) & 7) | (op << 3);
2085         DEST_EA(env, dest_ea, opsize, src, NULL);
2086         /* This will be correct because loads sign extend.  */
2087         gen_logic_cc(s, src, opsize);
2088     }
2089 }
2090 
2091 DISAS_INSN(negx)
2092 {
2093     TCGv z;
2094     TCGv src;
2095     TCGv addr;
2096     int opsize;
2097 
2098     opsize = insn_opsize(insn);
2099     SRC_EA(env, src, opsize, 1, &addr);
2100 
2101     gen_flush_flags(s); /* compute old Z */
2102 
2103     /* Perform subtraction with borrow.
2104      * (X, N) = -(src + X);
2105      */
2106 
2107     z = tcg_const_i32(0);
2108     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2109     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2110     tcg_temp_free(z);
2111     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2112 
2113     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2114 
2115     /* Compute signed-overflow for negation.  The normal formula for
2116      * subtraction is (res ^ src) & (src ^ dest), but with dest==0
2117      * this simplifies to res & src.
2118      */
2119 
2120     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2121 
2122     /* Copy the rest of the results into place.  */
2123     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2124     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2125 
2126     set_cc_op(s, CC_OP_FLAGS);
2127 
2128     /* result is in QREG_CC_N */
2129 
2130     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2131 }
2132 
2133 DISAS_INSN(lea)
2134 {
2135     TCGv reg;
2136     TCGv tmp;
2137 
2138     reg = AREG(insn, 9);
2139     tmp = gen_lea(env, s, insn, OS_LONG);
2140     if (IS_NULL_QREG(tmp)) {
2141         gen_addr_fault(s);
2142         return;
2143     }
2144     tcg_gen_mov_i32(reg, tmp);
2145 }
2146 
2147 DISAS_INSN(clr)
2148 {
2149     int opsize;
2150     TCGv zero;
2151 
2152     zero = tcg_const_i32(0);
2153 
2154     opsize = insn_opsize(insn);
2155     DEST_EA(env, insn, opsize, zero, NULL);
2156     gen_logic_cc(s, zero, opsize);
2157     tcg_temp_free(zero);
2158 }
2159 
2160 static TCGv gen_get_ccr(DisasContext *s)
2161 {
2162     TCGv dest;
2163 
2164     gen_flush_flags(s);
2165     update_cc_op(s);
2166     dest = tcg_temp_new();
2167     gen_helper_get_ccr(dest, cpu_env);
2168     return dest;
2169 }
2170 
2171 DISAS_INSN(move_from_ccr)
2172 {
2173     TCGv ccr;
2174 
2175     ccr = gen_get_ccr(s);
2176     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2177 }
2178 
2179 DISAS_INSN(neg)
2180 {
2181     TCGv src1;
2182     TCGv dest;
2183     TCGv addr;
2184     int opsize;
2185 
2186     opsize = insn_opsize(insn);
2187     SRC_EA(env, src1, opsize, 1, &addr);
2188     dest = tcg_temp_new();
2189     tcg_gen_neg_i32(dest, src1);
2190     set_cc_op(s, CC_OP_SUBB + opsize);
2191     gen_update_cc_add(dest, src1, opsize);
2192     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2193     DEST_EA(env, insn, opsize, dest, &addr);
2194     tcg_temp_free(dest);
2195 }
2196 
2197 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2198 {
2199     if (ccr_only) {
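             /* Materialize the flags in their internal form: C and X in
              * bit 0, N and V as 0/-1 values, and Z inverted (the flag is
              * set when QREG_CC_Z is zero).
              */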
2200         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2201         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2202         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2203         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2204         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2205     } else {
2206         gen_helper_set_sr(cpu_env, tcg_const_i32(val));
2207     }
2208     set_cc_op(s, CC_OP_FLAGS);
2209 }
2210 
2211 static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2212                        int ccr_only)
2213 {
2214     if ((insn & 0x38) == 0) {
2215         if (ccr_only) {
2216             gen_helper_set_ccr(cpu_env, DREG(insn, 0));
2217         } else {
2218             gen_helper_set_sr(cpu_env, DREG(insn, 0));
2219         }
2220         set_cc_op(s, CC_OP_FLAGS);
2221     } else if ((insn & 0x3f) == 0x3c) {
2222         uint16_t val;
2223         val = read_im16(env, s);
2224         gen_set_sr_im(s, val, ccr_only);
2225     } else {
2226         disas_undef(env, s, insn);
2227     }
2228 }
2229 
2230 
2231 DISAS_INSN(move_to_ccr)
2232 {
2233     gen_set_sr(env, s, insn, 1);
2234 }
2235 
2236 DISAS_INSN(not)
2237 {
2238     TCGv src1;
2239     TCGv dest;
2240     TCGv addr;
2241     int opsize;
2242 
2243     opsize = insn_opsize(insn);
2244     SRC_EA(env, src1, opsize, 1, &addr);
2245     dest = tcg_temp_new();
2246     tcg_gen_not_i32(dest, src1);
2247     DEST_EA(env, insn, opsize, dest, &addr);
2248     gen_logic_cc(s, dest, opsize);
2249 }
2250 
2251 DISAS_INSN(swap)
2252 {
2253     TCGv src1;
2254     TCGv src2;
2255     TCGv reg;
2256 
2257     src1 = tcg_temp_new();
2258     src2 = tcg_temp_new();
2259     reg = DREG(insn, 0);
2260     tcg_gen_shli_i32(src1, reg, 16);
2261     tcg_gen_shri_i32(src2, reg, 16);
2262     tcg_gen_or_i32(reg, src1, src2);
2263     tcg_temp_free(src2);
2264     tcg_temp_free(src1);
2265     gen_logic_cc(s, reg, OS_LONG);
2266 }
2267 
2268 DISAS_INSN(bkpt)
2269 {
2270     gen_exception(s, s->pc - 2, EXCP_DEBUG);
2271 }
2272 
2273 DISAS_INSN(pea)
2274 {
2275     TCGv tmp;
2276 
2277     tmp = gen_lea(env, s, insn, OS_LONG);
2278     if (IS_NULL_QREG(tmp)) {
2279         gen_addr_fault(s);
2280         return;
2281     }
2282     gen_push(s, tmp);
2283 }
2284 
2285 DISAS_INSN(ext)
2286 {
2287     int op;
2288     TCGv reg;
2289     TCGv tmp;
2290 
2291     reg = DREG(insn, 0);
2292     op = (insn >> 6) & 7;
2293     tmp = tcg_temp_new();
2294     if (op == 3)
2295         tcg_gen_ext16s_i32(tmp, reg);
2296     else
2297         tcg_gen_ext8s_i32(tmp, reg);
2298     if (op == 2)
2299         gen_partset_reg(OS_WORD, reg, tmp);
2300     else
2301         tcg_gen_mov_i32(reg, tmp);
2302     gen_logic_cc(s, tmp, OS_LONG);
2303     tcg_temp_free(tmp);
2304 }
2305 
2306 DISAS_INSN(tst)
2307 {
2308     int opsize;
2309     TCGv tmp;
2310 
2311     opsize = insn_opsize(insn);
2312     SRC_EA(env, tmp, opsize, 1, NULL);
2313     gen_logic_cc(s, tmp, opsize);
2314 }
2315 
2316 DISAS_INSN(pulse)
2317 {
2318     /* Implemented as a NOP.  */
2319 }
2320 
2321 DISAS_INSN(illegal)
2322 {
2323     gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2324 }
2325 
2326 /* ??? This should be atomic.  */
2327 DISAS_INSN(tas)
2328 {
2329     TCGv dest;
2330     TCGv src1;
2331     TCGv addr;
2332 
2333     dest = tcg_temp_new();
2334     SRC_EA(env, src1, OS_BYTE, 1, &addr);
2335     gen_logic_cc(s, src1, OS_BYTE);
2336     tcg_gen_ori_i32(dest, src1, 0x80);
2337     DEST_EA(env, insn, OS_BYTE, dest, &addr);
2338     tcg_temp_free(dest);
2339 }
2340 
2341 DISAS_INSN(mull)
2342 {
2343     uint16_t ext;
2344     TCGv src1;
2345     int sign;
2346 
2347     ext = read_im16(env, s);
2348 
2349     sign = ext & 0x800;
2350 
2351     if (ext & 0x400) {
2352         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2353             gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
2354             return;
2355         }
2356 
2357         SRC_EA(env, src1, OS_LONG, 0, NULL);
2358 
2359         if (sign) {
2360             tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2361         } else {
2362             tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2363         }
2364         /* if Dl == Dh, 68040 returns low word */
2365         tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2366         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
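             /* Make Z reflect the full 64-bit product: the flag is set
              * only if both halves are zero.
              */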
2367         tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2368 
2369         tcg_gen_movi_i32(QREG_CC_V, 0);
2370         tcg_gen_movi_i32(QREG_CC_C, 0);
2371 
2372         set_cc_op(s, CC_OP_FLAGS);
2373         return;
2374     }
2375     SRC_EA(env, src1, OS_LONG, 0, NULL);
2376     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2377         tcg_gen_movi_i32(QREG_CC_C, 0);
2378         if (sign) {
2379             tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2380             /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2381             tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2382             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2383         } else {
2384             tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2385             /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2386             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2387         }
2388         tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2389         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2390 
2391         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2392 
2393         set_cc_op(s, CC_OP_FLAGS);
2394     } else {
2395         /* The upper 32 bits of the product are discarded, so
2396            muls.l and mulu.l are functionally equivalent.  */
2397         tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2398         gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2399     }
2400 }
2401 
2402 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2403 {
2404     TCGv reg;
2405     TCGv tmp;
2406 
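         /* LINK: push An, make An point at the saved value (the new frame
          * pointer), then add the offset to SP.  When An is A7 itself, the
          * frame pointer update is skipped.
          */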
2407     reg = AREG(insn, 0);
2408     tmp = tcg_temp_new();
2409     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2410     gen_store(s, OS_LONG, tmp, reg);
2411     if ((insn & 7) != 7) {
2412         tcg_gen_mov_i32(reg, tmp);
2413     }
2414     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2415     tcg_temp_free(tmp);
2416 }
2417 
2418 DISAS_INSN(link)
2419 {
2420     int16_t offset;
2421 
2422     offset = read_im16(env, s);
2423     gen_link(s, insn, offset);
2424 }
2425 
2426 DISAS_INSN(linkl)
2427 {
2428     int32_t offset;
2429 
2430     offset = read_im32(env, s);
2431     gen_link(s, insn, offset);
2432 }
2433 
2434 DISAS_INSN(unlk)
2435 {
2436     TCGv src;
2437     TCGv reg;
2438     TCGv tmp;
2439 
2440     src = tcg_temp_new();
2441     reg = AREG(insn, 0);
2442     tcg_gen_mov_i32(src, reg);
2443     tmp = gen_load(s, OS_LONG, src, 0);
2444     tcg_gen_mov_i32(reg, tmp);
2445     tcg_gen_addi_i32(QREG_SP, src, 4);
2446     tcg_temp_free(src);
2447 }
2448 
2449 DISAS_INSN(nop)
2450 {
2451 }
2452 
2453 DISAS_INSN(rts)
2454 {
2455     TCGv tmp;
2456 
2457     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2458     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2459     gen_jmp(s, tmp);
2460 }
2461 
2462 DISAS_INSN(jump)
2463 {
2464     TCGv tmp;
2465 
2466     /* Load the target address first to ensure correct exception
2467        behavior.  */
2468     tmp = gen_lea(env, s, insn, OS_LONG);
2469     if (IS_NULL_QREG(tmp)) {
2470         gen_addr_fault(s);
2471         return;
2472     }
2473     if ((insn & 0x40) == 0) {
2474         /* jsr */
2475         gen_push(s, tcg_const_i32(s->pc));
2476     }
2477     gen_jmp(s, tmp);
2478 }
2479 
2480 DISAS_INSN(addsubq)
2481 {
2482     TCGv src;
2483     TCGv dest;
2484     TCGv val;
2485     int imm;
2486     TCGv addr;
2487     int opsize;
2488 
2489     if ((insn & 070) == 010) {
2490         /* Operation on address register is always long.  */
2491         opsize = OS_LONG;
2492     } else {
2493         opsize = insn_opsize(insn);
2494     }
2495     SRC_EA(env, src, opsize, 1, &addr);
2496     imm = (insn >> 9) & 7;
2497     if (imm == 0) {
2498         imm = 8;
2499     }
2500     val = tcg_const_i32(imm);
2501     dest = tcg_temp_new();
2502     tcg_gen_mov_i32(dest, src);
2503     if ((insn & 0x38) == 0x08) {
2504         /* Don't update condition codes if the destination is an
2505            address register.  */
2506         if (insn & 0x0100) {
2507             tcg_gen_sub_i32(dest, dest, val);
2508         } else {
2509             tcg_gen_add_i32(dest, dest, val);
2510         }
2511     } else {
2512         if (insn & 0x0100) {
2513             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2514             tcg_gen_sub_i32(dest, dest, val);
2515             set_cc_op(s, CC_OP_SUBB + opsize);
2516         } else {
2517             tcg_gen_add_i32(dest, dest, val);
2518             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2519             set_cc_op(s, CC_OP_ADDB + opsize);
2520         }
2521         gen_update_cc_add(dest, val, opsize);
2522     }
2523     tcg_temp_free(val);
2524     DEST_EA(env, insn, opsize, dest, &addr);
2525     tcg_temp_free(dest);
2526 }
2527 
2528 DISAS_INSN(tpf)
2529 {
2530     switch (insn & 7) {
2531     case 2: /* One extension word.  */
2532         s->pc += 2;
2533         break;
2534     case 3: /* Two extension words.  */
2535         s->pc += 4;
2536         break;
2537     case 4: /* No extension words.  */
2538         break;
2539     default:
2540         disas_undef(env, s, insn);
2541     }
2542 }
2543 
2544 DISAS_INSN(branch)
2545 {
2546     int32_t offset;
2547     uint32_t base;
2548     int op;
2549     TCGLabel *l1;
2550 
2551     base = s->pc;
2552     op = (insn >> 8) & 0xf;
2553     offset = (int8_t)insn;
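         /* An 8-bit displacement of 0x00 means a 16-bit displacement
          * follows; 0xff means a 32-bit displacement follows.
          */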
2554     if (offset == 0) {
2555         offset = (int16_t)read_im16(env, s);
2556     } else if (offset == -1) {
2557         offset = read_im32(env, s);
2558     }
2559     if (op == 1) {
2560         /* bsr */
2561         gen_push(s, tcg_const_i32(s->pc));
2562     }
2563     if (op > 1) {
2564         /* Bcc */
2565         l1 = gen_new_label();
2566         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2567         gen_jmp_tb(s, 1, base + offset);
2568         gen_set_label(l1);
2569         gen_jmp_tb(s, 0, s->pc);
2570     } else {
2571         /* Unconditional branch.  */
2572         gen_jmp_tb(s, 0, base + offset);
2573     }
2574 }
2575 
2576 DISAS_INSN(moveq)
2577 {
2578     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2579     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2580 }
2581 
2582 DISAS_INSN(mvzs)
2583 {
2584     int opsize;
2585     TCGv src;
2586     TCGv reg;
2587 
2588     if (insn & 0x40)
2589         opsize = OS_WORD;
2590     else
2591         opsize = OS_BYTE;
2592     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2593     reg = DREG(insn, 9);
2594     tcg_gen_mov_i32(reg, src);
2595     gen_logic_cc(s, src, opsize);
2596 }
2597 
2598 DISAS_INSN(or)
2599 {
2600     TCGv reg;
2601     TCGv dest;
2602     TCGv src;
2603     TCGv addr;
2604     int opsize;
2605 
2606     opsize = insn_opsize(insn);
2607     reg = gen_extend(DREG(insn, 9), opsize, 0);
2608     dest = tcg_temp_new();
2609     if (insn & 0x100) {
2610         SRC_EA(env, src, opsize, 0, &addr);
2611         tcg_gen_or_i32(dest, src, reg);
2612         DEST_EA(env, insn, opsize, dest, &addr);
2613     } else {
2614         SRC_EA(env, src, opsize, 0, NULL);
2615         tcg_gen_or_i32(dest, src, reg);
2616         gen_partset_reg(opsize, DREG(insn, 9), dest);
2617     }
2618     gen_logic_cc(s, dest, opsize);
2619     tcg_temp_free(dest);
2620 }
2621 
2622 DISAS_INSN(suba)
2623 {
2624     TCGv src;
2625     TCGv reg;
2626 
2627     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2628     reg = AREG(insn, 9);
2629     tcg_gen_sub_i32(reg, reg, src);
2630 }
2631 
2632 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2633 {
2634     TCGv tmp;
2635 
2636     gen_flush_flags(s); /* compute old Z */
2637 
2638     /* Perform subtraction with borrow.
2639      * (X, N) = dest - (src + X);
2640      */
2641 
2642     tmp = tcg_const_i32(0);
2643     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2644     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2645     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
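         /* Bit 0 of QREG_CC_X now holds the borrow out of the subtraction;
          * mask the rest away.
          */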
2646     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2647 
2648     /* Compute signed-overflow for the subtraction.  */
2649 
2650     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2651     tcg_gen_xor_i32(tmp, dest, src);
2652     tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2653     tcg_temp_free(tmp);
2654 
2655     /* Copy the rest of the results into place.  */
2656     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2657     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2658 
2659     set_cc_op(s, CC_OP_FLAGS);
2660 
2661     /* result is in QREG_CC_N */
2662 }
2663 
2664 DISAS_INSN(subx_reg)
2665 {
2666     TCGv dest;
2667     TCGv src;
2668     int opsize;
2669 
2670     opsize = insn_opsize(insn);
2671 
2672     src = gen_extend(DREG(insn, 0), opsize, 1);
2673     dest = gen_extend(DREG(insn, 9), opsize, 1);
2674 
2675     gen_subx(s, src, dest, opsize);
2676 
2677     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2678 }
2679 
2680 DISAS_INSN(subx_mem)
2681 {
2682     TCGv src;
2683     TCGv addr_src;
2684     TCGv dest;
2685     TCGv addr_dest;
2686     int opsize;
2687 
2688     opsize = insn_opsize(insn);
2689 
2690     addr_src = AREG(insn, 0);
2691     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2692     src = gen_load(s, opsize, addr_src, 1);
2693 
2694     addr_dest = AREG(insn, 9);
2695     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2696     dest = gen_load(s, opsize, addr_dest, 1);
2697 
2698     gen_subx(s, src, dest, opsize);
2699 
2700     gen_store(s, opsize, addr_dest, QREG_CC_N);
2701 }
2702 
2703 DISAS_INSN(mov3q)
2704 {
2705     TCGv src;
2706     int val;
2707 
2708     val = (insn >> 9) & 7;
2709     if (val == 0)
2710         val = -1;
2711     src = tcg_const_i32(val);
2712     gen_logic_cc(s, src, OS_LONG);
2713     DEST_EA(env, insn, OS_LONG, src, NULL);
2714     tcg_temp_free(src);
2715 }
2716 
2717 DISAS_INSN(cmp)
2718 {
2719     TCGv src;
2720     TCGv reg;
2721     int opsize;
2722 
2723     opsize = insn_opsize(insn);
2724     SRC_EA(env, src, opsize, 1, NULL);
2725     reg = gen_extend(DREG(insn, 9), opsize, 1);
2726     gen_update_cc_cmp(s, reg, src, opsize);
2727 }
2728 
2729 DISAS_INSN(cmpa)
2730 {
2731     int opsize;
2732     TCGv src;
2733     TCGv reg;
2734 
2735     if (insn & 0x100) {
2736         opsize = OS_LONG;
2737     } else {
2738         opsize = OS_WORD;
2739     }
2740     SRC_EA(env, src, opsize, 1, NULL);
2741     reg = AREG(insn, 9);
2742     gen_update_cc_cmp(s, reg, src, OS_LONG);
2743 }
2744 
2745 DISAS_INSN(cmpm)
2746 {
2747     int opsize = insn_opsize(insn);
2748     TCGv src, dst;
2749 
2750     /* Post-increment load (mode 3) from Ay.  */
2751     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
2752                       NULL_QREG, NULL, EA_LOADS);
2753     /* Post-increment load (mode 3) from Ax.  */
2754     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
2755                       NULL_QREG, NULL, EA_LOADS);
2756 
2757     gen_update_cc_cmp(s, dst, src, opsize);
2758 }
2759 
2760 DISAS_INSN(eor)
2761 {
2762     TCGv src;
2763     TCGv dest;
2764     TCGv addr;
2765     int opsize;
2766 
2767     opsize = insn_opsize(insn);
2768 
2769     SRC_EA(env, src, opsize, 0, &addr);
2770     dest = tcg_temp_new();
2771     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
2772     gen_logic_cc(s, dest, opsize);
2773     DEST_EA(env, insn, opsize, dest, &addr);
2774     tcg_temp_free(dest);
2775 }
2776 
2777 static void do_exg(TCGv reg1, TCGv reg2)
2778 {
2779     TCGv temp = tcg_temp_new();
2780     tcg_gen_mov_i32(temp, reg1);
2781     tcg_gen_mov_i32(reg1, reg2);
2782     tcg_gen_mov_i32(reg2, temp);
2783     tcg_temp_free(temp);
2784 }
2785 
2786 DISAS_INSN(exg_dd)
2787 {
2788     /* exchange Dx and Dy */
2789     do_exg(DREG(insn, 9), DREG(insn, 0));
2790 }
2791 
2792 DISAS_INSN(exg_aa)
2793 {
2794     /* exchange Ax and Ay */
2795     do_exg(AREG(insn, 9), AREG(insn, 0));
2796 }
2797 
2798 DISAS_INSN(exg_da)
2799 {
2800     /* exchange Dx and Ay */
2801     do_exg(DREG(insn, 9), AREG(insn, 0));
2802 }
2803 
2804 DISAS_INSN(and)
2805 {
2806     TCGv src;
2807     TCGv reg;
2808     TCGv dest;
2809     TCGv addr;
2810     int opsize;
2811 
2812     dest = tcg_temp_new();
2813 
2814     opsize = insn_opsize(insn);
2815     reg = DREG(insn, 9);
2816     if (insn & 0x100) {
2817         SRC_EA(env, src, opsize, 0, &addr);
2818         tcg_gen_and_i32(dest, src, reg);
2819         DEST_EA(env, insn, opsize, dest, &addr);
2820     } else {
2821         SRC_EA(env, src, opsize, 0, NULL);
2822         tcg_gen_and_i32(dest, src, reg);
2823         gen_partset_reg(opsize, reg, dest);
2824     }
2825     gen_logic_cc(s, dest, opsize);
2826     tcg_temp_free(dest);
2827 }
2828 
2829 DISAS_INSN(adda)
2830 {
2831     TCGv src;
2832     TCGv reg;
2833 
2834     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2835     reg = AREG(insn, 9);
2836     tcg_gen_add_i32(reg, reg, src);
2837 }
2838 
2839 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2840 {
2841     TCGv tmp;
2842 
2843     gen_flush_flags(s); /* compute old Z */
2844 
2845     /* Perform addition with carry.
2846      * (X, N) = src + dest + X;
2847      */
2848 
2849     tmp = tcg_const_i32(0);
2850     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
2851     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
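         /* QREG_CC_X now holds the carry out of dest + src + X (0 or 1). */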
2852     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2853 
2854     /* Compute signed-overflow for addition.  */
2855 
2856     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
2857     tcg_gen_xor_i32(tmp, dest, src);
2858     tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
2859     tcg_temp_free(tmp);
2860 
2861     /* Copy the rest of the results into place.  */
2862     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2863     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2864 
2865     set_cc_op(s, CC_OP_FLAGS);
2866 
2867     /* result is in QREG_CC_N */
2868 }
2869 
2870 DISAS_INSN(addx_reg)
2871 {
2872     TCGv dest;
2873     TCGv src;
2874     int opsize;
2875 
2876     opsize = insn_opsize(insn);
2877 
2878     dest = gen_extend(DREG(insn, 9), opsize, 1);
2879     src = gen_extend(DREG(insn, 0), opsize, 1);
2880 
2881     gen_addx(s, src, dest, opsize);
2882 
2883     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2884 }
2885 
2886 DISAS_INSN(addx_mem)
2887 {
2888     TCGv src;
2889     TCGv addr_src;
2890     TCGv dest;
2891     TCGv addr_dest;
2892     int opsize;
2893 
2894     opsize = insn_opsize(insn);
2895 
2896     addr_src = AREG(insn, 0);
2897     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2898     src = gen_load(s, opsize, addr_src, 1);
2899 
2900     addr_dest = AREG(insn, 9);
2901     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2902     dest = gen_load(s, opsize, addr_dest, 1);
2903 
2904     gen_addx(s, src, dest, opsize);
2905 
2906     gen_store(s, opsize, addr_dest, QREG_CC_N);
2907 }
2908 
2909 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
2910 {
2911     int count = (insn >> 9) & 7;
2912     int logical = insn & 8;
2913     int left = insn & 0x100;
2914     int bits = opsize_bytes(opsize) * 8;
2915     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
2916 
2917     if (count == 0) {
2918         count = 8;
2919     }
2920 
2921     tcg_gen_movi_i32(QREG_CC_V, 0);
2922     if (left) {
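             /* The last bit shifted out, bit (bits - count) of the source,
              * becomes C (and is copied into X below).
              */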
2923         tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
2924         tcg_gen_shli_i32(QREG_CC_N, reg, count);
2925 
2926         /* Note that ColdFire always clears V (done above),
2927            while M68000 sets it if the most significant bit is changed at
2928            any time during the shift operation.  */
2929         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
2930             /* if shift count >= bits, V is (reg != 0) */
2931             if (count >= bits) {
2932                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
2933             } else {
2934                 TCGv t0 = tcg_temp_new();
2935                 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
2936                 tcg_gen_sari_i32(t0, reg, bits - count - 1);
2937                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
2938                 tcg_temp_free(t0);
2939             }
2940             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2941         }
2942     } else {
2943         tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
2944         if (logical) {
2945             tcg_gen_shri_i32(QREG_CC_N, reg, count);
2946         } else {
2947             tcg_gen_sari_i32(QREG_CC_N, reg, count);
2948         }
2949     }
2950 
2951     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2952     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
2953     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2954     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
2955 
2956     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
2957     set_cc_op(s, CC_OP_FLAGS);
2958 }
2959 
2960 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
2961 {
2962     int logical = insn & 8;
2963     int left = insn & 0x100;
2964     int bits = opsize_bytes(opsize) * 8;
2965     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
2966     TCGv s32;
2967     TCGv_i64 t64, s64;
2968 
2969     t64 = tcg_temp_new_i64();
2970     s64 = tcg_temp_new_i64();
2971     s32 = tcg_temp_new();
2972 
2973     /* Note that m68k truncates the shift count modulo 64, not 32.
2974        In addition, a 64-bit shift makes it easy to find "the last
2975        bit shifted out", for the carry flag.  */
2976     tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
2977     tcg_gen_extu_i32_i64(s64, s32);
2978     tcg_gen_extu_i32_i64(t64, reg);
2979 
2980     /* Optimistically set V=0.  Also used as a zero source below.  */
2981     tcg_gen_movi_i32(QREG_CC_V, 0);
2982     if (left) {
2983         tcg_gen_shl_i64(t64, t64, s64);
2984 
2985         if (opsize == OS_LONG) {
2986             tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
2987             /* Note that C=0 if shift count is 0, and we get that for free.  */
2988         } else {
2989             TCGv zero = tcg_const_i32(0);
2990             tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
2991             tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
2992             tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
2993                                 s32, zero, zero, QREG_CC_C);
2994             tcg_temp_free(zero);
2995         }
2996         tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
2997 
2998         /* X = C, but only if the shift count was non-zero.  */
2999         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3000                             QREG_CC_C, QREG_CC_X);
3001 
3002         /* M68000 sets V if the most significant bit is changed at
3003          * any time during the shift operation.  Do this via creating
3004          * an extension of the sign bit, comparing, and discarding
3005          * the bits below the sign bit.  I.e.
3006          *     int64_t s = (intN_t)reg;
3007          *     int64_t t = (int64_t)(intN_t)reg << count;
3008          *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3009          */
3010         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3011             TCGv_i64 tt = tcg_const_i64(32);
3012             /* if shift is greater than 32, use 32 */
3013             tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3014             tcg_temp_free_i64(tt);
3015             /* Sign extend the input to 64 bits; re-do the shift.  */
3016             tcg_gen_ext_i32_i64(t64, reg);
3017             tcg_gen_shl_i64(s64, t64, s64);
3018             /* Clear all bits that are unchanged.  */
3019             tcg_gen_xor_i64(t64, t64, s64);
3020             /* Ignore the bits below the sign bit.  */
3021             tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3022             /* If any bits remain set, we have overflow.  */
3023             tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3024             tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3025             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3026         }
3027     } else {
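             /* Shift the value into the high half first, so that the last
              * bit shifted out lands in bit 31 of the low half and can be
              * extracted into C below.
              */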
3028         tcg_gen_shli_i64(t64, t64, 32);
3029         if (logical) {
3030             tcg_gen_shr_i64(t64, t64, s64);
3031         } else {
3032             tcg_gen_sar_i64(t64, t64, s64);
3033         }
3034         tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3035 
3036         /* Note that C=0 if shift count is 0, and we get that for free.  */
3037         tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3038 
3039         /* X = C, but only if the shift count was non-zero.  */
3040         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3041                             QREG_CC_C, QREG_CC_X);
3042     }
3043     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3044     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3045 
3046     tcg_temp_free(s32);
3047     tcg_temp_free_i64(s64);
3048     tcg_temp_free_i64(t64);
3049 
3050     /* Write back the result.  */
3051     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3052     set_cc_op(s, CC_OP_FLAGS);
3053 }
3054 
3055 DISAS_INSN(shift8_im)
3056 {
3057     shift_im(s, insn, OS_BYTE);
3058 }
3059 
3060 DISAS_INSN(shift16_im)
3061 {
3062     shift_im(s, insn, OS_WORD);
3063 }
3064 
3065 DISAS_INSN(shift_im)
3066 {
3067     shift_im(s, insn, OS_LONG);
3068 }
3069 
3070 DISAS_INSN(shift8_reg)
3071 {
3072     shift_reg(s, insn, OS_BYTE);
3073 }
3074 
3075 DISAS_INSN(shift16_reg)
3076 {
3077     shift_reg(s, insn, OS_WORD);
3078 }
3079 
3080 DISAS_INSN(shift_reg)
3081 {
3082     shift_reg(s, insn, OS_LONG);
3083 }
3084 
3085 DISAS_INSN(shift_mem)
3086 {
3087     int logical = insn & 8;
3088     int left = insn & 0x100;
3089     TCGv src;
3090     TCGv addr;
3091 
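         /* Memory shifts operate on a word and always shift by exactly
          * one bit.
          */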
3092     SRC_EA(env, src, OS_WORD, !logical, &addr);
3093     tcg_gen_movi_i32(QREG_CC_V, 0);
3094     if (left) {
3095         tcg_gen_shri_i32(QREG_CC_C, src, 15);
3096         tcg_gen_shli_i32(QREG_CC_N, src, 1);
3097 
3098         /* Note that ColdFire always clears V,
3099            while M68000 sets it if the most significant bit is changed at
3100            any time during the shift operation.  */
3101         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3102             src = gen_extend(src, OS_WORD, 1);
3103             tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3104         }
3105     } else {
3106         tcg_gen_mov_i32(QREG_CC_C, src);
3107         if (logical) {
3108             tcg_gen_shri_i32(QREG_CC_N, src, 1);
3109         } else {
3110             tcg_gen_sari_i32(QREG_CC_N, src, 1);
3111         }
3112     }
3113 
3114     gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3115     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3116     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3117     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3118 
3119     DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3120     set_cc_op(s, CC_OP_FLAGS);
3121 }
3122 
3123 static void rotate(TCGv reg, TCGv shift, int left, int size)
3124 {
3125     switch (size) {
3126     case 8:
3127         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
3128         tcg_gen_ext8u_i32(reg, reg);
3129         tcg_gen_muli_i32(reg, reg, 0x01010101);
3130         goto do_long;
3131     case 16:
3132         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3133         tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3134         goto do_long;
3135     do_long:
3136     default:
3137         if (left) {
3138             tcg_gen_rotl_i32(reg, reg, shift);
3139         } else {
3140             tcg_gen_rotr_i32(reg, reg, shift);
3141         }
3142     }
3143 
3144     /* compute flags */
3145 
3146     switch (size) {
3147     case 8:
3148         tcg_gen_ext8s_i32(reg, reg);
3149         break;
3150     case 16:
3151         tcg_gen_ext16s_i32(reg, reg);
3152         break;
3153     default:
3154         break;
3155     }
3156 
3157     /* QREG_CC_X is not affected */
3158 
3159     tcg_gen_mov_i32(QREG_CC_N, reg);
3160     tcg_gen_mov_i32(QREG_CC_Z, reg);
3161 
3162     if (left) {
3163         tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3164     } else {
3165         tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3166     }
3167 
3168     tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3169 }
3170 
3171 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3172 {
3173     switch (size) {
3174     case 8:
3175         tcg_gen_ext8s_i32(reg, reg);
3176         break;
3177     case 16:
3178         tcg_gen_ext16s_i32(reg, reg);
3179         break;
3180     default:
3181         break;
3182     }
3183     tcg_gen_mov_i32(QREG_CC_N, reg);
3184     tcg_gen_mov_i32(QREG_CC_Z, reg);
3185     tcg_gen_mov_i32(QREG_CC_X, X);
3186     tcg_gen_mov_i32(QREG_CC_C, X);
3187     tcg_gen_movi_i32(QREG_CC_V, 0);
3188 }
3189 
3190 /* Result of rotate_x() is valid if 0 <= shift <= size */
3191 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3192 {
3193     TCGv X, shl, shr, shx, sz, zero;
3194 
3195     sz = tcg_const_i32(size);
3196 
3197     shr = tcg_temp_new();
3198     shl = tcg_temp_new();
3199     shx = tcg_temp_new();
3200     if (left) {
3201         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3202         tcg_gen_movi_i32(shr, size + 1);
3203         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3204         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3205         /* shx = shx < 0 ? size : shx; */
3206         zero = tcg_const_i32(0);
3207         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3208         tcg_temp_free(zero);
3209     } else {
3210         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3211         tcg_gen_movi_i32(shl, size + 1);
3212         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3213         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3214     }
3215 
3216     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3217 
3218     tcg_gen_shl_i32(shl, reg, shl);
3219     tcg_gen_shr_i32(shr, reg, shr);
3220     tcg_gen_or_i32(reg, shl, shr);
3221     tcg_temp_free(shl);
3222     tcg_temp_free(shr);
3223     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3224     tcg_gen_or_i32(reg, reg, shx);
3225     tcg_temp_free(shx);
3226 
3227     /* X = (reg >> size) & 1 */
3228 
3229     X = tcg_temp_new();
3230     tcg_gen_shr_i32(X, reg, sz);
3231     tcg_gen_andi_i32(X, X, 1);
3232     tcg_temp_free(sz);
3233 
3234     return X;
3235 }
3236 
3237 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3238 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3239 {
3240     TCGv_i64 t0, shift64;
3241     TCGv X, lo, hi, zero;
3242 
3243     shift64 = tcg_temp_new_i64();
3244     tcg_gen_extu_i32_i64(shift64, shift);
3245 
3246     t0 = tcg_temp_new_i64();
3247 
3248     X = tcg_temp_new();
3249     lo = tcg_temp_new();
3250     hi = tcg_temp_new();
3251 
3252     if (left) {
3253         /* create [reg:X:..] */
3254 
3255         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3256         tcg_gen_concat_i32_i64(t0, lo, reg);
3257 
3258         /* rotate */
3259 
3260         tcg_gen_rotl_i64(t0, t0, shift64);
3261         tcg_temp_free_i64(shift64);
3262 
3263         /* result is [reg:..:reg:X] */
3264 
3265         tcg_gen_extr_i64_i32(lo, hi, t0);
3266         tcg_gen_andi_i32(X, lo, 1);
3267 
3268         tcg_gen_shri_i32(lo, lo, 1);
3269     } else {
3270         /* create [..:X:reg] */
3271 
3272         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3273 
3274         tcg_gen_rotr_i64(t0, t0, shift64);
3275         tcg_temp_free_i64(shift64);
3276 
3277         /* result is value: [X:reg:..:reg] */
3278         /* result is [X:reg:..:reg] */
3279         tcg_gen_extr_i64_i32(lo, hi, t0);
3280 
3281         /* extract X */
3282 
3283         tcg_gen_shri_i32(X, hi, 31);
3284 
3285         /* extract result */
3286 
3287         tcg_gen_shli_i32(hi, hi, 1);
3288     }
3289     tcg_temp_free_i64(t0);
3290     tcg_gen_or_i32(lo, lo, hi);
3291     tcg_temp_free(hi);
3292 
3293     /* if shift == 0, register and X are not affected */
3294 
3295     zero = tcg_const_i32(0);
3296     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3297     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3298     tcg_temp_free(zero);
3299     tcg_temp_free(lo);
3300 
3301     return X;
3302 }
3303 
3304 DISAS_INSN(rotate_im)
3305 {
3306     TCGv shift;
3307     int tmp;
3308     int left = (insn & 0x100);
3309 
3310     tmp = (insn >> 9) & 7;
3311     if (tmp == 0) {
3312         tmp = 8;
3313     }
3314 
3315     shift = tcg_const_i32(tmp);
3316     if (insn & 8) {
3317         rotate(DREG(insn, 0), shift, left, 32);
3318     } else {
3319         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3320         rotate_x_flags(DREG(insn, 0), X, 32);
3321         tcg_temp_free(X);
3322     }
3323     tcg_temp_free(shift);
3324 
3325     set_cc_op(s, CC_OP_FLAGS);
3326 }
3327 
3328 DISAS_INSN(rotate8_im)
3329 {
3330     int left = (insn & 0x100);
3331     TCGv reg;
3332     TCGv shift;
3333     int tmp;
3334 
3335     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3336 
3337     tmp = (insn >> 9) & 7;
3338     if (tmp == 0) {
3339         tmp = 8;
3340     }
3341 
3342     shift = tcg_const_i32(tmp);
3343     if (insn & 8) {
3344         rotate(reg, shift, left, 8);
3345     } else {
3346         TCGv X = rotate_x(reg, shift, left, 8);
3347         rotate_x_flags(reg, X, 8);
3348         tcg_temp_free(X);
3349     }
3350     tcg_temp_free(shift);
3351     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3352     set_cc_op(s, CC_OP_FLAGS);
3353 }
3354 
3355 DISAS_INSN(rotate16_im)
3356 {
3357     int left = (insn & 0x100);
3358     TCGv reg;
3359     TCGv shift;
3360     int tmp;
3361 
3362     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3363     tmp = (insn >> 9) & 7;
3364     if (tmp == 0) {
3365         tmp = 8;
3366     }
3367 
3368     shift = tcg_const_i32(tmp);
3369     if (insn & 8) {
3370         rotate(reg, shift, left, 16);
3371     } else {
3372         TCGv X = rotate_x(reg, shift, left, 16);
3373         rotate_x_flags(reg, X, 16);
3374         tcg_temp_free(X);
3375     }
3376     tcg_temp_free(shift);
3377     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3378     set_cc_op(s, CC_OP_FLAGS);
3379 }
3380 
3381 DISAS_INSN(rotate_reg)
3382 {
3383     TCGv reg;
3384     TCGv src;
3385     TCGv t0, t1;
3386     int left = (insn & 0x100);
3387 
3388     reg = DREG(insn, 0);
3389     src = DREG(insn, 9);
3390     /* shift in [0..63] */
3391     t0 = tcg_temp_new();
3392     tcg_gen_andi_i32(t0, src, 63);
3393     t1 = tcg_temp_new_i32();
3394     if (insn & 8) {
3395         tcg_gen_andi_i32(t1, src, 31);
3396         rotate(reg, t1, left, 32);
3397         /* if shift == 0, clear C */
3398         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3399                             t0, QREG_CC_V /* 0 */,
3400                             QREG_CC_V /* 0 */, QREG_CC_C);
3401     } else {
3402         TCGv X;
3403         /* modulo 33 */
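             /* 33, because the X bit rotates together with the 32 data
              * bits.
              */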
3404         tcg_gen_movi_i32(t1, 33);
3405         tcg_gen_remu_i32(t1, t0, t1);
3406         X = rotate32_x(DREG(insn, 0), t1, left);
3407         rotate_x_flags(DREG(insn, 0), X, 32);
3408         tcg_temp_free(X);
3409     }
3410     tcg_temp_free(t1);
3411     tcg_temp_free(t0);
3412     set_cc_op(s, CC_OP_FLAGS);
3413 }
3414 
3415 DISAS_INSN(rotate8_reg)
3416 {
3417     TCGv reg;
3418     TCGv src;
3419     TCGv t0, t1;
3420     int left = (insn & 0x100);
3421 
3422     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3423     src = DREG(insn, 9);
3424     /* shift in [0..63] */
3425     t0 = tcg_temp_new_i32();
3426     tcg_gen_andi_i32(t0, src, 63);
3427     t1 = tcg_temp_new_i32();
3428     if (insn & 8) {
3429         tcg_gen_andi_i32(t1, src, 7);
3430         rotate(reg, t1, left, 8);
3431         /* if shift == 0, clear C */
3432         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3433                             t0, QREG_CC_V /* 0 */,
3434                             QREG_CC_V /* 0 */, QREG_CC_C);
3435     } else {
3436         TCGv X;
3437         /* modulo 9 */
3438         tcg_gen_movi_i32(t1, 9);
3439         tcg_gen_remu_i32(t1, t0, t1);
3440         X = rotate_x(reg, t1, left, 8);
3441         rotate_x_flags(reg, X, 8);
3442         tcg_temp_free(X);
3443     }
3444     tcg_temp_free(t1);
3445     tcg_temp_free(t0);
3446     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3447     set_cc_op(s, CC_OP_FLAGS);
3448 }
3449 
3450 DISAS_INSN(rotate16_reg)
3451 {
3452     TCGv reg;
3453     TCGv src;
3454     TCGv t0, t1;
3455     int left = (insn & 0x100);
3456 
3457     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3458     src = DREG(insn, 9);
3459     /* shift in [0..63] */
3460     t0 = tcg_temp_new_i32();
3461     tcg_gen_andi_i32(t0, src, 63);
3462     t1 = tcg_temp_new_i32();
3463     if (insn & 8) {
3464         tcg_gen_andi_i32(t1, src, 15);
3465         rotate(reg, t1, left, 16);
3466         /* if shift == 0, clear C */
3467         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3468                             t0, QREG_CC_V /* 0 */,
3469                             QREG_CC_V /* 0 */, QREG_CC_C);
3470     } else {
3471         TCGv X;
3472         /* modulo 17 */
3473         tcg_gen_movi_i32(t1, 17);
3474         tcg_gen_remu_i32(t1, t0, t1);
3475         X = rotate_x(reg, t1, left, 16);
3476         rotate_x_flags(reg, X, 16);
3477         tcg_temp_free(X);
3478     }
3479     tcg_temp_free(t1);
3480     tcg_temp_free(t0);
3481     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3482     set_cc_op(s, CC_OP_FLAGS);
3483 }
3484 
3485 DISAS_INSN(rotate_mem)
3486 {
3487     TCGv src;
3488     TCGv addr;
3489     TCGv shift;
3490     int left = (insn & 0x100);
3491 
3492     SRC_EA(env, src, OS_WORD, 0, &addr);
3493 
3494     shift = tcg_const_i32(1);
3495     if (insn & 0x0200) {
3496         rotate(src, shift, left, 16);
3497     } else {
3498         TCGv X = rotate_x(src, shift, left, 16);
3499         rotate_x_flags(src, X, 16);
3500         tcg_temp_free(X);
3501     }
3502     tcg_temp_free(shift);
3503     DEST_EA(env, insn, OS_WORD, src, &addr);
3504     set_cc_op(s, CC_OP_FLAGS);
3505 }
3506 
3507 DISAS_INSN(ff1)
3508 {
3509     TCGv reg;
3510     reg = DREG(insn, 0);
3511     gen_logic_cc(s, reg, OS_LONG);
3512     gen_helper_ff1(reg, reg);
3513 }
3514 
3515 static TCGv gen_get_sr(DisasContext *s)
3516 {
3517     TCGv ccr;
3518     TCGv sr;
3519 
3520     ccr = gen_get_ccr(s);
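         /* Keep the non-CCR bits of SR and merge in the freshly computed
          * CCR.
          */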
3521     sr = tcg_temp_new();
3522     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
3523     tcg_gen_or_i32(sr, sr, ccr);
3524     return sr;
3525 }
3526 
3527 DISAS_INSN(strldsr)
3528 {
3529     uint16_t ext;
3530     uint32_t addr;
3531 
3532     addr = s->pc - 2;
3533     ext = read_im16(env, s);
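         /* The second word must be 0x46fc, the opcode of MOVE #imm,SR. */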
3534     if (ext != 0x46FC) {
3535         gen_exception(s, addr, EXCP_UNSUPPORTED);
3536         return;
3537     }
3538     ext = read_im16(env, s);
3539     if (IS_USER(s) || (ext & SR_S) == 0) {
3540         gen_exception(s, addr, EXCP_PRIVILEGE);
3541         return;
3542     }
3543     gen_push(s, gen_get_sr(s));
3544     gen_set_sr_im(s, ext, 0);
3545 }
3546 
3547 DISAS_INSN(move_from_sr)
3548 {
3549     TCGv sr;
3550 
3551     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
3552         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3553         return;
3554     }
3555     sr = gen_get_sr(s);
3556     DEST_EA(env, insn, OS_WORD, sr, NULL);
3557 }
3558 
3559 DISAS_INSN(move_to_sr)
3560 {
3561     if (IS_USER(s)) {
3562         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3563         return;
3564     }
3565     gen_set_sr(env, s, insn, 0);
3566     gen_lookup_tb(s);
3567 }
3568 
3569 DISAS_INSN(move_from_usp)
3570 {
3571     if (IS_USER(s)) {
3572         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3573         return;
3574     }
3575     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
3576                    offsetof(CPUM68KState, sp[M68K_USP]));
3577 }
3578 
3579 DISAS_INSN(move_to_usp)
3580 {
3581     if (IS_USER(s)) {
3582         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3583         return;
3584     }
3585     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
3586                    offsetof(CPUM68KState, sp[M68K_USP]));
3587 }
3588 
3589 DISAS_INSN(halt)
3590 {
3591     gen_exception(s, s->pc, EXCP_HALT_INSN);
3592 }
3593 
3594 DISAS_INSN(stop)
3595 {
3596     uint16_t ext;
3597 
3598     if (IS_USER(s)) {
3599         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3600         return;
3601     }
3602 
3603     ext = read_im16(env, s);
3604 
3605     gen_set_sr_im(s, ext, 0);
3606     tcg_gen_movi_i32(cpu_halted, 1);
3607     gen_exception(s, s->pc, EXCP_HLT);
3608 }
3609 
3610 DISAS_INSN(rte)
3611 {
3612     if (IS_USER(s)) {
3613         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3614         return;
3615     }
3616     gen_exception(s, s->pc - 2, EXCP_RTE);
3617 }
3618 
3619 DISAS_INSN(movec)
3620 {
3621     uint16_t ext;
3622     TCGv reg;
3623 
3624     if (IS_USER(s)) {
3625         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3626         return;
3627     }
3628 
3629     ext = read_im16(env, s);
3630 
3631     if (ext & 0x8000) {
3632         reg = AREG(ext, 12);
3633     } else {
3634         reg = DREG(ext, 12);
3635     }
3636     gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
3637     gen_lookup_tb(s);
3638 }
3639 
3640 DISAS_INSN(intouch)
3641 {
3642     if (IS_USER(s)) {
3643         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3644         return;
3645     }
3646     /* ICache fetch.  Implement as no-op.  */
3647 }
3648 
3649 DISAS_INSN(cpushl)
3650 {
3651     if (IS_USER(s)) {
3652         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3653         return;
3654     }
3655     /* Cache push/invalidate.  Implement as no-op.  */
3656 }
3657 
3658 DISAS_INSN(wddata)
3659 {
3660     gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3661 }
3662 
3663 DISAS_INSN(wdebug)
3664 {
3665     M68kCPU *cpu = m68k_env_get_cpu(env);
3666 
3667     if (IS_USER(s)) {
3668         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3669         return;
3670     }
3671     /* TODO: Implement wdebug.  */
3672     cpu_abort(CPU(cpu), "WDEBUG not implemented");
3673 }
3674 
3675 DISAS_INSN(trap)
3676 {
3677     gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
3678 }
3679 
3680 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
3681    immediately before the next FP instruction is executed.  */
3682 DISAS_INSN(fpu)
3683 {
3684     uint16_t ext;
3685     int32_t offset;
3686     int opmode;
3687     TCGv_i64 src;
3688     TCGv_i64 dest;
3689     TCGv_i64 res;
3690     TCGv tmp32;
3691     int round;
3692     int set_dest;
3693     int opsize;
3694 
3695     ext = read_im16(env, s);
3696     opmode = ext & 0x7f;
3697     switch ((ext >> 13) & 7) {
3698     case 0: case 2:
3699         break;
3700     case 1:
3701         goto undef;
3702     case 3: /* fmove out */
3703         src = FREG(ext, 7);
3704         tmp32 = tcg_temp_new_i32();
3705         /* fmove */
3706         /* ??? TODO: Proper behavior on overflow.  */
3707         switch ((ext >> 10) & 7) {
3708         case 0:
3709             opsize = OS_LONG;
3710             gen_helper_f64_to_i32(tmp32, cpu_env, src);
3711             break;
3712         case 1:
3713             opsize = OS_SINGLE;
3714             gen_helper_f64_to_f32(tmp32, cpu_env, src);
3715             break;
3716         case 4:
3717             opsize = OS_WORD;
3718             gen_helper_f64_to_i32(tmp32, cpu_env, src);
3719             break;
3720         case 5: /* OS_DOUBLE */
3721             tcg_gen_mov_i32(tmp32, AREG(insn, 0));
3722             switch ((insn >> 3) & 7) {
3723             case 2:
3724             case 3:
3725                 break;
3726             case 4:
3727                 tcg_gen_addi_i32(tmp32, tmp32, -8);
3728                 break;
3729             case 5:
3730                 offset = cpu_ldsw_code(env, s->pc);
3731                 s->pc += 2;
3732                 tcg_gen_addi_i32(tmp32, tmp32, offset);
3733                 break;
3734             default:
3735                 goto undef;
3736             }
3737             gen_store64(s, tmp32, src);
3738             switch ((insn >> 3) & 7) {
3739             case 3:
3740                 tcg_gen_addi_i32(tmp32, tmp32, 8);
3741                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
3742                 break;
3743             case 4:
3744                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
3745                 break;
3746             }
3747             tcg_temp_free_i32(tmp32);
3748             return;
3749         case 6:
3750             opsize = OS_BYTE;
3751             gen_helper_f64_to_i32(tmp32, cpu_env, src);
3752             break;
3753         default:
3754             goto undef;
3755         }
3756         DEST_EA(env, insn, opsize, tmp32, NULL);
3757         tcg_temp_free_i32(tmp32);
3758         return;
3759     case 4: /* fmove to control register.  */
3760         switch ((ext >> 10) & 7) {
3761         case 4: /* FPCR */
3762             /* Not implemented.  Ignore writes.  */
3763             break;
3764         case 1: /* FPIAR */
3765         case 2: /* FPSR */
3766         default:
3767             cpu_abort(NULL, "Unimplemented: fmove to control %d",
3768                       (ext >> 10) & 7);
3769         }
3770         break;
3771     case 5: /* fmove from control register.  */
3772         switch ((ext >> 10) & 7) {
3773         case 4: /* FPCR */
3774             /* Not implemented.  Always return zero.  */
3775             tmp32 = tcg_const_i32(0);
3776             break;
3777         case 1: /* FPIAR */
3778         case 2: /* FPSR */
3779         default:
3780             cpu_abort(NULL, "Unimplemented: fmove from control %d",
3781                       (ext >> 10) & 7);
3782             goto undef;
3783         }
3784         DEST_EA(env, insn, OS_LONG, tmp32, NULL);
3785         break;
3786     case 6: /* fmovem */
3787     case 7:
3788         {
3789             TCGv addr;
3790             uint16_t mask;
3791             int i;
3792             if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
3793                 goto undef;
3794             tmp32 = gen_lea(env, s, insn, OS_LONG);
3795             if (IS_NULL_QREG(tmp32)) {
3796                 gen_addr_fault(s);
3797                 return;
3798             }
3799             addr = tcg_temp_new_i32();
3800             tcg_gen_mov_i32(addr, tmp32);
3801             mask = 0x80;
3802             for (i = 0; i < 8; i++) {
3803                 if (ext & mask) {
3804                     dest = FREG(i, 0);
3805                     if (ext & (1 << 13)) {
3806                         /* store */
3807                         tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
3808                     } else {
3809                         /* load */
3810                         tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
3811                     }
3812                     if (ext & (mask - 1))
3813                         tcg_gen_addi_i32(addr, addr, 8);
3814                 }
3815                 mask >>= 1;
3816             }
3817             tcg_temp_free_i32(addr);
3818         }
3819         return;
3820     }
3821     if (ext & (1 << 14)) {
3822         /* Source effective address.  */
3823         switch ((ext >> 10) & 7) {
3824         case 0: opsize = OS_LONG; break;
3825         case 1: opsize = OS_SINGLE; break;
3826         case 4: opsize = OS_WORD; break;
3827         case 5: opsize = OS_DOUBLE; break;
3828         case 6: opsize = OS_BYTE; break;
3829         default:
3830             goto undef;
3831         }
3832         if (opsize == OS_DOUBLE) {
3833             tmp32 = tcg_temp_new_i32();
3834             tcg_gen_mov_i32(tmp32, AREG(insn, 0));
3835             switch ((insn >> 3) & 7) {
3836             case 2:
3837             case 3:
3838                 break;
3839             case 4:
3840                 tcg_gen_addi_i32(tmp32, tmp32, -8);
3841                 break;
3842             case 5:
3843                 offset = cpu_ldsw_code(env, s->pc);
3844                 s->pc += 2;
3845                 tcg_gen_addi_i32(tmp32, tmp32, offset);
3846                 break;
3847             case 7:
3848                 offset = cpu_ldsw_code(env, s->pc);
3849                 offset += s->pc - 2;
3850                 s->pc += 2;
3851                 tcg_gen_addi_i32(tmp32, tmp32, offset);
3852                 break;
3853             default:
3854                 goto undef;
3855             }
3856             src = gen_load64(s, tmp32);
3857             switch ((insn >> 3) & 7) {
3858             case 3:
3859                 tcg_gen_addi_i32(tmp32, tmp32, 8);
3860                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
3861                 break;
3862             case 4:
3863                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
3864                 break;
3865             }
3866             tcg_temp_free_i32(tmp32);
3867         } else {
3868             SRC_EA(env, tmp32, opsize, 1, NULL);
3869             src = tcg_temp_new_i64();
3870             switch (opsize) {
3871             case OS_LONG:
3872             case OS_WORD:
3873             case OS_BYTE:
3874                 gen_helper_i32_to_f64(src, cpu_env, tmp32);
3875                 break;
3876             case OS_SINGLE:
3877                 gen_helper_f32_to_f64(src, cpu_env, tmp32);
3878                 break;
3879             }
3880         }
3881     } else {
3882         /* Source register.  */
3883         src = FREG(ext, 10);
3884     }
3885     dest = FREG(ext, 7);
3886     res = tcg_temp_new_i64();
3887     if (opmode != 0x3a)
3888         tcg_gen_mov_f64(res, dest);
3889     round = 1;
3890     set_dest = 1;
3891     switch (opmode) {
3892     case 0: case 0x40: case 0x44: /* fmove */
3893         tcg_gen_mov_f64(res, src);
3894         break;
3895     case 1: /* fint */
3896         gen_helper_iround_f64(res, cpu_env, src);
3897         round = 0;
3898         break;
3899     case 3: /* fintrz */
3900         gen_helper_itrunc_f64(res, cpu_env, src);
3901         round = 0;
3902         break;
3903     case 4: case 0x41: case 0x45: /* fsqrt */
3904         gen_helper_sqrt_f64(res, cpu_env, src);
3905         break;
3906     case 0x18: case 0x58: case 0x5c: /* fabs */
3907         gen_helper_abs_f64(res, src);
3908         break;
3909     case 0x1a: case 0x5a: case 0x5e: /* fneg */
3910         gen_helper_chs_f64(res, src);
3911         break;
3912     case 0x20: case 0x60: case 0x64: /* fdiv */
3913         gen_helper_div_f64(res, cpu_env, res, src);
3914         break;
3915     case 0x22: case 0x62: case 0x66: /* fadd */
3916         gen_helper_add_f64(res, cpu_env, res, src);
3917         break;
3918     case 0x23: case 0x63: case 0x67: /* fmul */
3919         gen_helper_mul_f64(res, cpu_env, res, src);
3920         break;
3921     case 0x28: case 0x68: case 0x6c: /* fsub */
3922         gen_helper_sub_f64(res, cpu_env, res, src);
3923         break;
3924     case 0x38: /* fcmp */
3925         gen_helper_sub_cmp_f64(res, cpu_env, res, src);
3926         set_dest = 0;
3927         round = 0;
3928         break;
3929     case 0x3a: /* ftst */
3930         tcg_gen_mov_f64(res, src);
3931         set_dest = 0;
3932         round = 0;
3933         break;
3934     default:
3935         goto undef;
3936     }
3937     if (ext & (1 << 14)) {
3938         tcg_temp_free_i64(src);
3939     }
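         /*
          * Decide whether the result must be rounded to single precision.
          * Opmodes with bit 6 set appear to be the explicit-precision
          * variants (bit 2 clear: single, bit 2 set: double); otherwise
          * rounding happens only when the FPCR PREC bit is set.  The
          * rounding itself is approximated below by a float64 -> float32
          * -> float64 round trip.
          */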
3940     if (round) {
3941         if (opmode & 0x40) {
3942             if ((opmode & 0x4) != 0)
3943                 round = 0;
3944         } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
3945             round = 0;
3946         }
3947     }
3948     if (round) {
3949         TCGv tmp = tcg_temp_new_i32();
3950         gen_helper_f64_to_f32(tmp, cpu_env, res);
3951         gen_helper_f32_to_f64(res, cpu_env, tmp);
3952         tcg_temp_free_i32(tmp);
3953     }
3954     tcg_gen_mov_f64(QREG_FP_RESULT, res);
3955     if (set_dest) {
3956         tcg_gen_mov_f64(dest, res);
3957     }
3958     tcg_temp_free_i64(res);
3959     return;
3960 undef:
3961     /* FIXME: Is this right for offset addressing modes?  */
3962     s->pc -= 2;
3963     disas_undef_fpu(env, s, insn);
3964 }
3965 
3966 DISAS_INSN(fbcc)
3967 {
3968     uint32_t offset;
3969     uint32_t addr;
3970     TCGv flag;
3971     TCGLabel *l1;
3972 
3973     addr = s->pc;
3974     offset = cpu_ldsw_code(env, s->pc);
3975     s->pc += 2;
3976     if (insn & (1 << 6)) {
3977         offset = (offset << 16) | read_im16(env, s);
3978     }
3979 
3980     l1 = gen_new_label();
3981     /* TODO: Raise BSUN exception.  */
3982     flag = tcg_temp_new();
3983     gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
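         /*
          * Judging by the cases below, the compare helper encodes the last
          * FP result as -1 (less than zero), 0 (equal to zero), 1 (greater
          * than zero) or 2 (unordered/NaN); each predicate simply tests
          * that small integer.
          */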
3984     /* Jump to l1 if condition is true.  */
3985     switch (insn & 0xf) {
3986     case 0: /* f */
3987         break;
3988     case 1: /* eq (=0) */
3989         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
3990         break;
3991     case 2: /* ogt (=1) */
3992         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
3993         break;
3994     case 3: /* oge (=0 or =1) */
3995         tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
3996         break;
3997     case 4: /* olt (=-1) */
3998         tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
3999         break;
4000     case 5: /* ole (=-1 or =0) */
4001         tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
4002         break;
4003     case 6: /* ogl (=-1 or =1) */
4004         tcg_gen_andi_i32(flag, flag, 1);
4005         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4006         break;
4007     case 7: /* or (<2) */
4008         tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
4009         break;
4010     case 8: /* un (=2) */
4011         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
4012         break;
4013     case 9: /* ueq (=0 or =2) */
4014         tcg_gen_andi_i32(flag, flag, 1);
4015         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
4016         break;
4017     case 10: /* ugt (>0) */
4018         tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
4019         break;
4020     case 11: /* uge (>=0) */
4021         tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
4022         break;
4023     case 12: /* ult (=-1 or =2) */
4024         tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
4025         break;
4026     case 13: /* ule (!=1) */
4027         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
4028         break;
4029     case 14: /* ne (!=0) */
4030         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4031         break;
4032     case 15: /* t */
4033         tcg_gen_br(l1);
4034         break;
4035     }
4036     gen_jmp_tb(s, 0, s->pc);
4037     gen_set_label(l1);
4038     gen_jmp_tb(s, 1, addr + offset);
4039 }
4040 
4041 DISAS_INSN(frestore)
4042 {
4043     M68kCPU *cpu = m68k_env_get_cpu(env);
4044 
4045     /* TODO: Implement frestore.  */
4046     cpu_abort(CPU(cpu), "FRESTORE not implemented");
4047 }
4048 
4049 DISAS_INSN(fsave)
4050 {
4051     M68kCPU *cpu = m68k_env_get_cpu(env);
4052 
4053     /* TODO: Implement fsave.  */
4054     cpu_abort(CPU(cpu), "FSAVE not implemented");
4055 }
4056 
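     /*
      * Extract one 16-bit MAC operand from a 32-bit register.  'upper'
      * picks the half; the extension depends on the MACSR mode: fractional
      * (FI) keeps the half left-aligned, signed (SU) sign-extends it, and
      * the default integer mode zero-extends it.
      */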
4057 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
4058 {
4059     TCGv tmp = tcg_temp_new();
4060     if (s->env->macsr & MACSR_FI) {
4061         if (upper)
4062             tcg_gen_andi_i32(tmp, val, 0xffff0000);
4063         else
4064             tcg_gen_shli_i32(tmp, val, 16);
4065     } else if (s->env->macsr & MACSR_SU) {
4066         if (upper)
4067             tcg_gen_sari_i32(tmp, val, 16);
4068         else
4069             tcg_gen_ext16s_i32(tmp, val);
4070     } else {
4071         if (upper)
4072             tcg_gen_shri_i32(tmp, val, 16);
4073         else
4074             tcg_gen_ext16u_i32(tmp, val);
4075     }
4076     return tmp;
4077 }
4078 
4079 static void gen_mac_clear_flags(void)
4080 {
4081     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
4082                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
4083 }
4084 
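     /*
      * ColdFire MAC/EMAC multiply-accumulate.  The accumulator, operand
      * registers/halves, an optional shift of the product and (on EMAC_B
      * parts) a second, dual accumulation are encoded across the opcode
      * and the 16-bit extension word.  Opcodes with bits 4-5 nonzero also
      * perform a parallel memory load with address-register writeback.
      */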
4085 DISAS_INSN(mac)
4086 {
4087     TCGv rx;
4088     TCGv ry;
4089     uint16_t ext;
4090     int acc;
4091     TCGv tmp;
4092     TCGv addr;
4093     TCGv loadval;
4094     int dual;
4095     TCGv saved_flags;
4096 
4097     if (!s->done_mac) {
4098         s->mactmp = tcg_temp_new_i64();
4099         s->done_mac = 1;
4100     }
4101 
4102     ext = read_im16(env, s);
4103 
4104     acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
4105     dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
4106     if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
4107         disas_undef(env, s, insn);
4108         return;
4109     }
4110     if (insn & 0x30) {
4111         /* MAC with load.  */
4112         tmp = gen_lea(env, s, insn, OS_LONG);
4113         addr = tcg_temp_new();
4114         tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
4115         /* Load the value now to ensure correct exception behavior.
4116            Perform writeback after reading the MAC inputs.  */
4117         loadval = gen_load(s, OS_LONG, addr, 0);
4118 
4119         acc ^= 1;
4120         rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
4121         ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
4122     } else {
4123         loadval = addr = NULL_QREG;
4124         rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4125         ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4126     }
4127 
4128     gen_mac_clear_flags();
4129 #if 0
4130     l1 = -1;
4131     /* Disabled because conditional branches clobber temporary vars.  */
4132     if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
4133         /* Skip the multiply if we know we will ignore it.  */
4134         l1 = gen_new_label();
4135         tmp = tcg_temp_new();
4136         tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
4137         gen_op_jmp_nz32(tmp, l1);
4138     }
4139 #endif
4140 
4141     if ((ext & 0x0800) == 0) {
4142         /* Word.  */
4143         rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
4144         ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
4145     }
4146     if (s->env->macsr & MACSR_FI) {
4147         gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
4148     } else {
4149         if (s->env->macsr & MACSR_SU)
4150             gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
4151         else
4152             gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
4153         switch ((ext >> 9) & 3) {
4154         case 1:
4155             tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
4156             break;
4157         case 3:
4158             tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
4159             break;
4160         }
4161     }
4162 
4163     if (dual) {
4164         /* Save the overflow flag from the multiply.  */
4165         saved_flags = tcg_temp_new();
4166         tcg_gen_mov_i32(saved_flags, QREG_MACSR);
4167     } else {
4168         saved_flags = NULL_QREG;
4169     }
4170 
4171 #if 0
4172     /* Disabled because conditional branches clobber temporary vars.  */
4173     if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
4174         /* Skip the accumulate if the value is already saturated.  */
4175         l1 = gen_new_label();
4176         tmp = tcg_temp_new();
4177         gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4178         gen_op_jmp_nz32(tmp, l1);
4179     }
4180 #endif
4181 
4182     if (insn & 0x100)
4183         tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4184     else
4185         tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4186 
4187     if (s->env->macsr & MACSR_FI)
4188         gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4189     else if (s->env->macsr & MACSR_SU)
4190         gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4191     else
4192         gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4193 
4194 #if 0
4195     /* Disabled because conditional branches clobber temporary vars.  */
4196     if (l1 != -1)
4197         gen_set_label(l1);
4198 #endif
4199 
4200     if (dual) {
4201         /* Dual accumulate variant.  */
4202         acc = (ext >> 2) & 3;
4203         /* Restore the overflow flag from the multiplier.  */
4204         tcg_gen_mov_i32(QREG_MACSR, saved_flags);
4205 #if 0
4206         /* Disabled because conditional branches clobber temporary vars.  */
4207         if ((s->env->macsr & MACSR_OMC) != 0) {
4208             /* Skip the accumulate if the value is already saturated.  */
4209             l1 = gen_new_label();
4210             tmp = tcg_temp_new();
4211             gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4212             gen_op_jmp_nz32(tmp, l1);
4213         }
4214 #endif
4215         if (ext & 2)
4216             tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4217         else
4218             tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4219         if (s->env->macsr & MACSR_FI)
4220             gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4221         else if (s->env->macsr & MACSR_SU)
4222             gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4223         else
4224             gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4225 #if 0
4226         /* Disabled because conditional branches clobber temporary vars.  */
4227         if (l1 != -1)
4228             gen_set_label(l1);
4229 #endif
4230     }
4231     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
4232 
4233     if (insn & 0x30) {
4234         TCGv rw;
4235         rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4236         tcg_gen_mov_i32(rw, loadval);
4237         /* FIXME: Should address writeback happen with the masked or
4238            unmasked value?  */
4239         switch ((insn >> 3) & 7) {
4240         case 3: /* Post-increment.  */
4241             tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
4242             break;
4243         case 4: /* Pre-decrement.  */
4244             tcg_gen_mov_i32(AREG(insn, 0), addr);
4245         }
4246     }
4247 }
4248 
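     /*
      * Read an accumulator into a Dx/Ax register.  In fractional mode a
      * helper extracts the value; otherwise, with MACSR.OMC clear the low
      * 32 bits are copied directly, and with it set the helpers presumably
      * return a saturated signed/unsigned result.  Bit 6 of the opcode
      * additionally clears the accumulator and its overflow (PAV) flag.
      */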
4249 DISAS_INSN(from_mac)
4250 {
4251     TCGv rx;
4252     TCGv_i64 acc;
4253     int accnum;
4254 
4255     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4256     accnum = (insn >> 9) & 3;
4257     acc = MACREG(accnum);
4258     if (s->env->macsr & MACSR_FI) {
4259         gen_helper_get_macf(rx, cpu_env, acc);
4260     } else if ((s->env->macsr & MACSR_OMC) == 0) {
4261         tcg_gen_extrl_i64_i32(rx, acc);
4262     } else if (s->env->macsr & MACSR_SU) {
4263         gen_helper_get_macs(rx, acc);
4264     } else {
4265         gen_helper_get_macu(rx, acc);
4266     }
4267     if (insn & 0x40) {
4268         tcg_gen_movi_i64(acc, 0);
4269         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4270     }
4271 }
4272 
4273 DISAS_INSN(move_mac)
4274 {
4275     /* FIXME: This can be done without a helper.  */
4276     int src;
4277     TCGv dest;
4278     src = insn & 3;
4279     dest = tcg_const_i32((insn >> 9) & 3);
4280     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
4281     gen_mac_clear_flags();
4282     gen_helper_mac_set_flags(cpu_env, dest);
4283 }
4284 
4285 DISAS_INSN(from_macsr)
4286 {
4287     TCGv reg;
4288 
4289     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4290     tcg_gen_mov_i32(reg, QREG_MACSR);
4291 }
4292 
4293 DISAS_INSN(from_mask)
4294 {
4295     TCGv reg;
4296     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4297     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
4298 }
4299 
4300 DISAS_INSN(from_mext)
4301 {
4302     TCGv reg;
4303     TCGv acc;
4304     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4305     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4306     if (s->env->macsr & MACSR_FI)
4307         gen_helper_get_mac_extf(reg, cpu_env, acc);
4308     else
4309         gen_helper_get_mac_exti(reg, cpu_env, acc);
4310 }
4311 
4312 DISAS_INSN(macsr_to_ccr)
4313 {
4314     TCGv tmp = tcg_temp_new();
4315     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
4316     gen_helper_set_sr(cpu_env, tmp);
4317     tcg_temp_free(tmp);
4318     set_cc_op(s, CC_OP_FLAGS);
4319 }
4320 
4321 DISAS_INSN(to_mac)
4322 {
4323     TCGv_i64 acc;
4324     TCGv val;
4325     int accnum;
4326     accnum = (insn >> 9) & 3;
4327     acc = MACREG(accnum);
4328     SRC_EA(env, val, OS_LONG, 0, NULL);
4329     if (s->env->macsr & MACSR_FI) {
4330         tcg_gen_ext_i32_i64(acc, val);
4331         tcg_gen_shli_i64(acc, acc, 8);
4332     } else if (s->env->macsr & MACSR_SU) {
4333         tcg_gen_ext_i32_i64(acc, val);
4334     } else {
4335         tcg_gen_extu_i32_i64(acc, val);
4336     }
4337     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4338     gen_mac_clear_flags();
4339     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
4340 }
4341 
4342 DISAS_INSN(to_macsr)
4343 {
4344     TCGv val;
4345     SRC_EA(env, val, OS_LONG, 0, NULL);
4346     gen_helper_set_macsr(cpu_env, val);
4347     gen_lookup_tb(s);
4348 }
4349 
4350 DISAS_INSN(to_mask)
4351 {
4352     TCGv val;
4353     SRC_EA(env, val, OS_LONG, 0, NULL);
4354     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
4355 }
4356 
4357 DISAS_INSN(to_mext)
4358 {
4359     TCGv val;
4360     TCGv acc;
4361     SRC_EA(env, val, OS_LONG, 0, NULL);
4362     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4363     if (s->env->macsr & MACSR_FI)
4364         gen_helper_set_mac_extf(cpu_env, val, acc);
4365     else if (s->env->macsr & MACSR_SU)
4366         gen_helper_set_mac_exts(cpu_env, val, acc);
4367     else
4368         gen_helper_set_mac_extu(cpu_env, val, acc);
4369 }
4370 
4371 static disas_proc opcode_table[65536];
4372 
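     /*
      * Fill opcode_table[] for every 16-bit pattern matching opcode/mask.
      * Only the bits below the first zero bit of the mask are enumerated;
      * any remaining don't-care bits are handled by the (i & mask) ==
      * opcode filter in the loop.  For example, registering 4840/ffc0
      * scans the 64 entries 0x4840..0x487f.
      */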
4373 static void
4374 register_opcode(disas_proc proc, uint16_t opcode, uint16_t mask)
4375 {
4376   int i;
4377   int from;
4378   int to;
4379 
4380   /* Sanity check.  All set bits must be included in the mask.  */
4381   if (opcode & ~mask) {
4382       fprintf(stderr,
4383               "qemu internal error: bogus opcode definition %04x/%04x\n",
4384               opcode, mask);
4385       abort();
4386   }
4387   /* This could probably be cleverer.  For now just optimize the case where
4388      the top bits are known.  */
4389   /* Find the first zero bit in the mask.  */
4390   i = 0x8000;
4391   while ((i & mask) != 0)
4392       i >>= 1;
4393   /* Iterate over all combinations of this and lower bits.  */
4394   if (i == 0)
4395       i = 1;
4396   else
4397       i <<= 1;
4398   from = opcode & ~(i - 1);
4399   to = from + i;
4400   for (i = from; i < to; i++) {
4401       if ((i & mask) == opcode)
4402           opcode_table[i] = proc;
4403   }
4404 }
4405 
4406 /* Register m68k opcode handlers.  Order is important.
4407    Later insns override earlier ones.  */
4408 void register_m68k_insns(CPUM68KState *env)
4409 {
4410     /* Build the opcode table only once to avoid
4411        multithreading issues. */
4412     if (opcode_table[0] != NULL) {
4413         return;
4414     }
4415 
4416     /* Use BASE() for instructions available
4417      * for both CF_ISA_A and M68000.
4418      */
4419 #define BASE(name, opcode, mask) \
4420     register_opcode(disas_##name, 0x##opcode, 0x##mask)
4421 #define INSN(name, opcode, mask, feature) do { \
4422     if (m68k_feature(env, M68K_FEATURE_##feature)) \
4423         BASE(name, opcode, mask); \
4424     } while (0)
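         /*
          * For example, INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC) expands to
          * a feature-guarded register_opcode(disas_bitrev, 0x00c0, 0xfff8),
          * i.e. disas_bitrev handles opcodes 0x00c0-0x00c7 on ISA_A+ cores.
          */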
4425     BASE(undef,     0000, 0000);
4426     INSN(arith_im,  0080, fff8, CF_ISA_A);
4427     INSN(arith_im,  0000, ff00, M68000);
4428     INSN(undef,     00c0, ffc0, M68000);
4429     INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
4430     BASE(bitop_reg, 0100, f1c0);
4431     BASE(bitop_reg, 0140, f1c0);
4432     BASE(bitop_reg, 0180, f1c0);
4433     BASE(bitop_reg, 01c0, f1c0);
4434     INSN(arith_im,  0280, fff8, CF_ISA_A);
4435     INSN(arith_im,  0200, ff00, M68000);
4436     INSN(undef,     02c0, ffc0, M68000);
4437     INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
4438     INSN(arith_im,  0480, fff8, CF_ISA_A);
4439     INSN(arith_im,  0400, ff00, M68000);
4440     INSN(undef,     04c0, ffc0, M68000);
4441     INSN(arith_im,  0600, ff00, M68000);
4442     INSN(undef,     06c0, ffc0, M68000);
4443     INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
4444     INSN(arith_im,  0680, fff8, CF_ISA_A);
4445     INSN(arith_im,  0c00, ff38, CF_ISA_A);
4446     INSN(arith_im,  0c00, ff00, M68000);
4447     BASE(bitop_im,  0800, ffc0);
4448     BASE(bitop_im,  0840, ffc0);
4449     BASE(bitop_im,  0880, ffc0);
4450     BASE(bitop_im,  08c0, ffc0);
4451     INSN(arith_im,  0a80, fff8, CF_ISA_A);
4452     INSN(arith_im,  0a00, ff00, M68000);
4453     INSN(cas,       0ac0, ffc0, CAS);
4454     INSN(cas,       0cc0, ffc0, CAS);
4455     INSN(cas,       0ec0, ffc0, CAS);
4456     INSN(cas2w,     0cfc, ffff, CAS);
4457     INSN(cas2l,     0efc, ffff, CAS);
4458     BASE(move,      1000, f000);
4459     BASE(move,      2000, f000);
4460     BASE(move,      3000, f000);
4461     INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
4462     INSN(negx,      4080, fff8, CF_ISA_A);
4463     INSN(negx,      4000, ff00, M68000);
4464     INSN(undef,     40c0, ffc0, M68000);
4465     INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
4466     INSN(move_from_sr, 40c0, ffc0, M68000);
4467     BASE(lea,       41c0, f1c0);
4468     BASE(clr,       4200, ff00);
4469     BASE(undef,     42c0, ffc0);
4470     INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
4471     INSN(move_from_ccr, 42c0, ffc0, M68000);
4472     INSN(neg,       4480, fff8, CF_ISA_A);
4473     INSN(neg,       4400, ff00, M68000);
4474     INSN(undef,     44c0, ffc0, M68000);
4475     BASE(move_to_ccr, 44c0, ffc0);
4476     INSN(not,       4680, fff8, CF_ISA_A);
4477     INSN(not,       4600, ff00, M68000);
4478     INSN(undef,     46c0, ffc0, M68000);
4479     INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
4480     INSN(nbcd,      4800, ffc0, M68000);
4481     INSN(linkl,     4808, fff8, M68000);
4482     BASE(pea,       4840, ffc0);
4483     BASE(swap,      4840, fff8);
4484     INSN(bkpt,      4848, fff8, BKPT);
4485     INSN(movem,     48d0, fbf8, CF_ISA_A);
4486     INSN(movem,     48e8, fbf8, CF_ISA_A);
4487     INSN(movem,     4880, fb80, M68000);
4488     BASE(ext,       4880, fff8);
4489     BASE(ext,       48c0, fff8);
4490     BASE(ext,       49c0, fff8);
4491     BASE(tst,       4a00, ff00);
4492     INSN(tas,       4ac0, ffc0, CF_ISA_B);
4493     INSN(tas,       4ac0, ffc0, M68000);
4494     INSN(halt,      4ac8, ffff, CF_ISA_A);
4495     INSN(pulse,     4acc, ffff, CF_ISA_A);
4496     BASE(illegal,   4afc, ffff);
4497     INSN(mull,      4c00, ffc0, CF_ISA_A);
4498     INSN(mull,      4c00, ffc0, LONG_MULDIV);
4499     INSN(divl,      4c40, ffc0, CF_ISA_A);
4500     INSN(divl,      4c40, ffc0, LONG_MULDIV);
4501     INSN(sats,      4c80, fff8, CF_ISA_B);
4502     BASE(trap,      4e40, fff0);
4503     BASE(link,      4e50, fff8);
4504     BASE(unlk,      4e58, fff8);
4505     INSN(move_to_usp, 4e60, fff8, USP);
4506     INSN(move_from_usp, 4e68, fff8, USP);
4507     BASE(nop,       4e71, ffff);
4508     BASE(stop,      4e72, ffff);
4509     BASE(rte,       4e73, ffff);
4510     BASE(rts,       4e75, ffff);
4511     INSN(movec,     4e7b, ffff, CF_ISA_A);
4512     BASE(jump,      4e80, ffc0);
4513     BASE(jump,      4ec0, ffc0);
4514     INSN(addsubq,   5000, f080, M68000);
4515     BASE(addsubq,   5080, f0c0);
4516     INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
4517     INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
4518     INSN(dbcc,      50c8, f0f8, M68000);
4519     INSN(tpf,       51f8, fff8, CF_ISA_A);
4520 
4521     /* Branch instructions.  */
4522     BASE(branch,    6000, f000);
4523     /* Disable long branch instructions, then add back the ones we want.  */
4524     BASE(undef,     60ff, f0ff); /* All long branches.  */
4525     INSN(branch,    60ff, f0ff, CF_ISA_B);
4526     INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
4527     INSN(branch,    60ff, ffff, BRAL);
4528     INSN(branch,    60ff, f0ff, BCCL);
4529 
4530     BASE(moveq,     7000, f100);
4531     INSN(mvzs,      7100, f100, CF_ISA_B);
4532     BASE(or,        8000, f000);
4533     BASE(divw,      80c0, f0c0);
4534     INSN(sbcd_reg,  8100, f1f8, M68000);
4535     INSN(sbcd_mem,  8108, f1f8, M68000);
4536     BASE(addsub,    9000, f000);
4537     INSN(undef,     90c0, f0c0, CF_ISA_A);
4538     INSN(subx_reg,  9180, f1f8, CF_ISA_A);
4539     INSN(subx_reg,  9100, f138, M68000);
4540     INSN(subx_mem,  9108, f138, M68000);
4541     INSN(suba,      91c0, f1c0, CF_ISA_A);
4542     INSN(suba,      90c0, f0c0, M68000);
4543 
4544     BASE(undef_mac, a000, f000);
4545     INSN(mac,       a000, f100, CF_EMAC);
4546     INSN(from_mac,  a180, f9b0, CF_EMAC);
4547     INSN(move_mac,  a110, f9fc, CF_EMAC);
4548     INSN(from_macsr, a980, f9f0, CF_EMAC);
4549     INSN(from_mask, ad80, fff0, CF_EMAC);
4550     INSN(from_mext, ab80, fbf0, CF_EMAC);
4551     INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
4552     INSN(to_mac,    a100, f9c0, CF_EMAC);
4553     INSN(to_macsr,  a900, ffc0, CF_EMAC);
4554     INSN(to_mext,   ab00, fbc0, CF_EMAC);
4555     INSN(to_mask,   ad00, ffc0, CF_EMAC);
4556 
4557     INSN(mov3q,     a140, f1c0, CF_ISA_B);
4558     INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
4559     INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
4560     INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
4561     INSN(cmp,       b080, f1c0, CF_ISA_A);
4562     INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
4563     INSN(cmp,       b000, f100, M68000);
4564     INSN(eor,       b100, f100, M68000);
4565     INSN(cmpm,      b108, f138, M68000);
4566     INSN(cmpa,      b0c0, f0c0, M68000);
4567     INSN(eor,       b180, f1c0, CF_ISA_A);
4568     BASE(and,       c000, f000);
4569     INSN(exg_dd,    c140, f1f8, M68000);
4570     INSN(exg_aa,    c148, f1f8, M68000);
4571     INSN(exg_da,    c188, f1f8, M68000);
4572     BASE(mulw,      c0c0, f0c0);
4573     INSN(abcd_reg,  c100, f1f8, M68000);
4574     INSN(abcd_mem,  c108, f1f8, M68000);
4575     BASE(addsub,    d000, f000);
4576     INSN(undef,     d0c0, f0c0, CF_ISA_A);
4577     INSN(addx_reg,  d180, f1f8, CF_ISA_A);
4578     INSN(addx_reg,  d100, f138, M68000);
4579     INSN(addx_mem,  d108, f138, M68000);
4580     INSN(adda,      d1c0, f1c0, CF_ISA_A);
4581     INSN(adda,      d0c0, f0c0, M68000);
4582     INSN(shift_im,  e080, f0f0, CF_ISA_A);
4583     INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
4584     INSN(shift8_im, e000, f0f0, M68000);
4585     INSN(shift16_im, e040, f0f0, M68000);
4586     INSN(shift_im,  e080, f0f0, M68000);
4587     INSN(shift8_reg, e020, f0f0, M68000);
4588     INSN(shift16_reg, e060, f0f0, M68000);
4589     INSN(shift_reg, e0a0, f0f0, M68000);
4590     INSN(shift_mem, e0c0, fcc0, M68000);
4591     INSN(rotate_im, e090, f0f0, M68000);
4592     INSN(rotate8_im, e010, f0f0, M68000);
4593     INSN(rotate16_im, e050, f0f0, M68000);
4594     INSN(rotate_reg, e0b0, f0f0, M68000);
4595     INSN(rotate8_reg, e030, f0f0, M68000);
4596     INSN(rotate16_reg, e070, f0f0, M68000);
4597     INSN(rotate_mem, e4c0, fcc0, M68000);
4598     INSN(undef_fpu, f000, f000, CF_ISA_A);
4599     INSN(fpu,       f200, ffc0, CF_FPU);
4600     INSN(fbcc,      f280, ffc0, CF_FPU);
4601     INSN(frestore,  f340, ffc0, CF_FPU);
4602     INSN(fsave,     f340, ffc0, CF_FPU);
4603     INSN(intouch,   f340, ffc0, CF_ISA_A);
4604     INSN(cpushl,    f428, ff38, CF_ISA_A);
4605     INSN(wddata,    fb00, ff00, CF_ISA_A);
4606     INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
4607 #undef INSN
4608 }
4609 
4610 /* ??? Some of this implementation is not exception safe.  We should always
4611    write back the result to memory before setting the condition codes.  */
4612 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
4613 {
4614     uint16_t insn = read_im16(env, s);
4615     opcode_table[insn](env, s, insn);
4616     do_writebacks(s);
4617 }
4618 
4619 /* generate intermediate code for basic block 'tb'.  */
4620 void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
4621 {
4622     M68kCPU *cpu = m68k_env_get_cpu(env);
4623     CPUState *cs = CPU(cpu);
4624     DisasContext dc1, *dc = &dc1;
4625     target_ulong pc_start;
4626     int pc_offset;
4627     int num_insns;
4628     int max_insns;
4629 
4630     /* generate intermediate code */
4631     pc_start = tb->pc;
4632 
4633     dc->tb = tb;
4634 
4635     dc->env = env;
4636     dc->is_jmp = DISAS_NEXT;
4637     dc->pc = pc_start;
4638     dc->cc_op = CC_OP_DYNAMIC;
4639     dc->cc_op_synced = 1;
4640     dc->singlestep_enabled = cs->singlestep_enabled;
4641     dc->fpcr = env->fpcr;
4642     dc->user = (env->sr & SR_S) == 0;
4643     dc->done_mac = 0;
4644     dc->writeback_mask = 0;
4645     num_insns = 0;
4646     max_insns = tb->cflags & CF_COUNT_MASK;
4647     if (max_insns == 0) {
4648         max_insns = CF_COUNT_MASK;
4649     }
4650     if (max_insns > TCG_MAX_INSNS) {
4651         max_insns = TCG_MAX_INSNS;
4652     }
4653 
4654     gen_tb_start(tb);
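         /*
          * Translate one insn at a time until the block ends: a jump or
          * exception, a full TCG op buffer, singlestepping, approaching the
          * end of the guest page, or hitting max_insns all terminate the
          * loop.
          */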
4655     do {
4656         pc_offset = dc->pc - pc_start;
4657         gen_throws_exception = NULL;
4658         tcg_gen_insn_start(dc->pc, dc->cc_op);
4659         num_insns++;
4660 
4661         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
4662             gen_exception(dc, dc->pc, EXCP_DEBUG);
4663             dc->is_jmp = DISAS_JUMP;
4664             /* The address covered by the breakpoint must be included in
4665                [tb->pc, tb->pc + tb->size) in order for it to be
4666                properly cleared -- thus we increment the PC here so that
4667                the logic setting tb->size below does the right thing.  */
4668             dc->pc += 2;
4669             break;
4670         }
4671 
4672         if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4673             gen_io_start();
4674         }
4675 
4676         dc->insn_pc = dc->pc;
4677         disas_m68k_insn(env, dc);
4678     } while (!dc->is_jmp && !tcg_op_buf_full() &&
4679              !cs->singlestep_enabled &&
4680              !singlestep &&
4681              (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
4682              num_insns < max_insns);
4683 
4684     if (tb->cflags & CF_LAST_IO)
4685         gen_io_end();
4686     if (unlikely(cs->singlestep_enabled)) {
4687         /* Make sure the pc is updated, and raise a debug exception.  */
4688         if (!dc->is_jmp) {
4689             update_cc_op(dc);
4690             tcg_gen_movi_i32(QREG_PC, dc->pc);
4691         }
4692         gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
4693     } else {
4694         switch(dc->is_jmp) {
4695         case DISAS_NEXT:
4696             update_cc_op(dc);
4697             gen_jmp_tb(dc, 0, dc->pc);
4698             break;
4699         default:
4700         case DISAS_JUMP:
4701         case DISAS_UPDATE:
4702             update_cc_op(dc);
4703             /* indicate that the hash table must be used to find the next TB */
4704             tcg_gen_exit_tb(0);
4705             break;
4706         case DISAS_TB_JUMP:
4707             /* nothing more to generate */
4708             break;
4709         }
4710     }
4711     gen_tb_end(tb, num_insns);
4712 
4713 #ifdef DEBUG_DISAS
4714     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
4715         && qemu_log_in_addr_range(pc_start)) {
4716         qemu_log_lock();
4717         qemu_log("----------------\n");
4718         qemu_log("IN: %s\n", lookup_symbol(pc_start));
4719         log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
4720         qemu_log("\n");
4721         qemu_log_unlock();
4722     }
4723 #endif
4724     tb->size = dc->pc - pc_start;
4725     tb->icount = num_insns;
4726 }
4727 
4728 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
4729                          int flags)
4730 {
4731     M68kCPU *cpu = M68K_CPU(cs);
4732     CPUM68KState *env = &cpu->env;
4733     int i;
4734     uint16_t sr;
4735     CPU_DoubleU u;
4736     for (i = 0; i < 8; i++)
4737       {
4738         u.d = env->fregs[i];
4739         cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
4740                     i, env->dregs[i], i, env->aregs[i],
4741                     i, u.l.upper, u.l.lower, *(double *)&u.d);
4742       }
4743     cpu_fprintf (f, "PC = %08x   ", env->pc);
4744     sr = env->sr | cpu_m68k_get_ccr(env);
4745     cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
4746                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
4747                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
4748     cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
4749 }
4750 
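     /*
      * Recover CPU state from the values recorded by tcg_gen_insn_start()
      * above: data[0] is the insn's PC and data[1] its cc_op, which is
      * only restored when it was not left dynamic at translation time.
      */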
4751 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
4752                           target_ulong *data)
4753 {
4754     int cc_op = data[1];
4755     env->pc = data[0];
4756     if (cc_op != CC_OP_DYNAMIC) {
4757         env->cc_op = cc_op;
4758     }
4759 }
4760