xref: /openbmc/qemu/target/m68k/translate.c (revision db7a99cd)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34 
35 
36 //#define DEBUG_DISPATCH 1
37 
38 /* Fake floating point.  */
39 #define tcg_gen_mov_f64 tcg_gen_mov_i64
40 #define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
41 #define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
42 
43 #define DEFO32(name, offset) static TCGv QREG_##name;
44 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
45 #define DEFF64(name, offset) static TCGv_i64 QREG_##name;
46 #include "qregs.def"
47 #undef DEFO32
48 #undef DEFO64
49 #undef DEFF64
50 
51 static TCGv_i32 cpu_halted;
52 static TCGv_i32 cpu_exception_index;
53 
54 static TCGv_env cpu_env;
55 
56 static char cpu_reg_names[3*8*3 + 5*4];
57 static TCGv cpu_dregs[8];
58 static TCGv cpu_aregs[8];
59 static TCGv_i64 cpu_fregs[8];
60 static TCGv_i64 cpu_macc[4];
61 
62 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
63 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
64 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
65 #define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
66 #define MACREG(acc)     cpu_macc[acc]
67 #define QREG_SP         get_areg(s, 7)
68 
69 static TCGv NULL_QREG;
70 #define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
71 /* Used to distinguish stores from bad addressing modes.  */
72 static TCGv store_dummy;
73 
74 #include "exec/gen-icount.h"
75 
76 void m68k_tcg_init(void)
77 {
78     char *p;
79     int i;
80 
81     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
82     tcg_ctx.tcg_env = cpu_env;
83 
84 #define DEFO32(name, offset) \
85     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
86         offsetof(CPUM68KState, offset), #name);
87 #define DEFO64(name, offset) \
88     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
89         offsetof(CPUM68KState, offset), #name);
90 #define DEFF64(name, offset) DEFO64(name, offset)
91 #include "qregs.def"
92 #undef DEFO32
93 #undef DEFO64
94 #undef DEFF64
95 
96     cpu_halted = tcg_global_mem_new_i32(cpu_env,
97                                         -offsetof(M68kCPU, env) +
98                                         offsetof(CPUState, halted), "HALTED");
99     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
100                                                  -offsetof(M68kCPU, env) +
101                                                  offsetof(CPUState, exception_index),
102                                                  "EXCEPTION");
103 
104     p = cpu_reg_names;
105     for (i = 0; i < 8; i++) {
106         sprintf(p, "D%d", i);
107         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
108                                           offsetof(CPUM68KState, dregs[i]), p);
109         p += 3;
110         sprintf(p, "A%d", i);
111         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
112                                           offsetof(CPUM68KState, aregs[i]), p);
113         p += 3;
114         sprintf(p, "F%d", i);
115         cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
116                                           offsetof(CPUM68KState, fregs[i]), p);
117         p += 3;
118     }
119     for (i = 0; i < 4; i++) {
120         sprintf(p, "ACC%d", i);
121         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
122                                          offsetof(CPUM68KState, macc[i]), p);
123         p += 5;
124     }
125 
126     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
127     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
128 }
129 
130 /* internal defines */
131 typedef struct DisasContext {
132     CPUM68KState *env;
133     target_ulong insn_pc; /* Start of the current instruction.  */
134     target_ulong pc;
135     int is_jmp;
136     CCOp cc_op; /* Current CC operation */
137     int cc_op_synced;
138     int user;
139     uint32_t fpcr;
140     struct TranslationBlock *tb;
141     int singlestep_enabled;
142     TCGv_i64 mactmp;
143     int done_mac;
144     int writeback_mask;
145     TCGv writeback[8];
146 } DisasContext;
147 
148 static TCGv get_areg(DisasContext *s, unsigned regno)
149 {
150     if (s->writeback_mask & (1 << regno)) {
151         return s->writeback[regno];
152     } else {
153         return cpu_aregs[regno];
154     }
155 }
156 
157 static void delay_set_areg(DisasContext *s, unsigned regno,
158                            TCGv val, bool give_temp)
159 {
160     if (s->writeback_mask & (1 << regno)) {
161         if (give_temp) {
162             tcg_temp_free(s->writeback[regno]);
163             s->writeback[regno] = val;
164         } else {
165             tcg_gen_mov_i32(s->writeback[regno], val);
166         }
167     } else {
168         s->writeback_mask |= 1 << regno;
169         if (give_temp) {
170             s->writeback[regno] = val;
171         } else {
172             TCGv tmp = tcg_temp_new();
173             s->writeback[regno] = tmp;
174             tcg_gen_mov_i32(tmp, val);
175         }
176     }
177 }
178 
179 static void do_writebacks(DisasContext *s)
180 {
181     unsigned mask = s->writeback_mask;
182     if (mask) {
183         s->writeback_mask = 0;
184         do {
185             unsigned regno = ctz32(mask);
186             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
187             tcg_temp_free(s->writeback[regno]);
188             mask &= mask - 1;
189         } while (mask);
190     }
191 }
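
/* Usage sketch (illustrative): an instruction such as "move.l (a0)+,d0"
 * does not update A0 immediately.  gen_ea_mode() computes the incremented
 * address into a temporary and registers it with delay_set_areg();
 * get_areg() then returns that pending temporary for any later use of A0
 * within the same instruction, and do_writebacks() commits the pending
 * values to cpu_aregs[] once the instruction can no longer fault.
 */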
192 
193 #define DISAS_JUMP_NEXT 4
194 
195 #if defined(CONFIG_USER_ONLY)
196 #define IS_USER(s) 1
197 #else
198 #define IS_USER(s) s->user
199 #endif
200 
201 /* XXX: move that elsewhere */
202 /* ??? Fix exceptions.  */
203 static void *gen_throws_exception;
204 #define gen_last_qop NULL
205 
206 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
207 
208 #ifdef DEBUG_DISPATCH
209 #define DISAS_INSN(name)                                                \
210     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
211                                   uint16_t insn);                       \
212     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
213                              uint16_t insn)                             \
214     {                                                                   \
215         qemu_log("Dispatch " #name "\n");                               \
216         real_disas_##name(env, s, insn);                                \
217     }                                                                   \
218     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
219                                   uint16_t insn)
220 #else
221 #define DISAS_INSN(name)                                                \
222     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
223                              uint16_t insn)
224 #endif
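
/* For illustration, DISAS_INSN(mulw) expands (without DEBUG_DISPATCH) to:
 *
 *     static void disas_mulw(CPUM68KState *env, DisasContext *s,
 *                            uint16_t insn)
 *
 * i.e. every handler below receives the CPU state, the translation context
 * and the already-fetched 16-bit opcode word.
 */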
225 
226 static const uint8_t cc_op_live[CC_OP_NB] = {
227     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
228     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
229     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
230     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
231     [CC_OP_LOGIC] = CCF_X | CCF_N
232 };
233 
234 static void set_cc_op(DisasContext *s, CCOp op)
235 {
236     CCOp old_op = s->cc_op;
237     int dead;
238 
239     if (old_op == op) {
240         return;
241     }
242     s->cc_op = op;
243     s->cc_op_synced = 0;
244 
245     /* Discard CC computation that will no longer be used.
246        Note that X and N are never dead.  */
247     dead = cc_op_live[old_op] & ~cc_op_live[op];
248     if (dead & CCF_C) {
249         tcg_gen_discard_i32(QREG_CC_C);
250     }
251     if (dead & CCF_Z) {
252         tcg_gen_discard_i32(QREG_CC_Z);
253     }
254     if (dead & CCF_V) {
255         tcg_gen_discard_i32(QREG_CC_V);
256     }
257 }
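
/* Worked example: switching from CC_OP_ADDL (live X|N|V) to CC_OP_LOGIC
 * (live X|N) leaves dead = CCF_V, so only QREG_CC_V is discarded; X and N
 * are kept because they are live in every state.  Switching to CC_OP_FLAGS
 * never discards anything, since all of C, V, Z, N and X stay live there.
 */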
258 
259 /* Update the CPU env CC_OP state.  */
260 static void update_cc_op(DisasContext *s)
261 {
262     if (!s->cc_op_synced) {
263         s->cc_op_synced = 1;
264         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
265     }
266 }
267 
268 /* Generate a load from the specified address.  Narrow values are
269    sign or zero extended to full register width, according to SIGN.  */
270 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
271 {
272     TCGv tmp;
273     int index = IS_USER(s);
274     tmp = tcg_temp_new_i32();
275     switch(opsize) {
276     case OS_BYTE:
277         if (sign)
278             tcg_gen_qemu_ld8s(tmp, addr, index);
279         else
280             tcg_gen_qemu_ld8u(tmp, addr, index);
281         break;
282     case OS_WORD:
283         if (sign)
284             tcg_gen_qemu_ld16s(tmp, addr, index);
285         else
286             tcg_gen_qemu_ld16u(tmp, addr, index);
287         break;
288     case OS_LONG:
289     case OS_SINGLE:
290         tcg_gen_qemu_ld32u(tmp, addr, index);
291         break;
292     default:
293         g_assert_not_reached();
294     }
295     gen_throws_exception = gen_last_qop;
296     return tmp;
297 }
298 
299 static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
300 {
301     TCGv_i64 tmp;
302     int index = IS_USER(s);
303     tmp = tcg_temp_new_i64();
304     tcg_gen_qemu_ldf64(tmp, addr, index);
305     gen_throws_exception = gen_last_qop;
306     return tmp;
307 }
308 
309 /* Generate a store.  */
310 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
311 {
312     int index = IS_USER(s);
313     switch(opsize) {
314     case OS_BYTE:
315         tcg_gen_qemu_st8(val, addr, index);
316         break;
317     case OS_WORD:
318         tcg_gen_qemu_st16(val, addr, index);
319         break;
320     case OS_LONG:
321     case OS_SINGLE:
322         tcg_gen_qemu_st32(val, addr, index);
323         break;
324     default:
325         g_assert_not_reached();
326     }
327     gen_throws_exception = gen_last_qop;
328 }
329 
330 static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
331 {
332     int index = IS_USER(s);
333     tcg_gen_qemu_stf64(val, addr, index);
334     gen_throws_exception = gen_last_qop;
335 }
336 
337 typedef enum {
338     EA_STORE,
339     EA_LOADU,
340     EA_LOADS
341 } ea_what;
342 
343 /* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
344    EA_LOADS, otherwise (EA_STORE) generate a store of VAL.  */
345 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
346                      ea_what what)
347 {
348     if (what == EA_STORE) {
349         gen_store(s, opsize, addr, val);
350         return store_dummy;
351     } else {
352         return gen_load(s, opsize, addr, what == EA_LOADS);
353     }
354 }
355 
356 /* Read a 16-bit immediate constant */
357 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
358 {
359     uint16_t im;
360     im = cpu_lduw_code(env, s->pc);
361     s->pc += 2;
362     return im;
363 }
364 
365 /* Read an 8-bit immediate constant */
366 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
367 {
368     return read_im16(env, s);
369 }
370 
371 /* Read a 32-bit immediate constant.  */
372 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
373 {
374     uint32_t im;
375     im = read_im16(env, s) << 16;
376     im |= 0xffff & read_im16(env, s);
377     return im;
378 }
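
/* For example, if the instruction stream contains the bytes
 * 0x12 0x34 0x56 0x78, read_im32() fetches two big-endian words and
 * returns 0x12345678.  Byte immediates occupy the low half of a full
 * 16-bit extension word, which is why read_im8() reads a whole word and
 * returns only its low byte.
 */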
379 
380 /* Calculate an address index.  */
381 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
382 {
383     TCGv add;
384     int scale;
385 
386     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
387     if ((ext & 0x800) == 0) {
388         tcg_gen_ext16s_i32(tmp, add);
389         add = tmp;
390     }
391     scale = (ext >> 9) & 3;
392     if (scale != 0) {
393         tcg_gen_shli_i32(tmp, add, scale);
394         add = tmp;
395     }
396     return add;
397 }
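
/* Index field example (brief and full extension words share this layout):
 * for ext = 0xd400 the index register is A5 (bit 15 set selects an address
 * register, bits 14-12 give the register number), bit 11 clear selects a
 * sign-extended 16-bit index, and the scale field (bits 10-9) of 2 shifts
 * it left by 2, so the index contribution is (int16_t)A5 * 4.
 */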
398 
399 /* Handle a base + index + displacement effective address.
400    A NULL_QREG base means pc-relative.  */
401 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
402 {
403     uint32_t offset;
404     uint16_t ext;
405     TCGv add;
406     TCGv tmp;
407     uint32_t bd, od;
408 
409     offset = s->pc;
410     ext = read_im16(env, s);
411 
412     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
413         return NULL_QREG;
414 
415     if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
416         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
417         ext &= ~(3 << 9);
418     }
419 
420     if (ext & 0x100) {
421         /* full extension word format */
422         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
423             return NULL_QREG;
424 
425         if ((ext & 0x30) > 0x10) {
426             /* base displacement */
427             if ((ext & 0x30) == 0x20) {
428                 bd = (int16_t)read_im16(env, s);
429             } else {
430                 bd = read_im32(env, s);
431             }
432         } else {
433             bd = 0;
434         }
435         tmp = tcg_temp_new();
436         if ((ext & 0x44) == 0) {
437             /* pre-index */
438             add = gen_addr_index(s, ext, tmp);
439         } else {
440             add = NULL_QREG;
441         }
442         if ((ext & 0x80) == 0) {
443             /* base not suppressed */
444             if (IS_NULL_QREG(base)) {
445                 base = tcg_const_i32(offset + bd);
446                 bd = 0;
447             }
448             if (!IS_NULL_QREG(add)) {
449                 tcg_gen_add_i32(tmp, add, base);
450                 add = tmp;
451             } else {
452                 add = base;
453             }
454         }
455         if (!IS_NULL_QREG(add)) {
456             if (bd != 0) {
457                 tcg_gen_addi_i32(tmp, add, bd);
458                 add = tmp;
459             }
460         } else {
461             add = tcg_const_i32(bd);
462         }
463         if ((ext & 3) != 0) {
464             /* memory indirect */
465             base = gen_load(s, OS_LONG, add, 0);
466             if ((ext & 0x44) == 4) {
467                 add = gen_addr_index(s, ext, tmp);
468                 tcg_gen_add_i32(tmp, add, base);
469                 add = tmp;
470             } else {
471                 add = base;
472             }
473             if ((ext & 3) > 1) {
474                 /* outer displacement */
475                 if ((ext & 3) == 2) {
476                     od = (int16_t)read_im16(env, s);
477                 } else {
478                     od = read_im32(env, s);
479                 }
480             } else {
481                 od = 0;
482             }
483             if (od != 0) {
484                 tcg_gen_addi_i32(tmp, add, od);
485                 add = tmp;
486             }
487         }
488     } else {
489         /* brief extension word format */
490         tmp = tcg_temp_new();
491         add = gen_addr_index(s, ext, tmp);
492         if (!IS_NULL_QREG(base)) {
493             tcg_gen_add_i32(tmp, add, base);
494             if ((int8_t)ext)
495                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
496         } else {
497             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
498         }
499         add = tmp;
500     }
501     return add;
502 }
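
/* Extension word formats handled above (brief vs full), roughly:
 *   brief: bit 8 clear, bits 7-0 hold a signed 8-bit displacement,
 *          bits 15-9 describe the index register as in gen_addr_index().
 *   full:  bit 8 set, bit 7 suppresses the base, bit 6 the index,
 *          bits 5-4 give the base displacement size and bits 2-0 select
 *          memory indirection and the outer displacement size.
 * e.g. "move.l 8(a0,d1.w*2),d0" uses the brief form with base A0,
 * index D1 (word-sized, scale 2) and displacement 8.
 */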
503 
504 /* Sign or zero extend a value.  */
505 
506 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
507 {
508     switch (opsize) {
509     case OS_BYTE:
510         if (sign) {
511             tcg_gen_ext8s_i32(res, val);
512         } else {
513             tcg_gen_ext8u_i32(res, val);
514         }
515         break;
516     case OS_WORD:
517         if (sign) {
518             tcg_gen_ext16s_i32(res, val);
519         } else {
520             tcg_gen_ext16u_i32(res, val);
521         }
522         break;
523     case OS_LONG:
524         tcg_gen_mov_i32(res, val);
525         break;
526     default:
527         g_assert_not_reached();
528     }
529 }
530 
531 /* Evaluate all the CC flags.  */
532 
533 static void gen_flush_flags(DisasContext *s)
534 {
535     TCGv t0, t1;
536 
537     switch (s->cc_op) {
538     case CC_OP_FLAGS:
539         return;
540 
541     case CC_OP_ADDB:
542     case CC_OP_ADDW:
543     case CC_OP_ADDL:
544         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
545         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
546         /* Compute signed overflow for addition.  */
547         t0 = tcg_temp_new();
548         t1 = tcg_temp_new();
549         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
550         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
551         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
552         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
553         tcg_temp_free(t0);
554         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
555         tcg_temp_free(t1);
556         break;
557 
558     case CC_OP_SUBB:
559     case CC_OP_SUBW:
560     case CC_OP_SUBL:
561         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
562         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
563         /* Compute signed overflow for subtraction.  */
564         t0 = tcg_temp_new();
565         t1 = tcg_temp_new();
566         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
567         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
568         tcg_gen_xor_i32(t1, QREG_CC_N, t0);
569         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
570         tcg_temp_free(t0);
571         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
572         tcg_temp_free(t1);
573         break;
574 
575     case CC_OP_CMPB:
576     case CC_OP_CMPW:
577     case CC_OP_CMPL:
578         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
579         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
580         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
581         /* Compute signed overflow for subtraction.  */
582         t0 = tcg_temp_new();
583         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
584         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
585         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
586         tcg_temp_free(t0);
587         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
588         break;
589 
590     case CC_OP_LOGIC:
591         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
592         tcg_gen_movi_i32(QREG_CC_C, 0);
593         tcg_gen_movi_i32(QREG_CC_V, 0);
594         break;
595 
596     case CC_OP_DYNAMIC:
597         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
598         s->cc_op_synced = 1;
599         break;
600 
601     default:
602         t0 = tcg_const_i32(s->cc_op);
603         gen_helper_flush_flags(cpu_env, t0);
604         tcg_temp_free(t0);
605         s->cc_op_synced = 1;
606         break;
607     }
608 
609     /* Note that flush_flags also assigns to env->cc_op.  */
610     s->cc_op = CC_OP_FLAGS;
611 }
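
/* Worked example of the lazy flag scheme: after "add.b #0x20,d0" with
 * d0.b = 0x70, the CC_OP_ADDB state leaves QREG_CC_N = 0xffffff90 (the
 * sign-extended result), QREG_CC_V = 0x20 (the source operand) and
 * QREG_CC_X = 0 (no carry out).  gen_flush_flags() then yields C = X = 0,
 * Z clear (result non-zero), N set (the byte result is negative) and
 * V set, because both operands are positive while the result is negative.
 */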
612 
613 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
614 {
615     TCGv tmp;
616 
617     if (opsize == OS_LONG) {
618         tmp = val;
619     } else {
620         tmp = tcg_temp_new();
621         gen_ext(tmp, val, opsize, sign);
622     }
623 
624     return tmp;
625 }
626 
627 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
628 {
629     gen_ext(QREG_CC_N, val, opsize, 1);
630     set_cc_op(s, CC_OP_LOGIC);
631 }
632 
633 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
634 {
635     tcg_gen_mov_i32(QREG_CC_N, dest);
636     tcg_gen_mov_i32(QREG_CC_V, src);
637     set_cc_op(s, CC_OP_CMPB + opsize);
638 }
639 
640 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
641 {
642     gen_ext(QREG_CC_N, dest, opsize, 1);
643     tcg_gen_mov_i32(QREG_CC_V, src);
644 }
645 
646 static inline int opsize_bytes(int opsize)
647 {
648     switch (opsize) {
649     case OS_BYTE: return 1;
650     case OS_WORD: return 2;
651     case OS_LONG: return 4;
652     case OS_SINGLE: return 4;
653     case OS_DOUBLE: return 8;
654     case OS_EXTENDED: return 12;
655     case OS_PACKED: return 12;
656     default:
657         g_assert_not_reached();
658     }
659 }
660 
661 static inline int insn_opsize(int insn)
662 {
663     switch ((insn >> 6) & 3) {
664     case 0: return OS_BYTE;
665     case 1: return OS_WORD;
666     case 2: return OS_LONG;
667     default:
668         g_assert_not_reached();
669     }
670 }
671 
672 static inline int ext_opsize(int ext, int pos)
673 {
674     switch ((ext >> pos) & 7) {
675     case 0: return OS_LONG;
676     case 1: return OS_SINGLE;
677     case 2: return OS_EXTENDED;
678     case 3: return OS_PACKED;
679     case 4: return OS_WORD;
680     case 5: return OS_DOUBLE;
681     case 6: return OS_BYTE;
682     default:
683         g_assert_not_reached();
684     }
685 }
686 
687 /* Assign value to a register.  If the width is less than the register width
688    only the low part of the register is set.  */
689 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
690 {
691     TCGv tmp;
692     switch (opsize) {
693     case OS_BYTE:
694         tcg_gen_andi_i32(reg, reg, 0xffffff00);
695         tmp = tcg_temp_new();
696         tcg_gen_ext8u_i32(tmp, val);
697         tcg_gen_or_i32(reg, reg, tmp);
698         tcg_temp_free(tmp);
699         break;
700     case OS_WORD:
701         tcg_gen_andi_i32(reg, reg, 0xffff0000);
702         tmp = tcg_temp_new();
703         tcg_gen_ext16u_i32(tmp, val);
704         tcg_gen_or_i32(reg, reg, tmp);
705         tcg_temp_free(tmp);
706         break;
707     case OS_LONG:
708     case OS_SINGLE:
709         tcg_gen_mov_i32(reg, val);
710         break;
711     default:
712         g_assert_not_reached();
713     }
714 }
715 
716 /* Generate code for an "effective address".  Does not adjust the base
717    register for autoincrement addressing modes.  */
718 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
719                          int mode, int reg0, int opsize)
720 {
721     TCGv reg;
722     TCGv tmp;
723     uint16_t ext;
724     uint32_t offset;
725 
726     switch (mode) {
727     case 0: /* Data register direct.  */
728     case 1: /* Address register direct.  */
729         return NULL_QREG;
730     case 3: /* Indirect postincrement.  */
731         if (opsize == OS_UNSIZED) {
732             return NULL_QREG;
733         }
734         /* fallthru */
735     case 2: /* Indirect register */
736         return get_areg(s, reg0);
737     case 4: /* Indirect predecrement.  */
738         if (opsize == OS_UNSIZED) {
739             return NULL_QREG;
740         }
741         reg = get_areg(s, reg0);
742         tmp = tcg_temp_new();
743         if (reg0 == 7 && opsize == OS_BYTE &&
744             m68k_feature(s->env, M68K_FEATURE_M68000)) {
745             tcg_gen_subi_i32(tmp, reg, 2);
746         } else {
747             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
748         }
749         return tmp;
750     case 5: /* Indirect displacement.  */
751         reg = get_areg(s, reg0);
752         tmp = tcg_temp_new();
753         ext = read_im16(env, s);
754         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
755         return tmp;
756     case 6: /* Indirect index + displacement.  */
757         reg = get_areg(s, reg0);
758         return gen_lea_indexed(env, s, reg);
759     case 7: /* Other */
760         switch (reg0) {
761         case 0: /* Absolute short.  */
762             offset = (int16_t)read_im16(env, s);
763             return tcg_const_i32(offset);
764         case 1: /* Absolute long.  */
765             offset = read_im32(env, s);
766             return tcg_const_i32(offset);
767         case 2: /* pc displacement  */
768             offset = s->pc;
769             offset += (int16_t)read_im16(env, s);
770             return tcg_const_i32(offset);
771         case 3: /* pc index+displacement.  */
772             return gen_lea_indexed(env, s, NULL_QREG);
773         case 4: /* Immediate.  */
774         default:
775             return NULL_QREG;
776         }
777     }
778     /* Should never happen.  */
779     return NULL_QREG;
780 }
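
/* Decode sketch: the EA is described by a 3-bit mode and a 3-bit register
 * field.  For instance "lea 8(a1),a0" (mode 5, reg0 1) reads one extension
 * word and returns a temporary holding A1 + 8, while register-direct modes
 * 0/1 and immediates return NULL_QREG because they have no memory address.
 */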
781 
782 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
783                     int opsize)
784 {
785     int mode = extract32(insn, 3, 3);
786     int reg0 = REG(insn, 0);
787     return gen_lea_mode(env, s, mode, reg0, opsize);
788 }
789 
790 /* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
791    this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
792    zero extends).  ADDRP is non-null for read-modify-write operands.  */
793 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
794                         int opsize, TCGv val, TCGv *addrp, ea_what what)
795 {
796     TCGv reg, tmp, result;
797     int32_t offset;
798 
799     switch (mode) {
800     case 0: /* Data register direct.  */
801         reg = cpu_dregs[reg0];
802         if (what == EA_STORE) {
803             gen_partset_reg(opsize, reg, val);
804             return store_dummy;
805         } else {
806             return gen_extend(reg, opsize, what == EA_LOADS);
807         }
808     case 1: /* Address register direct.  */
809         reg = get_areg(s, reg0);
810         if (what == EA_STORE) {
811             tcg_gen_mov_i32(reg, val);
812             return store_dummy;
813         } else {
814             return gen_extend(reg, opsize, what == EA_LOADS);
815         }
816     case 2: /* Indirect register */
817         reg = get_areg(s, reg0);
818         return gen_ldst(s, opsize, reg, val, what);
819     case 3: /* Indirect postincrement.  */
820         reg = get_areg(s, reg0);
821         result = gen_ldst(s, opsize, reg, val, what);
822         if (what == EA_STORE || !addrp) {
823             TCGv tmp = tcg_temp_new();
824             if (reg0 == 7 && opsize == OS_BYTE &&
825                 m68k_feature(s->env, M68K_FEATURE_M68000)) {
826                 tcg_gen_addi_i32(tmp, reg, 2);
827             } else {
828                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
829             }
830             delay_set_areg(s, reg0, tmp, true);
831         }
832         return result;
833     case 4: /* Indirect predecrement.  */
834         if (addrp && what == EA_STORE) {
835             tmp = *addrp;
836         } else {
837             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
838             if (IS_NULL_QREG(tmp)) {
839                 return tmp;
840             }
841             if (addrp) {
842                 *addrp = tmp;
843             }
844         }
845         result = gen_ldst(s, opsize, tmp, val, what);
846         if (what == EA_STORE || !addrp) {
847             delay_set_areg(s, reg0, tmp, false);
848         }
849         return result;
850     case 5: /* Indirect displacement.  */
851     case 6: /* Indirect index + displacement.  */
852     do_indirect:
853         if (addrp && what == EA_STORE) {
854             tmp = *addrp;
855         } else {
856             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
857             if (IS_NULL_QREG(tmp)) {
858                 return tmp;
859             }
860             if (addrp) {
861                 *addrp = tmp;
862             }
863         }
864         return gen_ldst(s, opsize, tmp, val, what);
865     case 7: /* Other */
866         switch (reg0) {
867         case 0: /* Absolute short.  */
868         case 1: /* Absolute long.  */
869         case 2: /* pc displacement  */
870         case 3: /* pc index+displacement.  */
871             goto do_indirect;
872         case 4: /* Immediate.  */
873             /* Sign extend values for consistency.  */
874             switch (opsize) {
875             case OS_BYTE:
876                 if (what == EA_LOADS) {
877                     offset = (int8_t)read_im8(env, s);
878                 } else {
879                     offset = read_im8(env, s);
880                 }
881                 break;
882             case OS_WORD:
883                 if (what == EA_LOADS) {
884                     offset = (int16_t)read_im16(env, s);
885                 } else {
886                     offset = read_im16(env, s);
887                 }
888                 break;
889             case OS_LONG:
890                 offset = read_im32(env, s);
891                 break;
892             default:
893                 g_assert_not_reached();
894             }
895             return tcg_const_i32(offset);
896         default:
897             return NULL_QREG;
898         }
899     }
900     /* Should never happen.  */
901     return NULL_QREG;
902 }
903 
904 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
905                    int opsize, TCGv val, TCGv *addrp, ea_what what)
906 {
907     int mode = extract32(insn, 3, 3);
908     int reg0 = REG(insn, 0);
909     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
910 }
911 
912 typedef struct {
913     TCGCond tcond;
914     bool g1;
915     bool g2;
916     TCGv v1;
917     TCGv v2;
918 } DisasCompare;
919 
920 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
921 {
922     TCGv tmp, tmp2;
923     TCGCond tcond;
924     CCOp op = s->cc_op;
925 
926     /* The CC_OP_CMP form can handle most normal comparisons directly.  */
927     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
928         c->g1 = c->g2 = 1;
929         c->v1 = QREG_CC_N;
930         c->v2 = QREG_CC_V;
931         switch (cond) {
932         case 2: /* HI */
933         case 3: /* LS */
934             tcond = TCG_COND_LEU;
935             goto done;
936         case 4: /* CC */
937         case 5: /* CS */
938             tcond = TCG_COND_LTU;
939             goto done;
940         case 6: /* NE */
941         case 7: /* EQ */
942             tcond = TCG_COND_EQ;
943             goto done;
944         case 10: /* PL */
945         case 11: /* MI */
946             c->g1 = c->g2 = 0;
947             c->v2 = tcg_const_i32(0);
948             c->v1 = tmp = tcg_temp_new();
949             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
950             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
951             /* fallthru */
952         case 12: /* GE */
953         case 13: /* LT */
954             tcond = TCG_COND_LT;
955             goto done;
956         case 14: /* GT */
957         case 15: /* LE */
958             tcond = TCG_COND_LE;
959             goto done;
960         }
961     }
962 
963     c->g1 = 1;
964     c->g2 = 0;
965     c->v2 = tcg_const_i32(0);
966 
967     switch (cond) {
968     case 0: /* T */
969     case 1: /* F */
970         c->v1 = c->v2;
971         tcond = TCG_COND_NEVER;
972         goto done;
973     case 14: /* GT (!(Z || (N ^ V))) */
974     case 15: /* LE (Z || (N ^ V)) */
975         /* Logic operations clear V, which simplifies LE to (Z || N),
976            and since Z and N are co-located, this becomes a normal
977            comparison vs N.  */
978         if (op == CC_OP_LOGIC) {
979             c->v1 = QREG_CC_N;
980             tcond = TCG_COND_LE;
981             goto done;
982         }
983         break;
984     case 12: /* GE (!(N ^ V)) */
985     case 13: /* LT (N ^ V) */
986         /* Logic operations clear V, which simplifies this to N.  */
987         if (op != CC_OP_LOGIC) {
988             break;
989         }
990         /* fallthru */
991     case 10: /* PL (!N) */
992     case 11: /* MI (N) */
993         /* Several cases represent N normally.  */
994         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
995             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
996             op == CC_OP_LOGIC) {
997             c->v1 = QREG_CC_N;
998             tcond = TCG_COND_LT;
999             goto done;
1000         }
1001         break;
1002     case 6: /* NE (!Z) */
1003     case 7: /* EQ (Z) */
1004         /* Some cases fold Z into N.  */
1005         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1006             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1007             op == CC_OP_LOGIC) {
1008             tcond = TCG_COND_EQ;
1009             c->v1 = QREG_CC_N;
1010             goto done;
1011         }
1012         break;
1013     case 4: /* CC (!C) */
1014     case 5: /* CS (C) */
1015         /* Some cases fold C into X.  */
1016         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1017             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1018             tcond = TCG_COND_NE;
1019             c->v1 = QREG_CC_X;
1020             goto done;
1021         }
1022         /* fallthru */
1023     case 8: /* VC (!V) */
1024     case 9: /* VS (V) */
1025         /* Logic operations clear V and C.  */
1026         if (op == CC_OP_LOGIC) {
1027             tcond = TCG_COND_NEVER;
1028             c->v1 = c->v2;
1029             goto done;
1030         }
1031         break;
1032     }
1033 
1034     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1035     gen_flush_flags(s);
1036 
1037     switch (cond) {
1038     case 0: /* T */
1039     case 1: /* F */
1040     default:
1041         /* Invalid, or handled above.  */
1042         abort();
1043     case 2: /* HI (!C && !Z) -> !(C || Z)*/
1044     case 3: /* LS (C || Z) */
1045         c->v1 = tmp = tcg_temp_new();
1046         c->g1 = 0;
1047         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1048         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1049         tcond = TCG_COND_NE;
1050         break;
1051     case 4: /* CC (!C) */
1052     case 5: /* CS (C) */
1053         c->v1 = QREG_CC_C;
1054         tcond = TCG_COND_NE;
1055         break;
1056     case 6: /* NE (!Z) */
1057     case 7: /* EQ (Z) */
1058         c->v1 = QREG_CC_Z;
1059         tcond = TCG_COND_EQ;
1060         break;
1061     case 8: /* VC (!V) */
1062     case 9: /* VS (V) */
1063         c->v1 = QREG_CC_V;
1064         tcond = TCG_COND_LT;
1065         break;
1066     case 10: /* PL (!N) */
1067     case 11: /* MI (N) */
1068         c->v1 = QREG_CC_N;
1069         tcond = TCG_COND_LT;
1070         break;
1071     case 12: /* GE (!(N ^ V)) */
1072     case 13: /* LT (N ^ V) */
1073         c->v1 = tmp = tcg_temp_new();
1074         c->g1 = 0;
1075         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1076         tcond = TCG_COND_LT;
1077         break;
1078     case 14: /* GT (!(Z || (N ^ V))) */
1079     case 15: /* LE (Z || (N ^ V)) */
1080         c->v1 = tmp = tcg_temp_new();
1081         c->g1 = 0;
1082         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1083         tcg_gen_neg_i32(tmp, tmp);
1084         tmp2 = tcg_temp_new();
1085         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1086         tcg_gen_or_i32(tmp, tmp, tmp2);
1087         tcg_temp_free(tmp2);
1088         tcond = TCG_COND_LT;
1089         break;
1090     }
1091 
1092  done:
1093     if ((cond & 1) == 0) {
1094         tcond = tcg_invert_cond(tcond);
1095     }
1096     c->tcond = tcond;
1097 }
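
/* Example: after "cmp.l d1,d0" the state is roughly CC_OP_CMPL with
 * QREG_CC_N holding the destination (d0) and QREG_CC_V the source (d1),
 * so a following "beq" (cond 7) is emitted directly as a TCG_COND_EQ
 * comparison of those two values; "bne" (cond 6) takes the same path and
 * is inverted at "done", since even condition codes are the negations of
 * their odd counterparts.
 */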
1098 
1099 static void free_cond(DisasCompare *c)
1100 {
1101     if (!c->g1) {
1102         tcg_temp_free(c->v1);
1103     }
1104     if (!c->g2) {
1105         tcg_temp_free(c->v2);
1106     }
1107 }
1108 
1109 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1110 {
1111   DisasCompare c;
1112 
1113   gen_cc_cond(&c, s, cond);
1114   update_cc_op(s);
1115   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1116   free_cond(&c);
1117 }
1118 
1119 /* Force a TB lookup after an instruction that changes the CPU state.  */
1120 static void gen_lookup_tb(DisasContext *s)
1121 {
1122     update_cc_op(s);
1123     tcg_gen_movi_i32(QREG_PC, s->pc);
1124     s->is_jmp = DISAS_UPDATE;
1125 }
1126 
1127 /* Generate a jump to an immediate address.  */
1128 static void gen_jmp_im(DisasContext *s, uint32_t dest)
1129 {
1130     update_cc_op(s);
1131     tcg_gen_movi_i32(QREG_PC, dest);
1132     s->is_jmp = DISAS_JUMP;
1133 }
1134 
1135 /* Generate a jump to the address in qreg DEST.  */
1136 static void gen_jmp(DisasContext *s, TCGv dest)
1137 {
1138     update_cc_op(s);
1139     tcg_gen_mov_i32(QREG_PC, dest);
1140     s->is_jmp = DISAS_JUMP;
1141 }
1142 
1143 static void gen_raise_exception(int nr)
1144 {
1145     TCGv_i32 tmp = tcg_const_i32(nr);
1146 
1147     gen_helper_raise_exception(cpu_env, tmp);
1148     tcg_temp_free_i32(tmp);
1149 }
1150 
1151 static void gen_exception(DisasContext *s, uint32_t where, int nr)
1152 {
1153     update_cc_op(s);
1154     gen_jmp_im(s, where);
1155     gen_raise_exception(nr);
1156 }
1157 
1158 static inline void gen_addr_fault(DisasContext *s)
1159 {
1160     gen_exception(s, s->insn_pc, EXCP_ADDRESS);
1161 }
1162 
1163 #define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1164         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1165                         op_sign ? EA_LOADS : EA_LOADU);                 \
1166         if (IS_NULL_QREG(result)) {                                     \
1167             gen_addr_fault(s);                                          \
1168             return;                                                     \
1169         }                                                               \
1170     } while (0)
1171 
1172 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1173         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1174         if (IS_NULL_QREG(ea_result)) {                                  \
1175             gen_addr_fault(s);                                          \
1176             return;                                                     \
1177         }                                                               \
1178     } while (0)
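
/* Usage sketch (illustrative only): a read-modify-write handler typically
 * does
 *
 *     SRC_EA(env, src, opsize, 1, &addr);          sign-extended load
 *     ... compute dest from src ...
 *     DEST_EA(env, insn, opsize, dest, &addr);     store to the same EA
 *
 * where passing &addr to both avoids decoding the EA (and re-reading any
 * extension words) twice; disas_arith_im below follows this pattern.
 */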
1179 
1180 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1181 {
1182 #ifndef CONFIG_USER_ONLY
1183     return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1184            (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1185 #else
1186     return true;
1187 #endif
1188 }
1189 
1190 /* Generate a jump to an immediate address.  */
1191 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1192 {
1193     if (unlikely(s->singlestep_enabled)) {
1194         gen_exception(s, dest, EXCP_DEBUG);
1195     } else if (use_goto_tb(s, dest)) {
1196         tcg_gen_goto_tb(n);
1197         tcg_gen_movi_i32(QREG_PC, dest);
1198         tcg_gen_exit_tb((uintptr_t)s->tb + n);
1199     } else {
1200         gen_jmp_im(s, dest);
1201         tcg_gen_exit_tb(0);
1202     }
1203     s->is_jmp = DISAS_TB_JUMP;
1204 }
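
/* Direct block chaining: when the destination lies on the same page as the
 * start of the TB or the current instruction (see use_goto_tb above),
 * goto_tb/exit_tb(tb + n) lets the generated code patch a jump straight to
 * the next TB; otherwise the new PC is stored and exit_tb(0) returns to
 * the main translation loop.
 */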
1205 
1206 DISAS_INSN(scc)
1207 {
1208     DisasCompare c;
1209     int cond;
1210     TCGv tmp;
1211 
1212     cond = (insn >> 8) & 0xf;
1213     gen_cc_cond(&c, s, cond);
1214 
1215     tmp = tcg_temp_new();
1216     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1217     free_cond(&c);
1218 
1219     tcg_gen_neg_i32(tmp, tmp);
1220     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1221     tcg_temp_free(tmp);
1222 }
1223 
1224 DISAS_INSN(dbcc)
1225 {
1226     TCGLabel *l1;
1227     TCGv reg;
1228     TCGv tmp;
1229     int16_t offset;
1230     uint32_t base;
1231 
1232     reg = DREG(insn, 0);
1233     base = s->pc;
1234     offset = (int16_t)read_im16(env, s);
1235     l1 = gen_new_label();
1236     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1237 
1238     tmp = tcg_temp_new();
1239     tcg_gen_ext16s_i32(tmp, reg);
1240     tcg_gen_addi_i32(tmp, tmp, -1);
1241     gen_partset_reg(OS_WORD, reg, tmp);
1242     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1243     gen_jmp_tb(s, 1, base + offset);
1244     gen_set_label(l1);
1245     gen_jmp_tb(s, 0, s->pc);
1246 }
1247 
1248 DISAS_INSN(undef_mac)
1249 {
1250     gen_exception(s, s->pc - 2, EXCP_LINEA);
1251 }
1252 
1253 DISAS_INSN(undef_fpu)
1254 {
1255     gen_exception(s, s->pc - 2, EXCP_LINEF);
1256 }
1257 
1258 DISAS_INSN(undef)
1259 {
1260     /* ??? This is both instructions that are as yet unimplemented
1261        for the 680x0 series, as well as those that are implemented
1262        but actually illegal for CPU32 or pre-68020.  */
1263     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
1264                   insn, s->pc - 2);
1265     gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1266 }
1267 
1268 DISAS_INSN(mulw)
1269 {
1270     TCGv reg;
1271     TCGv tmp;
1272     TCGv src;
1273     int sign;
1274 
1275     sign = (insn & 0x100) != 0;
1276     reg = DREG(insn, 9);
1277     tmp = tcg_temp_new();
1278     if (sign)
1279         tcg_gen_ext16s_i32(tmp, reg);
1280     else
1281         tcg_gen_ext16u_i32(tmp, reg);
1282     SRC_EA(env, src, OS_WORD, sign, NULL);
1283     tcg_gen_mul_i32(tmp, tmp, src);
1284     tcg_gen_mov_i32(reg, tmp);
1285     gen_logic_cc(s, tmp, OS_LONG);
1286     tcg_temp_free(tmp);
1287 }
1288 
1289 DISAS_INSN(divw)
1290 {
1291     int sign;
1292     TCGv src;
1293     TCGv destr;
1294 
1295     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1296 
1297     sign = (insn & 0x100) != 0;
1298 
1299     /* dest.l / src.w */
1300 
1301     SRC_EA(env, src, OS_WORD, sign, NULL);
1302     destr = tcg_const_i32(REG(insn, 9));
1303     if (sign) {
1304         gen_helper_divsw(cpu_env, destr, src);
1305     } else {
1306         gen_helper_divuw(cpu_env, destr, src);
1307     }
1308     tcg_temp_free(destr);
1309 
1310     set_cc_op(s, CC_OP_FLAGS);
1311 }
1312 
1313 DISAS_INSN(divl)
1314 {
1315     TCGv num, reg, den;
1316     int sign;
1317     uint16_t ext;
1318 
1319     ext = read_im16(env, s);
1320 
1321     sign = (ext & 0x0800) != 0;
1322 
1323     if (ext & 0x400) {
1324         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1325             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1326             return;
1327         }
1328 
1329         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1330 
1331         SRC_EA(env, den, OS_LONG, 0, NULL);
1332         num = tcg_const_i32(REG(ext, 12));
1333         reg = tcg_const_i32(REG(ext, 0));
1334         if (sign) {
1335             gen_helper_divsll(cpu_env, num, reg, den);
1336         } else {
1337             gen_helper_divull(cpu_env, num, reg, den);
1338         }
1339         tcg_temp_free(reg);
1340         tcg_temp_free(num);
1341         set_cc_op(s, CC_OP_FLAGS);
1342         return;
1343     }
1344 
1345     /* divX.l <EA>, Dq        32/32 -> 32q     */
1346     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1347 
1348     SRC_EA(env, den, OS_LONG, 0, NULL);
1349     num = tcg_const_i32(REG(ext, 12));
1350     reg = tcg_const_i32(REG(ext, 0));
1351     if (sign) {
1352         gen_helper_divsl(cpu_env, num, reg, den);
1353     } else {
1354         gen_helper_divul(cpu_env, num, reg, den);
1355     }
1356     tcg_temp_free(reg);
1357     tcg_temp_free(num);
1358 
1359     set_cc_op(s, CC_OP_FLAGS);
1360 }
1361 
1362 static void bcd_add(TCGv dest, TCGv src)
1363 {
1364     TCGv t0, t1;
1365 
1366     /*  dest10 = dest10 + src10 + X
1367      *
1368      *        t1 = src
1369      *        t2 = t1 + 0x066
1370      *        t3 = t2 + dest + X
1371      *        t4 = t2 ^ dest
1372      *        t5 = t3 ^ t4
1373      *        t6 = ~t5 & 0x110
1374      *        t7 = (t6 >> 2) | (t6 >> 3)
1375      *        return t3 - t7
1376      */
1377 
1378     /* t1 = (src + 0x066) + dest + X
1379      *    = result with a possible excess 0x6 in some digits
1380      */
1381 
1382     t0 = tcg_const_i32(0x066);
1383     tcg_gen_add_i32(t0, t0, src);
1384 
1385     t1 = tcg_temp_new();
1386     tcg_gen_add_i32(t1, t0, dest);
1387     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1388 
1389     /* we will remove the excess 0x6 where there is no carry */
1390 
1391     /* t0 = (src + 0x0066) ^ dest
1392      *    = t1 without carries
1393      */
1394 
1395     tcg_gen_xor_i32(t0, t0, dest);
1396 
1397     /* extract the carries
1398      * t0 = t0 ^ t1
1399      *    = only the carries
1400      */
1401 
1402     tcg_gen_xor_i32(t0, t0, t1);
1403 
1404     /* generate 0x1 where there is no carry
1405      * and for each 0x10, generate a 0x6
1406      */
1407 
1408     tcg_gen_shri_i32(t0, t0, 3);
1409     tcg_gen_not_i32(t0, t0);
1410     tcg_gen_andi_i32(t0, t0, 0x22);
1411     tcg_gen_add_i32(dest, t0, t0);
1412     tcg_gen_add_i32(dest, dest, t0);
1413     tcg_temp_free(t0);
1414 
1415     /* remove the excess 0x6
1416      * for digits that have not generated a carry
1417      */
1418 
1419     tcg_gen_sub_i32(dest, t1, dest);
1420     tcg_temp_free(t1);
1421 }
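
/* Worked example (X clear): dest = 0x38, src = 0x29.
 *     t0 = 0x29 + 0x066                         = 0x08f
 *     t1 = 0x08f + 0x38                         = 0x0c7
 *     carries = (t0 ^ dest) ^ t1                = 0x070
 *     correction = (~(carries >> 3) & 0x22) * 3 = 0x060
 *     result = t1 - correction                  = 0x067, i.e. BCD 38 + 29
 * Bit 4 of "carries" is set, so the low digit produced a decimal carry and
 * keeps its added 0x6; only the high digit has its excess 0x6 removed.
 */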
1422 
1423 static void bcd_sub(TCGv dest, TCGv src)
1424 {
1425     TCGv t0, t1, t2;
1426 
1427     /*  dest10 = dest10 - src10 - X
1428      *         = bcd_add(dest + 1 - X, 0x199 - src)
1429      */
1430 
1431     /* t0 = 0x066 + (0x199 - src) */
1432 
1433     t0 = tcg_temp_new();
1434     tcg_gen_subfi_i32(t0, 0x1ff, src);
1435 
1436     /* t1 = t0 + dest + 1 - X*/
1437 
1438     t1 = tcg_temp_new();
1439     tcg_gen_add_i32(t1, t0, dest);
1440     tcg_gen_addi_i32(t1, t1, 1);
1441     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1442 
1443     /* t2 = t0 ^ dest */
1444 
1445     t2 = tcg_temp_new();
1446     tcg_gen_xor_i32(t2, t0, dest);
1447 
1448     /* t0 = t1 ^ t2 */
1449 
1450     tcg_gen_xor_i32(t0, t1, t2);
1451 
1452     /* t2 = ~t0 & 0x110
1453      * t0 = (t2 >> 2) | (t2 >> 3)
1454      *
1455      * to fit on 8bit operands, changed in:
1456      *
1457      * t2 = ~(t0 >> 3) & 0x22
1458      * t0 = t2 + t2
1459      * t0 = t0 + t2
1460      */
1461 
1462     tcg_gen_shri_i32(t2, t0, 3);
1463     tcg_gen_not_i32(t2, t2);
1464     tcg_gen_andi_i32(t2, t2, 0x22);
1465     tcg_gen_add_i32(t0, t2, t2);
1466     tcg_gen_add_i32(t0, t0, t2);
1467     tcg_temp_free(t2);
1468 
1469     /* return t1 - t0 */
1470 
1471     tcg_gen_sub_i32(dest, t1, t0);
1472     tcg_temp_free(t0);
1473     tcg_temp_free(t1);
1474 }
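
/* Worked example (X clear): dest = 0x42, src = 0x17 computes
 *     t1 = (0x1ff - 0x17) + 0x42 + 1 = 0x22b, correction = 0x006,
 *     result = 0x225: the low byte is BCD 42 - 17 = 25 and bit 8 is clear,
 * so bcd_flags() below reports no borrow in C/X.
 */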
1475 
1476 static void bcd_flags(TCGv val)
1477 {
1478     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1479     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1480 
1481     tcg_gen_shri_i32(QREG_CC_C, val, 8);
1482     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
1483 
1484     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1485 }
1486 
1487 DISAS_INSN(abcd_reg)
1488 {
1489     TCGv src;
1490     TCGv dest;
1491 
1492     gen_flush_flags(s); /* !Z is sticky */
1493 
1494     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1495     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1496     bcd_add(dest, src);
1497     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1498 
1499     bcd_flags(dest);
1500 }
1501 
1502 DISAS_INSN(abcd_mem)
1503 {
1504     TCGv src, dest, addr;
1505 
1506     gen_flush_flags(s); /* !Z is sticky */
1507 
1508     /* Indirect pre-decrement load (mode 4) */
1509 
1510     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1511                       NULL_QREG, NULL, EA_LOADU);
1512     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1513                        NULL_QREG, &addr, EA_LOADU);
1514 
1515     bcd_add(dest, src);
1516 
1517     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1518 
1519     bcd_flags(dest);
1520 }
1521 
1522 DISAS_INSN(sbcd_reg)
1523 {
1524     TCGv src, dest;
1525 
1526     gen_flush_flags(s); /* !Z is sticky */
1527 
1528     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1529     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1530 
1531     bcd_sub(dest, src);
1532 
1533     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1534 
1535     bcd_flags(dest);
1536 }
1537 
1538 DISAS_INSN(sbcd_mem)
1539 {
1540     TCGv src, dest, addr;
1541 
1542     gen_flush_flags(s); /* !Z is sticky */
1543 
1544     /* Indirect pre-decrement load (mode 4) */
1545 
1546     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1547                       NULL_QREG, NULL, EA_LOADU);
1548     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1549                        NULL_QREG, &addr, EA_LOADU);
1550 
1551     bcd_sub(dest, src);
1552 
1553     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1554 
1555     bcd_flags(dest);
1556 }
1557 
1558 DISAS_INSN(nbcd)
1559 {
1560     TCGv src, dest;
1561     TCGv addr;
1562 
1563     gen_flush_flags(s); /* !Z is sticky */
1564 
1565     SRC_EA(env, src, OS_BYTE, 0, &addr);
1566 
1567     dest = tcg_const_i32(0);
1568     bcd_sub(dest, src);
1569 
1570     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1571 
1572     bcd_flags(dest);
1573 
1574     tcg_temp_free(dest);
1575 }
1576 
1577 DISAS_INSN(addsub)
1578 {
1579     TCGv reg;
1580     TCGv dest;
1581     TCGv src;
1582     TCGv tmp;
1583     TCGv addr;
1584     int add;
1585     int opsize;
1586 
1587     add = (insn & 0x4000) != 0;
1588     opsize = insn_opsize(insn);
1589     reg = gen_extend(DREG(insn, 9), opsize, 1);
1590     dest = tcg_temp_new();
1591     if (insn & 0x100) {
1592         SRC_EA(env, tmp, opsize, 1, &addr);
1593         src = reg;
1594     } else {
1595         tmp = reg;
1596         SRC_EA(env, src, opsize, 1, NULL);
1597     }
1598     if (add) {
1599         tcg_gen_add_i32(dest, tmp, src);
1600         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1601         set_cc_op(s, CC_OP_ADDB + opsize);
1602     } else {
1603         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1604         tcg_gen_sub_i32(dest, tmp, src);
1605         set_cc_op(s, CC_OP_SUBB + opsize);
1606     }
1607     gen_update_cc_add(dest, src, opsize);
1608     if (insn & 0x100) {
1609         DEST_EA(env, insn, opsize, dest, &addr);
1610     } else {
1611         gen_partset_reg(opsize, DREG(insn, 9), dest);
1612     }
1613     tcg_temp_free(dest);
1614 }
1615 
1616 /* Reverse the order of the bits in REG.  */
1617 DISAS_INSN(bitrev)
1618 {
1619     TCGv reg;
1620     reg = DREG(insn, 0);
1621     gen_helper_bitrev(reg, reg);
1622 }
1623 
1624 DISAS_INSN(bitop_reg)
1625 {
1626     int opsize;
1627     int op;
1628     TCGv src1;
1629     TCGv src2;
1630     TCGv tmp;
1631     TCGv addr;
1632     TCGv dest;
1633 
1634     if ((insn & 0x38) != 0)
1635         opsize = OS_BYTE;
1636     else
1637         opsize = OS_LONG;
1638     op = (insn >> 6) & 3;
1639     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1640 
1641     gen_flush_flags(s);
1642     src2 = tcg_temp_new();
1643     if (opsize == OS_BYTE)
1644         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1645     else
1646         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1647 
1648     tmp = tcg_const_i32(1);
1649     tcg_gen_shl_i32(tmp, tmp, src2);
1650     tcg_temp_free(src2);
1651 
1652     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1653 
1654     dest = tcg_temp_new();
1655     switch (op) {
1656     case 1: /* bchg */
1657         tcg_gen_xor_i32(dest, src1, tmp);
1658         break;
1659     case 2: /* bclr */
1660         tcg_gen_andc_i32(dest, src1, tmp);
1661         break;
1662     case 3: /* bset */
1663         tcg_gen_or_i32(dest, src1, tmp);
1664         break;
1665     default: /* btst */
1666         break;
1667     }
1668     tcg_temp_free(tmp);
1669     if (op) {
1670         DEST_EA(env, insn, opsize, dest, &addr);
1671     }
1672     tcg_temp_free(dest);
1673 }
1674 
1675 DISAS_INSN(sats)
1676 {
1677     TCGv reg;
1678     reg = DREG(insn, 0);
1679     gen_flush_flags(s);
1680     gen_helper_sats(reg, reg, QREG_CC_V);
1681     gen_logic_cc(s, reg, OS_LONG);
1682 }
1683 
1684 static void gen_push(DisasContext *s, TCGv val)
1685 {
1686     TCGv tmp;
1687 
1688     tmp = tcg_temp_new();
1689     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1690     gen_store(s, OS_LONG, tmp, val);
1691     tcg_gen_mov_i32(QREG_SP, tmp);
1692     tcg_temp_free(tmp);
1693 }
1694 
1695 static TCGv mreg(int reg)
1696 {
1697     if (reg < 8) {
1698         /* Dx */
1699         return cpu_dregs[reg];
1700     }
1701     /* Ax */
1702     return cpu_aregs[reg & 7];
1703 }
1704 
1705 DISAS_INSN(movem)
1706 {
1707     TCGv addr, incr, tmp, r[16];
1708     int is_load = (insn & 0x0400) != 0;
1709     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
1710     uint16_t mask = read_im16(env, s);
1711     int mode = extract32(insn, 3, 3);
1712     int reg0 = REG(insn, 0);
1713     int i;
1714 
1715     tmp = cpu_aregs[reg0];
1716 
1717     switch (mode) {
1718     case 0: /* data register direct */
1719     case 1: /* addr register direct */
1720     do_addr_fault:
1721         gen_addr_fault(s);
1722         return;
1723 
1724     case 2: /* indirect */
1725         break;
1726 
1727     case 3: /* indirect post-increment */
1728         if (!is_load) {
1729             /* post-increment is not allowed */
1730             goto do_addr_fault;
1731         }
1732         break;
1733 
1734     case 4: /* indirect pre-decrement */
1735         if (is_load) {
1736             /* pre-decrement is not allowed */
1737             goto do_addr_fault;
1738         }
1739         /* We want a bare copy of the address reg, without any pre-decrement
1740            adjustment, as gen_lea would provide.  */
1741         break;
1742 
1743     default:
1744         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
1745         if (IS_NULL_QREG(tmp)) {
1746             goto do_addr_fault;
1747         }
1748         break;
1749     }
1750 
1751     addr = tcg_temp_new();
1752     tcg_gen_mov_i32(addr, tmp);
1753     incr = tcg_const_i32(opsize_bytes(opsize));
1754 
1755     if (is_load) {
1756         /* memory to register */
1757         for (i = 0; i < 16; i++) {
1758             if (mask & (1 << i)) {
1759                 r[i] = gen_load(s, opsize, addr, 1);
1760                 tcg_gen_add_i32(addr, addr, incr);
1761             }
1762         }
1763         for (i = 0; i < 16; i++) {
1764             if (mask & (1 << i)) {
1765                 tcg_gen_mov_i32(mreg(i), r[i]);
1766                 tcg_temp_free(r[i]);
1767             }
1768         }
1769         if (mode == 3) {
1770             /* post-increment: movem (An)+,X */
1771             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1772         }
1773     } else {
1774         /* register to memory */
1775         if (mode == 4) {
1776             /* pre-decrement: movem X,-(An) */
1777             for (i = 15; i >= 0; i--) {
1778                 if ((mask << i) & 0x8000) {
1779                     tcg_gen_sub_i32(addr, addr, incr);
1780                     if (reg0 + 8 == i &&
1781                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
1782                         /* M68020+: if the addressing register is the
1783                          * register moved to memory, the value written
1784                          * is the initial value decremented by the size of
1785                          * the operation, regardless of how many actual
1786                          * stores have been performed until this point.
1787                          * M68000/M68010: the value is the initial value.
1788                          */
1789                         tmp = tcg_temp_new();
1790                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
1791                         gen_store(s, opsize, addr, tmp);
1792                         tcg_temp_free(tmp);
1793                     } else {
1794                         gen_store(s, opsize, addr, mreg(i));
1795                     }
1796                 }
1797             }
1798             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1799         } else {
1800             for (i = 0; i < 16; i++) {
1801                 if (mask & (1 << i)) {
1802                     gen_store(s, opsize, addr, mreg(i));
1803                     tcg_gen_add_i32(addr, addr, incr);
1804                 }
1805             }
1806         }
1807     }
1808 
1809     tcg_temp_free(incr);
1810     tcg_temp_free(addr);
1811 }
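
/* Register mask example: for the pre-decrement form the mask is mirrored,
 * so "movem.l d0/d1/a6,-(a7)" uses mask 0xc002 (D0 = bit 15 ... A7 = bit 0)
 * and the loop above stores A6, then D1, then D0 at descending addresses;
 * for all other modes "movem.l (a0)+,d0/d1/a6" uses mask 0x4003
 * (D0 = bit 0 ... A7 = bit 15).
 */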
1812 
1813 DISAS_INSN(bitop_im)
1814 {
1815     int opsize;
1816     int op;
1817     TCGv src1;
1818     uint32_t mask;
1819     int bitnum;
1820     TCGv tmp;
1821     TCGv addr;
1822 
1823     if ((insn & 0x38) != 0)
1824         opsize = OS_BYTE;
1825     else
1826         opsize = OS_LONG;
1827     op = (insn >> 6) & 3;
1828 
1829     bitnum = read_im16(env, s);
1830     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
1831         if (bitnum & 0xfe00) {
1832             disas_undef(env, s, insn);
1833             return;
1834         }
1835     } else {
1836         if (bitnum & 0xff00) {
1837             disas_undef(env, s, insn);
1838             return;
1839         }
1840     }
1841 
1842     SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
1843 
1844     gen_flush_flags(s);
1845     if (opsize == OS_BYTE)
1846         bitnum &= 7;
1847     else
1848         bitnum &= 31;
1849     mask = 1 << bitnum;
1850 
1851     tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
1852 
1853     if (op) {
1854         tmp = tcg_temp_new();
1855         switch (op) {
1856         case 1: /* bchg */
1857             tcg_gen_xori_i32(tmp, src1, mask);
1858             break;
1859         case 2: /* bclr */
1860             tcg_gen_andi_i32(tmp, src1, ~mask);
1861             break;
1862         case 3: /* bset */
1863             tcg_gen_ori_i32(tmp, src1, mask);
1864             break;
1865         default: /* btst */
1866             break;
1867         }
1868         DEST_EA(env, insn, opsize, tmp, &addr);
1869         tcg_temp_free(tmp);
1870     }
1871 }
1872 
1873 DISAS_INSN(arith_im)
1874 {
1875     int op;
1876     TCGv im;
1877     TCGv src1;
1878     TCGv dest;
1879     TCGv addr;
1880     int opsize;
1881 
1882     op = (insn >> 9) & 7;
1883     opsize = insn_opsize(insn);
1884     switch (opsize) {
1885     case OS_BYTE:
1886         im = tcg_const_i32((int8_t)read_im8(env, s));
1887         break;
1888     case OS_WORD:
1889         im = tcg_const_i32((int16_t)read_im16(env, s));
1890         break;
1891     case OS_LONG:
1892         im = tcg_const_i32(read_im32(env, s));
1893         break;
1894     default:
1895         abort();
1896     }
1897     SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
1898     dest = tcg_temp_new();
1899     switch (op) {
1900     case 0: /* ori */
1901         tcg_gen_or_i32(dest, src1, im);
1902         gen_logic_cc(s, dest, opsize);
1903         break;
1904     case 1: /* andi */
1905         tcg_gen_and_i32(dest, src1, im);
1906         gen_logic_cc(s, dest, opsize);
1907         break;
1908     case 2: /* subi */
1909         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
1910         tcg_gen_sub_i32(dest, src1, im);
1911         gen_update_cc_add(dest, im, opsize);
1912         set_cc_op(s, CC_OP_SUBB + opsize);
1913         break;
1914     case 3: /* addi */
1915         tcg_gen_add_i32(dest, src1, im);
1916         gen_update_cc_add(dest, im, opsize);
1917         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1918         set_cc_op(s, CC_OP_ADDB + opsize);
1919         break;
1920     case 5: /* eori */
1921         tcg_gen_xor_i32(dest, src1, im);
1922         gen_logic_cc(s, dest, opsize);
1923         break;
1924     case 6: /* cmpi */
1925         gen_update_cc_cmp(s, src1, im, opsize);
1926         break;
1927     default:
1928         abort();
1929     }
1930     tcg_temp_free(im);
1931     if (op != 6) {
1932         DEST_EA(env, insn, opsize, dest, &addr);
1933     }
1934     tcg_temp_free(dest);
1935 }
1936 
1937 DISAS_INSN(cas)
1938 {
1939     int opsize;
1940     TCGv addr;
1941     uint16_t ext;
1942     TCGv load;
1943     TCGv cmp;
1944     TCGMemOp opc;
1945 
1946     switch ((insn >> 9) & 3) {
1947     case 1:
1948         opsize = OS_BYTE;
1949         opc = MO_SB;
1950         break;
1951     case 2:
1952         opsize = OS_WORD;
1953         opc = MO_TESW;
1954         break;
1955     case 3:
1956         opsize = OS_LONG;
1957         opc = MO_TESL;
1958         break;
1959     default:
1960         g_assert_not_reached();
1961     }
1962 
1963     ext = read_im16(env, s);
1964 
1965     /* cas Dc,Du,<EA> */
1966 
1967     addr = gen_lea(env, s, insn, opsize);
1968     if (IS_NULL_QREG(addr)) {
1969         gen_addr_fault(s);
1970         return;
1971     }
1972 
1973     cmp = gen_extend(DREG(ext, 0), opsize, 1);
1974 
1975     /* if  <EA> == Dc then
1976      *     <EA> = Du
1977      *     Dc = <EA> (because <EA> == Dc)
1978      * else
1979      *     Dc = <EA>
1980      */
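    /* Illustrative reading (annotation): "cas.l %d0,%d1,(%a0)" compares %d0
     * with the longword at (%a0); on a match the memory operand is replaced
     * with %d1, otherwise %d0 is loaded from memory.  In both cases the
     * flags reflect the comparison of the loaded value with the old %d0.
     */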
1981 
1982     load = tcg_temp_new();
1983     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
1984                                IS_USER(s), opc);
1985     /* update flags before setting cmp to load */
1986     gen_update_cc_cmp(s, load, cmp, opsize);
1987     gen_partset_reg(opsize, DREG(ext, 0), load);
1988 
1989     tcg_temp_free(load);
1990 
1991     switch (extract32(insn, 3, 3)) {
1992     case 3: /* Indirect postincrement.  */
1993         tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
1994         break;
1995     case 4: /* Indirect predecrement.  */
1996         tcg_gen_mov_i32(AREG(insn, 0), addr);
1997         break;
1998     }
1999 }
2000 
2001 DISAS_INSN(cas2w)
2002 {
2003     uint16_t ext1, ext2;
2004     TCGv addr1, addr2;
2005     TCGv regs;
2006 
2007     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2008 
2009     ext1 = read_im16(env, s);
2010 
2011     if (ext1 & 0x8000) {
2012         /* Address Register */
2013         addr1 = AREG(ext1, 12);
2014     } else {
2015         /* Data Register */
2016         addr1 = DREG(ext1, 12);
2017     }
2018 
2019     ext2 = read_im16(env, s);
2020     if (ext2 & 0x8000) {
2021         /* Address Register */
2022         addr2 = AREG(ext2, 12);
2023     } else {
2024         /* Data Register */
2025         addr2 = DREG(ext2, 12);
2026     }
2027 
2028     /* if (R1) == Dc1 && (R2) == Dc2 then
2029      *     (R1) = Du1
2030      *     (R2) = Du2
2031      * else
2032      *     Dc1 = (R1)
2033      *     Dc2 = (R2)
2034      */
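    /* Annotation: the four register numbers are packed into one constant for
     * the helper, Du2 in bits 2:0, Du1 in bits 5:3, Dc2 in bits 8:6 and Dc1
     * in bits 11:9, as built by the shifts below.
     */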
2035 
2036     regs = tcg_const_i32(REG(ext2, 6) |
2037                          (REG(ext1, 6) << 3) |
2038                          (REG(ext2, 0) << 6) |
2039                          (REG(ext1, 0) << 9));
2040     gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2041     tcg_temp_free(regs);
2042 
2043     /* Note that cas2w also assigns to env->cc_op.  */
2044     s->cc_op = CC_OP_CMPW;
2045     s->cc_op_synced = 1;
2046 }
2047 
2048 DISAS_INSN(cas2l)
2049 {
2050     uint16_t ext1, ext2;
2051     TCGv addr1, addr2, regs;
2052 
2053     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2054 
2055     ext1 = read_im16(env, s);
2056 
2057     if (ext1 & 0x8000) {
2058         /* Address Register */
2059         addr1 = AREG(ext1, 12);
2060     } else {
2061         /* Data Register */
2062         addr1 = DREG(ext1, 12);
2063     }
2064 
2065     ext2 = read_im16(env, s);
2066     if (ext2 & 0x8000) {
2067         /* Address Register */
2068         addr2 = AREG(ext2, 12);
2069     } else {
2070         /* Data Register */
2071         addr2 = DREG(ext2, 12);
2072     }
2073 
2074     /* if (R1) == Dc1 && (R2) == Dc2 then
2075      *     (R1) = Du1
2076      *     (R2) = Du2
2077      * else
2078      *     Dc1 = (R1)
2079      *     Dc2 = (R2)
2080      */
2081 
2082     regs = tcg_const_i32(REG(ext2, 6) |
2083                          (REG(ext1, 6) << 3) |
2084                          (REG(ext2, 0) << 6) |
2085                          (REG(ext1, 0) << 9));
2086     gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2087     tcg_temp_free(regs);
2088 
2089     /* Note that cas2l also assigns to env->cc_op.  */
2090     s->cc_op = CC_OP_CMPL;
2091     s->cc_op_synced = 1;
2092 }
2093 
2094 DISAS_INSN(byterev)
2095 {
2096     TCGv reg;
2097 
2098     reg = DREG(insn, 0);
2099     tcg_gen_bswap32_i32(reg, reg);
2100 }
2101 
2102 DISAS_INSN(move)
2103 {
2104     TCGv src;
2105     TCGv dest;
2106     int op;
2107     int opsize;
2108 
2109     switch (insn >> 12) {
2110     case 1: /* move.b */
2111         opsize = OS_BYTE;
2112         break;
2113     case 2: /* move.l */
2114         opsize = OS_LONG;
2115         break;
2116     case 3: /* move.w */
2117         opsize = OS_WORD;
2118         break;
2119     default:
2120         abort();
2121     }
2122     SRC_EA(env, src, opsize, 1, NULL);
2123     op = (insn >> 6) & 7;
2124     if (op == 1) {
2125         /* movea */
2126         /* The value will already have been sign extended.  */
2127         dest = AREG(insn, 9);
2128         tcg_gen_mov_i32(dest, src);
2129     } else {
2130         /* normal move */
2131         uint16_t dest_ea;
2132         dest_ea = ((insn >> 9) & 7) | (op << 3);
2133         DEST_EA(env, dest_ea, opsize, src, NULL);
2134         /* This will be correct because loads sign extend.  */
2135         gen_logic_cc(s, src, opsize);
2136     }
2137 }
2138 
2139 DISAS_INSN(negx)
2140 {
2141     TCGv z;
2142     TCGv src;
2143     TCGv addr;
2144     int opsize;
2145 
2146     opsize = insn_opsize(insn);
2147     SRC_EA(env, src, opsize, 1, &addr);
2148 
2149     gen_flush_flags(s); /* compute old Z */
2150 
2151     /* Perform subtract with borrow.
2152      * (X, N) = -(src + X);
2153      */
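    /* Annotation: the add2/sub2 pair below first forms the 33-bit sum
     * src + X, then subtracts it from zero, leaving -(src + X) in QREG_CC_N
     * and the borrow in QREG_CC_X (masked to bit 0 just below).
     */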
2154 
2155     z = tcg_const_i32(0);
2156     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2157     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2158     tcg_temp_free(z);
2159     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2160 
2161     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2162 
2163     /* Compute signed-overflow for negation.  The normal formula for
2164      * subtraction is (res ^ dest) & (src ^ dest), but with dest==0
2165      * this simplifies to res & src.
2166      */
2167 
2168     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2169 
2170     /* Copy the rest of the results into place.  */
2171     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2172     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2173 
2174     set_cc_op(s, CC_OP_FLAGS);
2175 
2176     /* result is in QREG_CC_N */
2177 
2178     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2179 }
2180 
2181 DISAS_INSN(lea)
2182 {
2183     TCGv reg;
2184     TCGv tmp;
2185 
2186     reg = AREG(insn, 9);
2187     tmp = gen_lea(env, s, insn, OS_LONG);
2188     if (IS_NULL_QREG(tmp)) {
2189         gen_addr_fault(s);
2190         return;
2191     }
2192     tcg_gen_mov_i32(reg, tmp);
2193 }
2194 
2195 DISAS_INSN(clr)
2196 {
2197     int opsize;
2198     TCGv zero;
2199 
2200     zero = tcg_const_i32(0);
2201 
2202     opsize = insn_opsize(insn);
2203     DEST_EA(env, insn, opsize, zero, NULL);
2204     gen_logic_cc(s, zero, opsize);
2205     tcg_temp_free(zero);
2206 }
2207 
2208 static TCGv gen_get_ccr(DisasContext *s)
2209 {
2210     TCGv dest;
2211 
2212     gen_flush_flags(s);
2213     update_cc_op(s);
2214     dest = tcg_temp_new();
2215     gen_helper_get_ccr(dest, cpu_env);
2216     return dest;
2217 }
2218 
2219 DISAS_INSN(move_from_ccr)
2220 {
2221     TCGv ccr;
2222 
2223     ccr = gen_get_ccr(s);
2224     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2225 }
2226 
2227 DISAS_INSN(neg)
2228 {
2229     TCGv src1;
2230     TCGv dest;
2231     TCGv addr;
2232     int opsize;
2233 
2234     opsize = insn_opsize(insn);
2235     SRC_EA(env, src1, opsize, 1, &addr);
2236     dest = tcg_temp_new();
2237     tcg_gen_neg_i32(dest, src1);
2238     set_cc_op(s, CC_OP_SUBB + opsize);
2239     gen_update_cc_add(dest, src1, opsize);
2240     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2241     DEST_EA(env, insn, opsize, dest, &addr);
2242     tcg_temp_free(dest);
2243 }
2244 
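/* Annotation (not in the original source): the ccr_only path below encodes
 * the flags in this translator's internal layout: C and X live in bit 0,
 * N and V are carried in the sign bit (hence 0 or -1), and Z is stored
 * inverted, so writing 0 means the Z flag is set.
 */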
2245 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2246 {
2247     if (ccr_only) {
2248         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2249         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2250         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2251         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2252         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2253     } else {
2254         gen_helper_set_sr(cpu_env, tcg_const_i32(val));
2255     }
2256     set_cc_op(s, CC_OP_FLAGS);
2257 }
2258 
2259 static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2260                        int ccr_only)
2261 {
2262     if ((insn & 0x38) == 0) {
2263         if (ccr_only) {
2264             gen_helper_set_ccr(cpu_env, DREG(insn, 0));
2265         } else {
2266             gen_helper_set_sr(cpu_env, DREG(insn, 0));
2267         }
2268         set_cc_op(s, CC_OP_FLAGS);
2269     } else if ((insn & 0x3f) == 0x3c) {
2270         uint16_t val;
2271         val = read_im16(env, s);
2272         gen_set_sr_im(s, val, ccr_only);
2273     } else {
2274         disas_undef(env, s, insn);
2275     }
2276 }
2277 
2278 
2279 DISAS_INSN(move_to_ccr)
2280 {
2281     gen_set_sr(env, s, insn, 1);
2282 }
2283 
2284 DISAS_INSN(not)
2285 {
2286     TCGv src1;
2287     TCGv dest;
2288     TCGv addr;
2289     int opsize;
2290 
2291     opsize = insn_opsize(insn);
2292     SRC_EA(env, src1, opsize, 1, &addr);
2293     dest = tcg_temp_new();
2294     tcg_gen_not_i32(dest, src1);
2295     DEST_EA(env, insn, opsize, dest, &addr);
2296     gen_logic_cc(s, dest, opsize);
2297 }
2298 
2299 DISAS_INSN(swap)
2300 {
2301     TCGv src1;
2302     TCGv src2;
2303     TCGv reg;
2304 
2305     src1 = tcg_temp_new();
2306     src2 = tcg_temp_new();
2307     reg = DREG(insn, 0);
2308     tcg_gen_shli_i32(src1, reg, 16);
2309     tcg_gen_shri_i32(src2, reg, 16);
2310     tcg_gen_or_i32(reg, src1, src2);
2311     tcg_temp_free(src2);
2312     tcg_temp_free(src1);
2313     gen_logic_cc(s, reg, OS_LONG);
2314 }
2315 
2316 DISAS_INSN(bkpt)
2317 {
2318     gen_exception(s, s->pc - 2, EXCP_DEBUG);
2319 }
2320 
2321 DISAS_INSN(pea)
2322 {
2323     TCGv tmp;
2324 
2325     tmp = gen_lea(env, s, insn, OS_LONG);
2326     if (IS_NULL_QREG(tmp)) {
2327         gen_addr_fault(s);
2328         return;
2329     }
2330     gen_push(s, tmp);
2331 }
2332 
2333 DISAS_INSN(ext)
2334 {
2335     int op;
2336     TCGv reg;
2337     TCGv tmp;
2338 
2339     reg = DREG(insn, 0);
2340     op = (insn >> 6) & 7;
2341     tmp = tcg_temp_new();
2342     if (op == 3)
2343         tcg_gen_ext16s_i32(tmp, reg);
2344     else
2345         tcg_gen_ext8s_i32(tmp, reg);
2346     if (op == 2)
2347         gen_partset_reg(OS_WORD, reg, tmp);
2348     else
2349         tcg_gen_mov_i32(reg, tmp);
2350     gen_logic_cc(s, tmp, OS_LONG);
2351     tcg_temp_free(tmp);
2352 }
2353 
2354 DISAS_INSN(tst)
2355 {
2356     int opsize;
2357     TCGv tmp;
2358 
2359     opsize = insn_opsize(insn);
2360     SRC_EA(env, tmp, opsize, 1, NULL);
2361     gen_logic_cc(s, tmp, opsize);
2362 }
2363 
2364 DISAS_INSN(pulse)
2365 {
2366     /* Implemented as a NOP.  */
2367 }
2368 
2369 DISAS_INSN(illegal)
2370 {
2371     gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2372 }
2373 
2374 /* ??? This should be atomic.  */
2375 DISAS_INSN(tas)
2376 {
2377     TCGv dest;
2378     TCGv src1;
2379     TCGv addr;
2380 
2381     dest = tcg_temp_new();
2382     SRC_EA(env, src1, OS_BYTE, 1, &addr);
2383     gen_logic_cc(s, src1, OS_BYTE);
2384     tcg_gen_ori_i32(dest, src1, 0x80);
2385     DEST_EA(env, insn, OS_BYTE, dest, &addr);
2386     tcg_temp_free(dest);
2387 }
2388 
2389 DISAS_INSN(mull)
2390 {
2391     uint16_t ext;
2392     TCGv src1;
2393     int sign;
2394 
2395     ext = read_im16(env, s);
2396 
2397     sign = ext & 0x800;
2398 
2399     if (ext & 0x400) {
2400         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2401             gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
2402             return;
2403         }
2404 
2405         SRC_EA(env, src1, OS_LONG, 0, NULL);
2406 
2407         if (sign) {
2408             tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2409         } else {
2410             tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2411         }
2412         /* if Dl == Dh, 68040 returns low word */
2413         tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2414         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2415         tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2416 
2417         tcg_gen_movi_i32(QREG_CC_V, 0);
2418         tcg_gen_movi_i32(QREG_CC_C, 0);
2419 
2420         set_cc_op(s, CC_OP_FLAGS);
2421         return;
2422     }
2423     SRC_EA(env, src1, OS_LONG, 0, NULL);
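    /* Annotation: for the 32-bit result forms handled below, V must be set
     * when the full 64-bit product does not fit in 32 bits: for muls.l when
     * the high word differs from the sign extension of the low word, and
     * for mulu.l when the high word is non-zero.
     */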
2424     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2425         tcg_gen_movi_i32(QREG_CC_C, 0);
2426         if (sign) {
2427             tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2428             /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2429             tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2430             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2431         } else {
2432             tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2433             /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2434             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2435         }
2436         tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2437         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2438 
2439         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2440 
2441         set_cc_op(s, CC_OP_FLAGS);
2442     } else {
2443         /* The upper 32 bits of the product are discarded, so
2444            muls.l and mulu.l are functionally equivalent.  */
2445         tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2446         gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2447     }
2448 }
2449 
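/* Annotation (not in the original source): LINK An,#disp pushes An, then
 * (for An other than A7) loads An with the address of the value just
 * pushed, and finally adds the displacement to the stack pointer.
 */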
2450 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2451 {
2452     TCGv reg;
2453     TCGv tmp;
2454 
2455     reg = AREG(insn, 0);
2456     tmp = tcg_temp_new();
2457     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2458     gen_store(s, OS_LONG, tmp, reg);
2459     if ((insn & 7) != 7) {
2460         tcg_gen_mov_i32(reg, tmp);
2461     }
2462     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2463     tcg_temp_free(tmp);
2464 }
2465 
2466 DISAS_INSN(link)
2467 {
2468     int16_t offset;
2469 
2470     offset = read_im16(env, s);
2471     gen_link(s, insn, offset);
2472 }
2473 
2474 DISAS_INSN(linkl)
2475 {
2476     int32_t offset;
2477 
2478     offset = read_im32(env, s);
2479     gen_link(s, insn, offset);
2480 }
2481 
2482 DISAS_INSN(unlk)
2483 {
2484     TCGv src;
2485     TCGv reg;
2486     TCGv tmp;
2487 
2488     src = tcg_temp_new();
2489     reg = AREG(insn, 0);
2490     tcg_gen_mov_i32(src, reg);
2491     tmp = gen_load(s, OS_LONG, src, 0);
2492     tcg_gen_mov_i32(reg, tmp);
2493     tcg_gen_addi_i32(QREG_SP, src, 4);
2494     tcg_temp_free(src);
2495 }
2496 
2497 DISAS_INSN(nop)
2498 {
2499 }
2500 
2501 DISAS_INSN(rtd)
2502 {
2503     TCGv tmp;
2504     int16_t offset = read_im16(env, s);
2505 
2506     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2507     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2508     gen_jmp(s, tmp);
2509 }
2510 
2511 DISAS_INSN(rts)
2512 {
2513     TCGv tmp;
2514 
2515     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2516     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2517     gen_jmp(s, tmp);
2518 }
2519 
2520 DISAS_INSN(jump)
2521 {
2522     TCGv tmp;
2523 
2524     /* Load the target address first to ensure correct exception
2525        behavior.  */
2526     tmp = gen_lea(env, s, insn, OS_LONG);
2527     if (IS_NULL_QREG(tmp)) {
2528         gen_addr_fault(s);
2529         return;
2530     }
2531     if ((insn & 0x40) == 0) {
2532         /* jsr */
2533         gen_push(s, tcg_const_i32(s->pc));
2534     }
2535     gen_jmp(s, tmp);
2536 }
2537 
2538 DISAS_INSN(addsubq)
2539 {
2540     TCGv src;
2541     TCGv dest;
2542     TCGv val;
2543     int imm;
2544     TCGv addr;
2545     int opsize;
2546 
2547     if ((insn & 070) == 010) {
2548         /* Operation on address register is always long.  */
2549         opsize = OS_LONG;
2550     } else {
2551         opsize = insn_opsize(insn);
2552     }
2553     SRC_EA(env, src, opsize, 1, &addr);
2554     imm = (insn >> 9) & 7;
2555     if (imm == 0) {
2556         imm = 8;
2557     }
2558     val = tcg_const_i32(imm);
2559     dest = tcg_temp_new();
2560     tcg_gen_mov_i32(dest, src);
2561     if ((insn & 0x38) == 0x08) {
2562         /* Don't update condition codes if the destination is an
2563            address register.  */
2564         if (insn & 0x0100) {
2565             tcg_gen_sub_i32(dest, dest, val);
2566         } else {
2567             tcg_gen_add_i32(dest, dest, val);
2568         }
2569     } else {
2570         if (insn & 0x0100) {
2571             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2572             tcg_gen_sub_i32(dest, dest, val);
2573             set_cc_op(s, CC_OP_SUBB + opsize);
2574         } else {
2575             tcg_gen_add_i32(dest, dest, val);
2576             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2577             set_cc_op(s, CC_OP_ADDB + opsize);
2578         }
2579         gen_update_cc_add(dest, val, opsize);
2580     }
2581     tcg_temp_free(val);
2582     DEST_EA(env, insn, opsize, dest, &addr);
2583     tcg_temp_free(dest);
2584 }
2585 
2586 DISAS_INSN(tpf)
2587 {
2588     switch (insn & 7) {
2589     case 2: /* One extension word.  */
2590         s->pc += 2;
2591         break;
2592     case 3: /* Two extension words.  */
2593         s->pc += 4;
2594         break;
2595     case 4: /* No extension words.  */
2596         break;
2597     default:
2598         disas_undef(env, s, insn);
2599     }
2600 }
2601 
2602 DISAS_INSN(branch)
2603 {
2604     int32_t offset;
2605     uint32_t base;
2606     int op;
2607     TCGLabel *l1;
2608 
2609     base = s->pc;
2610     op = (insn >> 8) & 0xf;
2611     offset = (int8_t)insn;
2612     if (offset == 0) {
2613         offset = (int16_t)read_im16(env, s);
2614     } else if (offset == -1) {
2615         offset = read_im32(env, s);
2616     }
2617     if (op == 1) {
2618         /* bsr */
2619         gen_push(s, tcg_const_i32(s->pc));
2620     }
2621     if (op > 1) {
2622         /* Bcc */
2623         l1 = gen_new_label();
2624         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2625         gen_jmp_tb(s, 1, base + offset);
2626         gen_set_label(l1);
2627         gen_jmp_tb(s, 0, s->pc);
2628     } else {
2629         /* Unconditional branch.  */
2630         gen_jmp_tb(s, 0, base + offset);
2631     }
2632 }
2633 
2634 DISAS_INSN(moveq)
2635 {
2636     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2637     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2638 }
2639 
2640 DISAS_INSN(mvzs)
2641 {
2642     int opsize;
2643     TCGv src;
2644     TCGv reg;
2645 
2646     if (insn & 0x40)
2647         opsize = OS_WORD;
2648     else
2649         opsize = OS_BYTE;
2650     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2651     reg = DREG(insn, 9);
2652     tcg_gen_mov_i32(reg, src);
2653     gen_logic_cc(s, src, opsize);
2654 }
2655 
2656 DISAS_INSN(or)
2657 {
2658     TCGv reg;
2659     TCGv dest;
2660     TCGv src;
2661     TCGv addr;
2662     int opsize;
2663 
2664     opsize = insn_opsize(insn);
2665     reg = gen_extend(DREG(insn, 9), opsize, 0);
2666     dest = tcg_temp_new();
2667     if (insn & 0x100) {
2668         SRC_EA(env, src, opsize, 0, &addr);
2669         tcg_gen_or_i32(dest, src, reg);
2670         DEST_EA(env, insn, opsize, dest, &addr);
2671     } else {
2672         SRC_EA(env, src, opsize, 0, NULL);
2673         tcg_gen_or_i32(dest, src, reg);
2674         gen_partset_reg(opsize, DREG(insn, 9), dest);
2675     }
2676     gen_logic_cc(s, dest, opsize);
2677     tcg_temp_free(dest);
2678 }
2679 
2680 DISAS_INSN(suba)
2681 {
2682     TCGv src;
2683     TCGv reg;
2684 
2685     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2686     reg = AREG(insn, 9);
2687     tcg_gen_sub_i32(reg, reg, src);
2688 }
2689 
2690 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2691 {
2692     TCGv tmp;
2693 
2694     gen_flush_flags(s); /* compute old Z */
2695 
2696     /* Perform subtract with borrow.
2697      * (X, N) = dest - (src + X);
2698      */
2699 
2700     tmp = tcg_const_i32(0);
2701     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2702     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2703     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2704     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2705 
2706     /* Compute signed-overflow for subtraction.  */
2707 
2708     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2709     tcg_gen_xor_i32(tmp, dest, src);
2710     tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2711     tcg_temp_free(tmp);
2712 
2713     /* Copy the rest of the results into place.  */
2714     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2715     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2716 
2717     set_cc_op(s, CC_OP_FLAGS);
2718 
2719     /* result is in QREG_CC_N */
2720 }
2721 
2722 DISAS_INSN(subx_reg)
2723 {
2724     TCGv dest;
2725     TCGv src;
2726     int opsize;
2727 
2728     opsize = insn_opsize(insn);
2729 
2730     src = gen_extend(DREG(insn, 0), opsize, 1);
2731     dest = gen_extend(DREG(insn, 9), opsize, 1);
2732 
2733     gen_subx(s, src, dest, opsize);
2734 
2735     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2736 }
2737 
2738 DISAS_INSN(subx_mem)
2739 {
2740     TCGv src;
2741     TCGv addr_src;
2742     TCGv dest;
2743     TCGv addr_dest;
2744     int opsize;
2745 
2746     opsize = insn_opsize(insn);
2747 
2748     addr_src = AREG(insn, 0);
2749     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2750     src = gen_load(s, opsize, addr_src, 1);
2751 
2752     addr_dest = AREG(insn, 9);
2753     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2754     dest = gen_load(s, opsize, addr_dest, 1);
2755 
2756     gen_subx(s, src, dest, opsize);
2757 
2758     gen_store(s, opsize, addr_dest, QREG_CC_N);
2759 }
2760 
2761 DISAS_INSN(mov3q)
2762 {
2763     TCGv src;
2764     int val;
2765 
2766     val = (insn >> 9) & 7;
2767     if (val == 0)
2768         val = -1;
2769     src = tcg_const_i32(val);
2770     gen_logic_cc(s, src, OS_LONG);
2771     DEST_EA(env, insn, OS_LONG, src, NULL);
2772     tcg_temp_free(src);
2773 }
2774 
2775 DISAS_INSN(cmp)
2776 {
2777     TCGv src;
2778     TCGv reg;
2779     int opsize;
2780 
2781     opsize = insn_opsize(insn);
2782     SRC_EA(env, src, opsize, 1, NULL);
2783     reg = gen_extend(DREG(insn, 9), opsize, 1);
2784     gen_update_cc_cmp(s, reg, src, opsize);
2785 }
2786 
2787 DISAS_INSN(cmpa)
2788 {
2789     int opsize;
2790     TCGv src;
2791     TCGv reg;
2792 
2793     if (insn & 0x100) {
2794         opsize = OS_LONG;
2795     } else {
2796         opsize = OS_WORD;
2797     }
2798     SRC_EA(env, src, opsize, 1, NULL);
2799     reg = AREG(insn, 9);
2800     gen_update_cc_cmp(s, reg, src, OS_LONG);
2801 }
2802 
2803 DISAS_INSN(cmpm)
2804 {
2805     int opsize = insn_opsize(insn);
2806     TCGv src, dst;
2807 
2808     /* Post-increment load (mode 3) from Ay.  */
2809     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
2810                       NULL_QREG, NULL, EA_LOADS);
2811     /* Post-increment load (mode 3) from Ax.  */
2812     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
2813                       NULL_QREG, NULL, EA_LOADS);
2814 
2815     gen_update_cc_cmp(s, dst, src, opsize);
2816 }
2817 
2818 DISAS_INSN(eor)
2819 {
2820     TCGv src;
2821     TCGv dest;
2822     TCGv addr;
2823     int opsize;
2824 
2825     opsize = insn_opsize(insn);
2826 
2827     SRC_EA(env, src, opsize, 0, &addr);
2828     dest = tcg_temp_new();
2829     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
2830     gen_logic_cc(s, dest, opsize);
2831     DEST_EA(env, insn, opsize, dest, &addr);
2832     tcg_temp_free(dest);
2833 }
2834 
2835 static void do_exg(TCGv reg1, TCGv reg2)
2836 {
2837     TCGv temp = tcg_temp_new();
2838     tcg_gen_mov_i32(temp, reg1);
2839     tcg_gen_mov_i32(reg1, reg2);
2840     tcg_gen_mov_i32(reg2, temp);
2841     tcg_temp_free(temp);
2842 }
2843 
2844 DISAS_INSN(exg_dd)
2845 {
2846     /* exchange Dx and Dy */
2847     do_exg(DREG(insn, 9), DREG(insn, 0));
2848 }
2849 
2850 DISAS_INSN(exg_aa)
2851 {
2852     /* exchange Ax and Ay */
2853     do_exg(AREG(insn, 9), AREG(insn, 0));
2854 }
2855 
2856 DISAS_INSN(exg_da)
2857 {
2858     /* exchange Dx and Ay */
2859     do_exg(DREG(insn, 9), AREG(insn, 0));
2860 }
2861 
2862 DISAS_INSN(and)
2863 {
2864     TCGv src;
2865     TCGv reg;
2866     TCGv dest;
2867     TCGv addr;
2868     int opsize;
2869 
2870     dest = tcg_temp_new();
2871 
2872     opsize = insn_opsize(insn);
2873     reg = DREG(insn, 9);
2874     if (insn & 0x100) {
2875         SRC_EA(env, src, opsize, 0, &addr);
2876         tcg_gen_and_i32(dest, src, reg);
2877         DEST_EA(env, insn, opsize, dest, &addr);
2878     } else {
2879         SRC_EA(env, src, opsize, 0, NULL);
2880         tcg_gen_and_i32(dest, src, reg);
2881         gen_partset_reg(opsize, reg, dest);
2882     }
2883     gen_logic_cc(s, dest, opsize);
2884     tcg_temp_free(dest);
2885 }
2886 
2887 DISAS_INSN(adda)
2888 {
2889     TCGv src;
2890     TCGv reg;
2891 
2892     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2893     reg = AREG(insn, 9);
2894     tcg_gen_add_i32(reg, reg, src);
2895 }
2896 
2897 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2898 {
2899     TCGv tmp;
2900 
2901     gen_flush_flags(s); /* compute old Z */
2902 
2903     /* Perform addition with carry.
2904      * (X, N) = src + dest + X;
2905      */
2906 
2907     tmp = tcg_const_i32(0);
2908     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
2909     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
2910     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2911 
2912     /* Compute signed-overflow for addition.  */
2913 
2914     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
2915     tcg_gen_xor_i32(tmp, dest, src);
2916     tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
2917     tcg_temp_free(tmp);
2918 
2919     /* Copy the rest of the results into place.  */
2920     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2921     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2922 
2923     set_cc_op(s, CC_OP_FLAGS);
2924 
2925     /* result is in QREG_CC_N */
2926 }
2927 
2928 DISAS_INSN(addx_reg)
2929 {
2930     TCGv dest;
2931     TCGv src;
2932     int opsize;
2933 
2934     opsize = insn_opsize(insn);
2935 
2936     dest = gen_extend(DREG(insn, 9), opsize, 1);
2937     src = gen_extend(DREG(insn, 0), opsize, 1);
2938 
2939     gen_addx(s, src, dest, opsize);
2940 
2941     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2942 }
2943 
2944 DISAS_INSN(addx_mem)
2945 {
2946     TCGv src;
2947     TCGv addr_src;
2948     TCGv dest;
2949     TCGv addr_dest;
2950     int opsize;
2951 
2952     opsize = insn_opsize(insn);
2953 
2954     addr_src = AREG(insn, 0);
2955     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2956     src = gen_load(s, opsize, addr_src, 1);
2957 
2958     addr_dest = AREG(insn, 9);
2959     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2960     dest = gen_load(s, opsize, addr_dest, 1);
2961 
2962     gen_addx(s, src, dest, opsize);
2963 
2964     gen_store(s, opsize, addr_dest, QREG_CC_N);
2965 }
2966 
2967 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
2968 {
2969     int count = (insn >> 9) & 7;
2970     int logical = insn & 8;
2971     int left = insn & 0x100;
2972     int bits = opsize_bytes(opsize) * 8;
2973     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
2974 
2975     if (count == 0) {
2976         count = 8;
2977     }
2978 
2979     tcg_gen_movi_i32(QREG_CC_V, 0);
2980     if (left) {
2981         tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
2982         tcg_gen_shli_i32(QREG_CC_N, reg, count);
2983 
2984         /* Note that ColdFire always clears V (done above),
2985            while M68000 sets it if the most significant bit is changed at
2986            any time during the shift operation.  */
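        /* Annotation: for count < bits, sari(reg, bits - 1) replicates the
         * sign bit across the word, while sari(reg, bits - count - 1) keeps
         * the sign bit plus the "count" bits just below it; the two differ
         * exactly when one of those bits differs from the sign, i.e. when
         * the msb changes at some point during the shift.
         */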
2987         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
2988             /* if shift count >= bits, V is (reg != 0) */
2989             if (count >= bits) {
2990                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
2991             } else {
2992                 TCGv t0 = tcg_temp_new();
2993                 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
2994                 tcg_gen_sari_i32(t0, reg, bits - count - 1);
2995                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
2996                 tcg_temp_free(t0);
2997             }
2998             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2999         }
3000     } else {
3001         tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3002         if (logical) {
3003             tcg_gen_shri_i32(QREG_CC_N, reg, count);
3004         } else {
3005             tcg_gen_sari_i32(QREG_CC_N, reg, count);
3006         }
3007     }
3008 
3009     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3010     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3011     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3012     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3013 
3014     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3015     set_cc_op(s, CC_OP_FLAGS);
3016 }
3017 
3018 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3019 {
3020     int logical = insn & 8;
3021     int left = insn & 0x100;
3022     int bits = opsize_bytes(opsize) * 8;
3023     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3024     TCGv s32;
3025     TCGv_i64 t64, s64;
3026 
3027     t64 = tcg_temp_new_i64();
3028     s64 = tcg_temp_new_i64();
3029     s32 = tcg_temp_new();
3030 
3031     /* Note that m68k truncates the shift count modulo 64, not 32.
3032        In addition, a 64-bit shift makes it easy to find "the last
3033        bit shifted out", for the carry flag.  */
3034     tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3035     tcg_gen_extu_i32_i64(s64, s32);
3036     tcg_gen_extu_i32_i64(t64, reg);
3037 
3038     /* Optimistically set V=0.  Also used as a zero source below.  */
3039     tcg_gen_movi_i32(QREG_CC_V, 0);
3040     if (left) {
3041         tcg_gen_shl_i64(t64, t64, s64);
3042 
3043         if (opsize == OS_LONG) {
3044             tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3045             /* Note that C=0 if shift count is 0, and we get that for free.  */
3046         } else {
3047             TCGv zero = tcg_const_i32(0);
3048             tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3049             tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3050             tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3051                                 s32, zero, zero, QREG_CC_C);
3052             tcg_temp_free(zero);
3053         }
3054         tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3055 
3056         /* X = C, but only if the shift count was non-zero.  */
3057         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3058                             QREG_CC_C, QREG_CC_X);
3059 
3060         /* M68000 sets V if the most significant bit is changed at
3061          * any time during the shift operation.  Do this via creating
3062          * an extension of the sign bit, comparing, and discarding
3063          * the bits below the sign bit.  I.e.
3064          *     int64_t s = (intN_t)reg;
3065          *     int64_t t = (int64_t)(intN_t)reg << count;
3066          *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3067          */
3068         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3069             TCGv_i64 tt = tcg_const_i64(32);
3070             /* if shift is greater than 32, use 32 */
3071             tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3072             tcg_temp_free_i64(tt);
3073             /* Sign extend the input to 64 bits; re-do the shift.  */
3074             tcg_gen_ext_i32_i64(t64, reg);
3075             tcg_gen_shl_i64(s64, t64, s64);
3076             /* Clear all bits that are unchanged.  */
3077             tcg_gen_xor_i64(t64, t64, s64);
3078             /* Ignore the bits below the sign bit.  */
3079             tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3080             /* If any bits remain set, we have overflow.  */
3081             tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3082             tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3083             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3084         }
3085     } else {
3086         tcg_gen_shli_i64(t64, t64, 32);
3087         if (logical) {
3088             tcg_gen_shr_i64(t64, t64, s64);
3089         } else {
3090             tcg_gen_sar_i64(t64, t64, s64);
3091         }
3092         tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3093 
3094         /* Note that C=0 if shift count is 0, and we get that for free.  */
3095         tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3096 
3097         /* X = C, but only if the shift count was non-zero.  */
3098         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3099                             QREG_CC_C, QREG_CC_X);
3100     }
3101     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3102     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3103 
3104     tcg_temp_free(s32);
3105     tcg_temp_free_i64(s64);
3106     tcg_temp_free_i64(t64);
3107 
3108     /* Write back the result.  */
3109     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3110     set_cc_op(s, CC_OP_FLAGS);
3111 }
3112 
3113 DISAS_INSN(shift8_im)
3114 {
3115     shift_im(s, insn, OS_BYTE);
3116 }
3117 
3118 DISAS_INSN(shift16_im)
3119 {
3120     shift_im(s, insn, OS_WORD);
3121 }
3122 
3123 DISAS_INSN(shift_im)
3124 {
3125     shift_im(s, insn, OS_LONG);
3126 }
3127 
3128 DISAS_INSN(shift8_reg)
3129 {
3130     shift_reg(s, insn, OS_BYTE);
3131 }
3132 
3133 DISAS_INSN(shift16_reg)
3134 {
3135     shift_reg(s, insn, OS_WORD);
3136 }
3137 
3138 DISAS_INSN(shift_reg)
3139 {
3140     shift_reg(s, insn, OS_LONG);
3141 }
3142 
3143 DISAS_INSN(shift_mem)
3144 {
3145     int logical = insn & 8;
3146     int left = insn & 0x100;
3147     TCGv src;
3148     TCGv addr;
3149 
3150     SRC_EA(env, src, OS_WORD, !logical, &addr);
3151     tcg_gen_movi_i32(QREG_CC_V, 0);
3152     if (left) {
3153         tcg_gen_shri_i32(QREG_CC_C, src, 15);
3154         tcg_gen_shli_i32(QREG_CC_N, src, 1);
3155 
3156         /* Note that ColdFire always clears V,
3157            while M68000 sets it if the most significant bit is changed at
3158            any time during the shift operation.  */
3159         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3160             src = gen_extend(src, OS_WORD, 1);
3161             tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3162         }
3163     } else {
3164         tcg_gen_mov_i32(QREG_CC_C, src);
3165         if (logical) {
3166             tcg_gen_shri_i32(QREG_CC_N, src, 1);
3167         } else {
3168             tcg_gen_sari_i32(QREG_CC_N, src, 1);
3169         }
3170     }
3171 
3172     gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3173     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3174     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3175     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3176 
3177     DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3178     set_cc_op(s, CC_OP_FLAGS);
3179 }
3180 
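/* Annotation (not in the original source): for 8- and 16-bit operands the
 * value is replicated to fill the 32-bit word (e.g. 0xab becomes
 * 0xabababab), so a plain 32-bit rotate leaves the low lane holding the
 * correctly rotated narrow result, which is then sign-extended below for
 * the flag computation.
 */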
3181 static void rotate(TCGv reg, TCGv shift, int left, int size)
3182 {
3183     switch (size) {
3184     case 8:
3185         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
3186         tcg_gen_ext8u_i32(reg, reg);
3187         tcg_gen_muli_i32(reg, reg, 0x01010101);
3188         goto do_long;
3189     case 16:
3190         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3191         tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3192         goto do_long;
3193     do_long:
3194     default:
3195         if (left) {
3196             tcg_gen_rotl_i32(reg, reg, shift);
3197         } else {
3198             tcg_gen_rotr_i32(reg, reg, shift);
3199         }
3200     }
3201 
3202     /* compute flags */
3203 
3204     switch (size) {
3205     case 8:
3206         tcg_gen_ext8s_i32(reg, reg);
3207         break;
3208     case 16:
3209         tcg_gen_ext16s_i32(reg, reg);
3210         break;
3211     default:
3212         break;
3213     }
3214 
3215     /* QREG_CC_X is not affected */
3216 
3217     tcg_gen_mov_i32(QREG_CC_N, reg);
3218     tcg_gen_mov_i32(QREG_CC_Z, reg);
3219 
3220     if (left) {
3221         tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3222     } else {
3223         tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3224     }
3225 
3226     tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3227 }
3228 
3229 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3230 {
3231     switch (size) {
3232     case 8:
3233         tcg_gen_ext8s_i32(reg, reg);
3234         break;
3235     case 16:
3236         tcg_gen_ext16s_i32(reg, reg);
3237         break;
3238     default:
3239         break;
3240     }
3241     tcg_gen_mov_i32(QREG_CC_N, reg);
3242     tcg_gen_mov_i32(QREG_CC_Z, reg);
3243     tcg_gen_mov_i32(QREG_CC_X, X);
3244     tcg_gen_mov_i32(QREG_CC_C, X);
3245     tcg_gen_movi_i32(QREG_CC_V, 0);
3246 }
3247 
3248 /* Result of rotate_x() is valid if 0 <= shift <= size */
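/* Annotation (not in the original source): rotate_x() implements a rotate
 * through the X flag by treating [X:reg] as a (size + 1)-bit quantity; the
 * three shift amounts computed below place the low part, the wrapped-around
 * high part and the old X flag, and bit "size" of the assembled result
 * becomes the new X.
 */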
3249 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3250 {
3251     TCGv X, shl, shr, shx, sz, zero;
3252 
3253     sz = tcg_const_i32(size);
3254 
3255     shr = tcg_temp_new();
3256     shl = tcg_temp_new();
3257     shx = tcg_temp_new();
3258     if (left) {
3259         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3260         tcg_gen_movi_i32(shr, size + 1);
3261         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3262         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3263         /* shx = shx < 0 ? size : shx; */
3264         zero = tcg_const_i32(0);
3265         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3266         tcg_temp_free(zero);
3267     } else {
3268         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3269         tcg_gen_movi_i32(shl, size + 1);
3270         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3271         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3272     }
3273 
3274     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3275 
3276     tcg_gen_shl_i32(shl, reg, shl);
3277     tcg_gen_shr_i32(shr, reg, shr);
3278     tcg_gen_or_i32(reg, shl, shr);
3279     tcg_temp_free(shl);
3280     tcg_temp_free(shr);
3281     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3282     tcg_gen_or_i32(reg, reg, shx);
3283     tcg_temp_free(shx);
3284 
3285     /* X = (reg >> size) & 1 */
3286 
3287     X = tcg_temp_new();
3288     tcg_gen_shr_i32(X, reg, sz);
3289     tcg_gen_andi_i32(X, X, 1);
3290     tcg_temp_free(sz);
3291 
3292     return X;
3293 }
3294 
3295 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3296 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3297 {
3298     TCGv_i64 t0, shift64;
3299     TCGv X, lo, hi, zero;
3300 
3301     shift64 = tcg_temp_new_i64();
3302     tcg_gen_extu_i32_i64(shift64, shift);
3303 
3304     t0 = tcg_temp_new_i64();
3305 
3306     X = tcg_temp_new();
3307     lo = tcg_temp_new();
3308     hi = tcg_temp_new();
3309 
3310     if (left) {
3311         /* create [reg:X:..] */
3312 
3313         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3314         tcg_gen_concat_i32_i64(t0, lo, reg);
3315 
3316         /* rotate */
3317 
3318         tcg_gen_rotl_i64(t0, t0, shift64);
3319         tcg_temp_free_i64(shift64);
3320 
3321         /* result is [reg:..:reg:X] */
3322 
3323         tcg_gen_extr_i64_i32(lo, hi, t0);
3324         tcg_gen_andi_i32(X, lo, 1);
3325 
3326         tcg_gen_shri_i32(lo, lo, 1);
3327     } else {
3328         /* create [..:X:reg] */
3329 
3330         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3331 
3332         tcg_gen_rotr_i64(t0, t0, shift64);
3333         tcg_temp_free_i64(shift64);
3334 
3335         /* result is [X:reg:..:reg] */
3336 
3337         tcg_gen_extr_i64_i32(lo, hi, t0);
3338 
3339         /* extract X */
3340 
3341         tcg_gen_shri_i32(X, hi, 31);
3342 
3343         /* extract result */
3344 
3345         tcg_gen_shli_i32(hi, hi, 1);
3346     }
3347     tcg_temp_free_i64(t0);
3348     tcg_gen_or_i32(lo, lo, hi);
3349     tcg_temp_free(hi);
3350 
3351     /* if shift == 0, register and X are not affected */
3352 
3353     zero = tcg_const_i32(0);
3354     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3355     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3356     tcg_temp_free(zero);
3357     tcg_temp_free(lo);
3358 
3359     return X;
3360 }
3361 
3362 DISAS_INSN(rotate_im)
3363 {
3364     TCGv shift;
3365     int tmp;
3366     int left = (insn & 0x100);
3367 
3368     tmp = (insn >> 9) & 7;
3369     if (tmp == 0) {
3370         tmp = 8;
3371     }
3372 
3373     shift = tcg_const_i32(tmp);
3374     if (insn & 8) {
3375         rotate(DREG(insn, 0), shift, left, 32);
3376     } else {
3377         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3378         rotate_x_flags(DREG(insn, 0), X, 32);
3379         tcg_temp_free(X);
3380     }
3381     tcg_temp_free(shift);
3382 
3383     set_cc_op(s, CC_OP_FLAGS);
3384 }
3385 
3386 DISAS_INSN(rotate8_im)
3387 {
3388     int left = (insn & 0x100);
3389     TCGv reg;
3390     TCGv shift;
3391     int tmp;
3392 
3393     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3394 
3395     tmp = (insn >> 9) & 7;
3396     if (tmp == 0) {
3397         tmp = 8;
3398     }
3399 
3400     shift = tcg_const_i32(tmp);
3401     if (insn & 8) {
3402         rotate(reg, shift, left, 8);
3403     } else {
3404         TCGv X = rotate_x(reg, shift, left, 8);
3405         rotate_x_flags(reg, X, 8);
3406         tcg_temp_free(X);
3407     }
3408     tcg_temp_free(shift);
3409     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3410     set_cc_op(s, CC_OP_FLAGS);
3411 }
3412 
3413 DISAS_INSN(rotate16_im)
3414 {
3415     int left = (insn & 0x100);
3416     TCGv reg;
3417     TCGv shift;
3418     int tmp;
3419 
3420     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3421     tmp = (insn >> 9) & 7;
3422     if (tmp == 0) {
3423         tmp = 8;
3424     }
3425 
3426     shift = tcg_const_i32(tmp);
3427     if (insn & 8) {
3428         rotate(reg, shift, left, 16);
3429     } else {
3430         TCGv X = rotate_x(reg, shift, left, 16);
3431         rotate_x_flags(reg, X, 16);
3432         tcg_temp_free(X);
3433     }
3434     tcg_temp_free(shift);
3435     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3436     set_cc_op(s, CC_OP_FLAGS);
3437 }
3438 
3439 DISAS_INSN(rotate_reg)
3440 {
3441     TCGv reg;
3442     TCGv src;
3443     TCGv t0, t1;
3444     int left = (insn & 0x100);
3445 
3446     reg = DREG(insn, 0);
3447     src = DREG(insn, 9);
3448     /* shift in [0..63] */
3449     t0 = tcg_temp_new();
3450     tcg_gen_andi_i32(t0, src, 63);
3451     t1 = tcg_temp_new_i32();
3452     if (insn & 8) {
3453         tcg_gen_andi_i32(t1, src, 31);
3454         rotate(reg, t1, left, 32);
3455         /* if shift == 0, clear C */
3456         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3457                             t0, QREG_CC_V /* 0 */,
3458                             QREG_CC_V /* 0 */, QREG_CC_C);
3459     } else {
3460         TCGv X;
3461         /* modulo 33 */
3462         tcg_gen_movi_i32(t1, 33);
3463         tcg_gen_remu_i32(t1, t0, t1);
3464         X = rotate32_x(DREG(insn, 0), t1, left);
3465         rotate_x_flags(DREG(insn, 0), X, 32);
3466         tcg_temp_free(X);
3467     }
3468     tcg_temp_free(t1);
3469     tcg_temp_free(t0);
3470     set_cc_op(s, CC_OP_FLAGS);
3471 }
3472 
3473 DISAS_INSN(rotate8_reg)
3474 {
3475     TCGv reg;
3476     TCGv src;
3477     TCGv t0, t1;
3478     int left = (insn & 0x100);
3479 
3480     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3481     src = DREG(insn, 9);
3482     /* shift in [0..63] */
3483     t0 = tcg_temp_new_i32();
3484     tcg_gen_andi_i32(t0, src, 63);
3485     t1 = tcg_temp_new_i32();
3486     if (insn & 8) {
3487         tcg_gen_andi_i32(t1, src, 7);
3488         rotate(reg, t1, left, 8);
3489         /* if shift == 0, clear C */
3490         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3491                             t0, QREG_CC_V /* 0 */,
3492                             QREG_CC_V /* 0 */, QREG_CC_C);
3493     } else {
3494         TCGv X;
3495         /* modulo 9 */
3496         tcg_gen_movi_i32(t1, 9);
3497         tcg_gen_remu_i32(t1, t0, t1);
3498         X = rotate_x(reg, t1, left, 8);
3499         rotate_x_flags(reg, X, 8);
3500         tcg_temp_free(X);
3501     }
3502     tcg_temp_free(t1);
3503     tcg_temp_free(t0);
3504     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3505     set_cc_op(s, CC_OP_FLAGS);
3506 }
3507 
3508 DISAS_INSN(rotate16_reg)
3509 {
3510     TCGv reg;
3511     TCGv src;
3512     TCGv t0, t1;
3513     int left = (insn & 0x100);
3514 
3515     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3516     src = DREG(insn, 9);
3517     /* shift in [0..63] */
3518     t0 = tcg_temp_new_i32();
3519     tcg_gen_andi_i32(t0, src, 63);
3520     t1 = tcg_temp_new_i32();
3521     if (insn & 8) {
3522         tcg_gen_andi_i32(t1, src, 15);
3523         rotate(reg, t1, left, 16);
3524         /* if shift == 0, clear C */
3525         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3526                             t0, QREG_CC_V /* 0 */,
3527                             QREG_CC_V /* 0 */, QREG_CC_C);
3528     } else {
3529         TCGv X;
3530         /* modulo 17 */
3531         tcg_gen_movi_i32(t1, 17);
3532         tcg_gen_remu_i32(t1, t0, t1);
3533         X = rotate_x(reg, t1, left, 16);
3534         rotate_x_flags(reg, X, 16);
3535         tcg_temp_free(X);
3536     }
3537     tcg_temp_free(t1);
3538     tcg_temp_free(t0);
3539     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3540     set_cc_op(s, CC_OP_FLAGS);
3541 }
3542 
3543 DISAS_INSN(rotate_mem)
3544 {
3545     TCGv src;
3546     TCGv addr;
3547     TCGv shift;
3548     int left = (insn & 0x100);
3549 
3550     SRC_EA(env, src, OS_WORD, 0, &addr);
3551 
3552     shift = tcg_const_i32(1);
3553     if (insn & 0x0200) {
3554         rotate(src, shift, left, 16);
3555     } else {
3556         TCGv X = rotate_x(src, shift, left, 16);
3557         rotate_x_flags(src, X, 16);
3558         tcg_temp_free(X);
3559     }
3560     tcg_temp_free(shift);
3561     DEST_EA(env, insn, OS_WORD, src, &addr);
3562     set_cc_op(s, CC_OP_FLAGS);
3563 }
3564 
3565 DISAS_INSN(bfext_reg)
3566 {
3567     int ext = read_im16(env, s);
3568     int is_sign = insn & 0x200;
3569     TCGv src = DREG(insn, 0);
3570     TCGv dst = DREG(ext, 12);
3571     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3572     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3573     int pos = 32 - ofs - len;        /* little bit-endian */
3574     TCGv tmp = tcg_temp_new();
3575     TCGv shift;
3576 
3577     /* In general, we're going to rotate the field so that it's at the
3578        top of the word and then right-shift by the complement of the
3579        width to extend the field.  */
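    /* Illustrative example (annotation): with an immediate offset of 8 and
     * width of 8 the field occupies bits 23..16 of the source register, so
     * pos = 32 - 8 - 8 = 16 and a plain (s)extract of 8 bits at position 16
     * suffices; only fields that wrap around the end of the word (pos < 0)
     * need the rotate first.
     */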
3580     if (ext & 0x20) {
3581         /* Variable width.  */
3582         if (ext & 0x800) {
3583             /* Variable offset.  */
3584             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3585             tcg_gen_rotl_i32(tmp, src, tmp);
3586         } else {
3587             tcg_gen_rotli_i32(tmp, src, ofs);
3588         }
3589 
3590         shift = tcg_temp_new();
3591         tcg_gen_neg_i32(shift, DREG(ext, 0));
3592         tcg_gen_andi_i32(shift, shift, 31);
3593         tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
3594         if (is_sign) {
3595             tcg_gen_mov_i32(dst, QREG_CC_N);
3596         } else {
3597             tcg_gen_shr_i32(dst, tmp, shift);
3598         }
3599         tcg_temp_free(shift);
3600     } else {
3601         /* Immediate width.  */
3602         if (ext & 0x800) {
3603             /* Variable offset */
3604             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3605             tcg_gen_rotl_i32(tmp, src, tmp);
3606             src = tmp;
3607             pos = 32 - len;
3608         } else {
3609             /* Immediate offset.  If the field doesn't wrap around the
3610                end of the word, rely on (s)extract completely.  */
3611             if (pos < 0) {
3612                 tcg_gen_rotli_i32(tmp, src, ofs);
3613                 src = tmp;
3614                 pos = 32 - len;
3615             }
3616         }
3617 
3618         tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
3619         if (is_sign) {
3620             tcg_gen_mov_i32(dst, QREG_CC_N);
3621         } else {
3622             tcg_gen_extract_i32(dst, src, pos, len);
3623         }
3624     }
3625 
3626     tcg_temp_free(tmp);
3627     set_cc_op(s, CC_OP_LOGIC);
3628 }
3629 
3630 DISAS_INSN(bfext_mem)
3631 {
3632     int ext = read_im16(env, s);
3633     int is_sign = insn & 0x200;
3634     TCGv dest = DREG(ext, 12);
3635     TCGv addr, len, ofs;
3636 
3637     addr = gen_lea(env, s, insn, OS_UNSIZED);
3638     if (IS_NULL_QREG(addr)) {
3639         gen_addr_fault(s);
3640         return;
3641     }
3642 
3643     if (ext & 0x20) {
3644         len = DREG(ext, 0);
3645     } else {
3646         len = tcg_const_i32(extract32(ext, 0, 5));
3647     }
3648     if (ext & 0x800) {
3649         ofs = DREG(ext, 6);
3650     } else {
3651         ofs = tcg_const_i32(extract32(ext, 6, 5));
3652     }
3653 
3654     if (is_sign) {
3655         gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
3656         tcg_gen_mov_i32(QREG_CC_N, dest);
3657     } else {
3658         TCGv_i64 tmp = tcg_temp_new_i64();
3659         gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
3660         tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
3661         tcg_temp_free_i64(tmp);
3662     }
3663     set_cc_op(s, CC_OP_LOGIC);
3664 
3665     if (!(ext & 0x20)) {
3666         tcg_temp_free(len);
3667     }
3668     if (!(ext & 0x800)) {
3669         tcg_temp_free(ofs);
3670     }
3671 }
3672 
3673 DISAS_INSN(bfop_reg)
3674 {
3675     int ext = read_im16(env, s);
3676     TCGv src = DREG(insn, 0);
3677     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3678     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3679     TCGv mask, tofs, tlen;
3680 
3681     TCGV_UNUSED(tofs);
3682     TCGV_UNUSED(tlen);
3683     if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
3684         tofs = tcg_temp_new();
3685         tlen = tcg_temp_new();
3686     }
3687 
3688     if ((ext & 0x820) == 0) {
3689         /* Immediate width and offset.  */
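        /* Annotation: maski is the complement of a len-bit field at the top
         * of the word; rotated right by ofs it becomes the complement of the
         * field in place, so "mask" below has zeros exactly over the
         * selected bitfield, while QREG_CC_N receives the field shifted to
         * the top of the word for the flag computation.
         */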
3690         uint32_t maski = 0x7fffffffu >> (len - 1);
3691         if (ofs + len <= 32) {
3692             tcg_gen_shli_i32(QREG_CC_N, src, ofs);
3693         } else {
3694             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
3695         }
3696         tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
3697         mask = tcg_const_i32(ror32(maski, ofs));
3698         if (!TCGV_IS_UNUSED(tofs)) {
3699             tcg_gen_movi_i32(tofs, ofs);
3700             tcg_gen_movi_i32(tlen, len);
3701         }
3702     } else {
3703         TCGv tmp = tcg_temp_new();
3704         if (ext & 0x20) {
3705             /* Variable width */
3706             tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
3707             tcg_gen_andi_i32(tmp, tmp, 31);
3708             mask = tcg_const_i32(0x7fffffffu);
3709             tcg_gen_shr_i32(mask, mask, tmp);
3710             if (!TCGV_IS_UNUSED(tlen)) {
3711                 tcg_gen_addi_i32(tlen, tmp, 1);
3712             }
3713         } else {
3714             /* Immediate width */
3715             mask = tcg_const_i32(0x7fffffffu >> (len - 1));
3716             if (!TCGV_IS_UNUSED(tlen)) {
3717                 tcg_gen_movi_i32(tlen, len);
3718             }
3719         }
3720         if (ext & 0x800) {
3721             /* Variable offset */
3722             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3723             tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
3724             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
3725             tcg_gen_rotr_i32(mask, mask, tmp);
3726             if (!TCGV_IS_UNUSED(tofs)) {
3727                 tcg_gen_mov_i32(tofs, tmp);
3728             }
3729         } else {
3730             /* Immediate offset (and variable width) */
3731             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
3732             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
3733             tcg_gen_rotri_i32(mask, mask, ofs);
3734             if (!TCGV_IS_UNUSED(tofs)) {
3735                 tcg_gen_movi_i32(tofs, ofs);
3736             }
3737         }
3738         tcg_temp_free(tmp);
3739     }
3740     set_cc_op(s, CC_OP_LOGIC);
3741 
3742     switch (insn & 0x0f00) {
3743     case 0x0a00: /* bfchg */
3744         tcg_gen_eqv_i32(src, src, mask);
3745         break;
3746     case 0x0c00: /* bfclr */
3747         tcg_gen_and_i32(src, src, mask);
3748         break;
3749     case 0x0d00: /* bfffo */
3750         gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
3751         tcg_temp_free(tlen);
3752         tcg_temp_free(tofs);
3753         break;
3754     case 0x0e00: /* bfset */
3755         tcg_gen_orc_i32(src, src, mask);
3756         break;
3757     case 0x0800: /* bftst */
3758         /* flags already set; no other work to do.  */
3759         break;
3760     default:
3761         g_assert_not_reached();
3762     }
3763     tcg_temp_free(mask);
3764 }
3765 
3766 DISAS_INSN(bfop_mem)
3767 {
3768     int ext = read_im16(env, s);
3769     TCGv addr, len, ofs;
3770     TCGv_i64 t64;
3771 
3772     addr = gen_lea(env, s, insn, OS_UNSIZED);
3773     if (IS_NULL_QREG(addr)) {
3774         gen_addr_fault(s);
3775         return;
3776     }
3777 
3778     if (ext & 0x20) {
3779         len = DREG(ext, 0);
3780     } else {
3781         len = tcg_const_i32(extract32(ext, 0, 5));
3782     }
3783     if (ext & 0x800) {
3784         ofs = DREG(ext, 6);
3785     } else {
3786         ofs = tcg_const_i32(extract32(ext, 6, 5));
3787     }
3788 
3789     switch (insn & 0x0f00) {
3790     case 0x0a00: /* bfchg */
3791         gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3792         break;
3793     case 0x0c00: /* bfclr */
3794         gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3795         break;
3796     case 0x0d00: /* bfffo */
3797         t64 = tcg_temp_new_i64();
3798         gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
3799         tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
3800         tcg_temp_free_i64(t64);
3801         break;
3802     case 0x0e00: /* bfset */
3803         gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3804         break;
3805     case 0x0800: /* bftst */
3806         gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3807         break;
3808     default:
3809         g_assert_not_reached();
3810     }
3811     set_cc_op(s, CC_OP_LOGIC);
3812 
3813     if (!(ext & 0x20)) {
3814         tcg_temp_free(len);
3815     }
3816     if (!(ext & 0x800)) {
3817         tcg_temp_free(ofs);
3818     }
3819 }
3820 
3821 DISAS_INSN(bfins_reg)
3822 {
3823     int ext = read_im16(env, s);
3824     TCGv dst = DREG(insn, 0);
3825     TCGv src = DREG(ext, 12);
3826     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3827     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3828     int pos = 32 - ofs - len;        /* little bit-endian */
3829     TCGv tmp;
3830 
3831     tmp = tcg_temp_new();
3832 
3833     if (ext & 0x20) {
3834         /* Variable width */
3835         tcg_gen_neg_i32(tmp, DREG(ext, 0));
3836         tcg_gen_andi_i32(tmp, tmp, 31);
3837         tcg_gen_shl_i32(QREG_CC_N, src, tmp);
3838     } else {
3839         /* Immediate width */
3840         tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
3841     }
3842     set_cc_op(s, CC_OP_LOGIC);
3843 
3844     /* Immediate width and offset */
3845     if ((ext & 0x820) == 0) {
3846         /* Check for suitability for deposit.  */
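             /* pos is the little-endian bit number of the field's lowest bit;
                when it is non-negative the field does not wrap around bit 0.  */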
3847         if (pos >= 0) {
3848             tcg_gen_deposit_i32(dst, dst, src, pos, len);
3849         } else {
3850             uint32_t maski = -2U << (len - 1);
3851             uint32_t roti = (ofs + len) & 31;
3852             tcg_gen_andi_i32(tmp, src, ~maski);
3853             tcg_gen_rotri_i32(tmp, tmp, roti);
3854             tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
3855             tcg_gen_or_i32(dst, dst, tmp);
3856         }
3857     } else {
3858         TCGv mask = tcg_temp_new();
3859         TCGv rot = tcg_temp_new();
3860 
3861         if (ext & 0x20) {
3862             /* Variable width */
3863             tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
3864             tcg_gen_andi_i32(rot, rot, 31);
3865             tcg_gen_movi_i32(mask, -2);
3866             tcg_gen_shl_i32(mask, mask, rot);
3867             tcg_gen_mov_i32(rot, DREG(ext, 0));
3868             tcg_gen_andc_i32(tmp, src, mask);
3869         } else {
3870             /* Immediate width (variable offset) */
3871             uint32_t maski = -2U << (len - 1);
3872             tcg_gen_andi_i32(tmp, src, ~maski);
3873             tcg_gen_movi_i32(mask, maski);
3874             tcg_gen_movi_i32(rot, len & 31);
3875         }
3876         if (ext & 0x800) {
3877             /* Variable offset */
3878             tcg_gen_add_i32(rot, rot, DREG(ext, 6));
3879         } else {
3880             /* Immediate offset (variable width) */
3881             tcg_gen_addi_i32(rot, rot, ofs);
3882         }
3883         tcg_gen_andi_i32(rot, rot, 31);
3884         tcg_gen_rotr_i32(mask, mask, rot);
3885         tcg_gen_rotr_i32(tmp, tmp, rot);
3886         tcg_gen_and_i32(dst, dst, mask);
3887         tcg_gen_or_i32(dst, dst, tmp);
3888 
3889         tcg_temp_free(rot);
3890         tcg_temp_free(mask);
3891     }
3892     tcg_temp_free(tmp);
3893 }
3894 
3895 DISAS_INSN(bfins_mem)
3896 {
3897     int ext = read_im16(env, s);
3898     TCGv src = DREG(ext, 12);
3899     TCGv addr, len, ofs;
3900 
3901     addr = gen_lea(env, s, insn, OS_UNSIZED);
3902     if (IS_NULL_QREG(addr)) {
3903         gen_addr_fault(s);
3904         return;
3905     }
3906 
3907     if (ext & 0x20) {
3908         len = DREG(ext, 0);
3909     } else {
3910         len = tcg_const_i32(extract32(ext, 0, 5));
3911     }
3912     if (ext & 0x800) {
3913         ofs = DREG(ext, 6);
3914     } else {
3915         ofs = tcg_const_i32(extract32(ext, 6, 5));
3916     }
3917 
3918     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
3919     set_cc_op(s, CC_OP_LOGIC);
3920 
3921     if (!(ext & 0x20)) {
3922         tcg_temp_free(len);
3923     }
3924     if (!(ext & 0x800)) {
3925         tcg_temp_free(ofs);
3926     }
3927 }
3928 
3929 DISAS_INSN(ff1)
3930 {
3931     TCGv reg;
3932     reg = DREG(insn, 0);
3933     gen_logic_cc(s, reg, OS_LONG);
3934     gen_helper_ff1(reg, reg);
3935 }
3936 
3937 static TCGv gen_get_sr(DisasContext *s)
3938 {
3939     TCGv ccr;
3940     TCGv sr;
3941 
3942     ccr = gen_get_ccr(s);
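         /* SR is the non-CCR bits of QREG_SR combined with the freshly
            computed CCR.  */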
3943     sr = tcg_temp_new();
3944     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
3945     tcg_gen_or_i32(sr, sr, ccr);
3946     return sr;
3947 }
3948 
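     /* ColdFire strldsr: a 40e7/46fc pair that pushes SR and then loads an
        immediate value into SR.  */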
3949 DISAS_INSN(strldsr)
3950 {
3951     uint16_t ext;
3952     uint32_t addr;
3953 
3954     addr = s->pc - 2;
3955     ext = read_im16(env, s);
3956     if (ext != 0x46FC) {
3957         gen_exception(s, addr, EXCP_UNSUPPORTED);
3958         return;
3959     }
3960     ext = read_im16(env, s);
3961     if (IS_USER(s) || (ext & SR_S) == 0) {
3962         gen_exception(s, addr, EXCP_PRIVILEGE);
3963         return;
3964     }
3965     gen_push(s, gen_get_sr(s));
3966     gen_set_sr_im(s, ext, 0);
3967 }
3968 
3969 DISAS_INSN(move_from_sr)
3970 {
3971     TCGv sr;
3972 
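         /* MOVE from SR is privileged on ColdFire, but allowed in user mode
            when the M68000 feature is present.  */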
3973     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
3974         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3975         return;
3976     }
3977     sr = gen_get_sr(s);
3978     DEST_EA(env, insn, OS_WORD, sr, NULL);
3979 }
3980 
3981 DISAS_INSN(move_to_sr)
3982 {
3983     if (IS_USER(s)) {
3984         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3985         return;
3986     }
3987     gen_set_sr(env, s, insn, 0);
3988     gen_lookup_tb(s);
3989 }
3990 
3991 DISAS_INSN(move_from_usp)
3992 {
3993     if (IS_USER(s)) {
3994         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3995         return;
3996     }
3997     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
3998                    offsetof(CPUM68KState, sp[M68K_USP]));
3999 }
4000 
4001 DISAS_INSN(move_to_usp)
4002 {
4003     if (IS_USER(s)) {
4004         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4005         return;
4006     }
4007     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4008                    offsetof(CPUM68KState, sp[M68K_USP]));
4009 }
4010 
4011 DISAS_INSN(halt)
4012 {
4013     gen_exception(s, s->pc, EXCP_HALT_INSN);
4014 }
4015 
4016 DISAS_INSN(stop)
4017 {
4018     uint16_t ext;
4019 
4020     if (IS_USER(s)) {
4021         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4022         return;
4023     }
4024 
4025     ext = read_im16(env, s);
4026 
4027     gen_set_sr_im(s, ext, 0);
4028     tcg_gen_movi_i32(cpu_halted, 1);
4029     gen_exception(s, s->pc, EXCP_HLT);
4030 }
4031 
4032 DISAS_INSN(rte)
4033 {
4034     if (IS_USER(s)) {
4035         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4036         return;
4037     }
4038     gen_exception(s, s->pc - 2, EXCP_RTE);
4039 }
4040 
4041 DISAS_INSN(movec)
4042 {
4043     uint16_t ext;
4044     TCGv reg;
4045 
4046     if (IS_USER(s)) {
4047         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4048         return;
4049     }
4050 
4051     ext = read_im16(env, s);
4052 
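         /* Extension word: bit 15 selects An vs Dn, bits 12-14 give the
            register number, and the low 12 bits name the control register.  */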
4053     if (ext & 0x8000) {
4054         reg = AREG(ext, 12);
4055     } else {
4056         reg = DREG(ext, 12);
4057     }
4058     gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4059     gen_lookup_tb(s);
4060 }
4061 
4062 DISAS_INSN(intouch)
4063 {
4064     if (IS_USER(s)) {
4065         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4066         return;
4067     }
4068     /* ICache fetch.  Implement as no-op.  */
4069 }
4070 
4071 DISAS_INSN(cpushl)
4072 {
4073     if (IS_USER(s)) {
4074         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4075         return;
4076     }
4077     /* Cache push/invalidate.  Implement as no-op.  */
4078 }
4079 
4080 DISAS_INSN(wddata)
4081 {
4082     gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4083 }
4084 
4085 DISAS_INSN(wdebug)
4086 {
4087     M68kCPU *cpu = m68k_env_get_cpu(env);
4088 
4089     if (IS_USER(s)) {
4090         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4091         return;
4092     }
4093     /* TODO: Implement wdebug.  */
4094     cpu_abort(CPU(cpu), "WDEBUG not implemented");
4095 }
4096 
4097 DISAS_INSN(trap)
4098 {
4099     gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
4100 }
4101 
4102 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4103    immediately before the next FP instruction is executed.  */
4104 DISAS_INSN(fpu)
4105 {
4106     uint16_t ext;
4107     int32_t offset;
4108     int opmode;
4109     TCGv_i64 src;
4110     TCGv_i64 dest;
4111     TCGv_i64 res;
4112     TCGv tmp32;
4113     int round;
4114     int set_dest;
4115     int opsize;
4116 
4117     ext = read_im16(env, s);
4118     opmode = ext & 0x7f;
4119     switch ((ext >> 13) & 7) {
4120     case 0: case 2:
4121         break;
4122     case 1:
4123         goto undef;
4124     case 3: /* fmove out */
4125         src = FREG(ext, 7);
4126         tmp32 = tcg_temp_new_i32();
4127         /* fmove */
4128         /* ??? TODO: Proper behavior on overflow.  */
4129 
4130         opsize = ext_opsize(ext, 10);
4131         switch (opsize) {
4132         case OS_LONG:
4133             gen_helper_f64_to_i32(tmp32, cpu_env, src);
4134             break;
4135         case OS_SINGLE:
4136             gen_helper_f64_to_f32(tmp32, cpu_env, src);
4137             break;
4138         case OS_WORD:
4139             gen_helper_f64_to_i32(tmp32, cpu_env, src);
4140             break;
4141         case OS_DOUBLE:
4142             tcg_gen_mov_i32(tmp32, AREG(insn, 0));
4143             switch ((insn >> 3) & 7) {
4144             case 2:
4145             case 3:
4146                 break;
4147             case 4:
4148                 tcg_gen_addi_i32(tmp32, tmp32, -8);
4149                 break;
4150             case 5:
4151                 offset = cpu_ldsw_code(env, s->pc);
4152                 s->pc += 2;
4153                 tcg_gen_addi_i32(tmp32, tmp32, offset);
4154                 break;
4155             default:
4156                 goto undef;
4157             }
4158             gen_store64(s, tmp32, src);
4159             switch ((insn >> 3) & 7) {
4160             case 3:
4161                 tcg_gen_addi_i32(tmp32, tmp32, 8);
4162                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4163                 break;
4164             case 4:
4165                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4166                 break;
4167             }
4168             tcg_temp_free_i32(tmp32);
4169             return;
4170         case OS_BYTE:
4171             gen_helper_f64_to_i32(tmp32, cpu_env, src);
4172             break;
4173         default:
4174             goto undef;
4175         }
4176         DEST_EA(env, insn, opsize, tmp32, NULL);
4177         tcg_temp_free_i32(tmp32);
4178         return;
4179     case 4: /* fmove to control register.  */
4180         switch ((ext >> 10) & 7) {
4181         case 4: /* FPCR */
4182             /* Not implemented.  Ignore writes.  */
4183             break;
4184         case 1: /* FPIAR */
4185         case 2: /* FPSR */
4186         default:
4187             cpu_abort(NULL, "Unimplemented: fmove to control %d",
4188                       (ext >> 10) & 7);
4189         }
4190         break;
4191     case 5: /* fmove from control register.  */
4192         switch ((ext >> 10) & 7) {
4193         case 4: /* FPCR */
4194             /* Not implemented.  Always return zero.  */
4195             tmp32 = tcg_const_i32(0);
4196             break;
4197         case 1: /* FPIAR */
4198         case 2: /* FPSR */
4199         default:
4200             cpu_abort(NULL, "Unimplemented: fmove from control %d",
4201                       (ext >> 10) & 7);
4202             goto undef;
4203         }
4204         DEST_EA(env, insn, OS_LONG, tmp32, NULL);
4205         break;
4206     case 6: /* fmovem */
4207     case 7:
4208         {
4209             TCGv addr;
4210             uint16_t mask;
4211             int i;
4212             if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
4213                 goto undef;
4214             tmp32 = gen_lea(env, s, insn, OS_LONG);
4215             if (IS_NULL_QREG(tmp32)) {
4216                 gen_addr_fault(s);
4217                 return;
4218             }
4219             addr = tcg_temp_new_i32();
4220             tcg_gen_mov_i32(addr, tmp32);
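                 /* The register list is in the low byte of ext, with FP0 at
                    bit 7.  */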
4221             mask = 0x80;
4222             for (i = 0; i < 8; i++) {
4223                 if (ext & mask) {
4224                     dest = FREG(i, 0);
4225                     if (ext & (1 << 13)) {
4226                         /* store */
4227                         tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
4228                     } else {
4229                         /* load */
4230                         tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
4231                     }
4232                     if (ext & (mask - 1))
4233                         tcg_gen_addi_i32(addr, addr, 8);
4234                 }
4235                 mask >>= 1;
4236             }
4237             tcg_temp_free_i32(addr);
4238         }
4239         return;
4240     }
4241     if (ext & (1 << 14)) {
4242         /* Source effective address.  */
4243         opsize = ext_opsize(ext, 10);
4244         if (opsize == OS_DOUBLE) {
4245             tmp32 = tcg_temp_new_i32();
4246             tcg_gen_mov_i32(tmp32, AREG(insn, 0));
4247             switch ((insn >> 3) & 7) {
4248             case 2:
4249             case 3:
4250                 break;
4251             case 4:
4252                 tcg_gen_addi_i32(tmp32, tmp32, -8);
4253                 break;
4254             case 5:
4255                 offset = cpu_ldsw_code(env, s->pc);
4256                 s->pc += 2;
4257                 tcg_gen_addi_i32(tmp32, tmp32, offset);
4258                 break;
4259             case 7:
4260                 offset = cpu_ldsw_code(env, s->pc);
4261                 offset += s->pc - 2;
4262                 s->pc += 2;
4263                 tcg_gen_addi_i32(tmp32, tmp32, offset);
4264                 break;
4265             default:
4266                 goto undef;
4267             }
4268             src = gen_load64(s, tmp32);
4269             switch ((insn >> 3) & 7) {
4270             case 3:
4271                 tcg_gen_addi_i32(tmp32, tmp32, 8);
4272                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4273                 break;
4274             case 4:
4275                 tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4276                 break;
4277             }
4278             tcg_temp_free_i32(tmp32);
4279         } else {
4280             SRC_EA(env, tmp32, opsize, 1, NULL);
4281             src = tcg_temp_new_i64();
4282             switch (opsize) {
4283             case OS_LONG:
4284             case OS_WORD:
4285             case OS_BYTE:
4286                 gen_helper_i32_to_f64(src, cpu_env, tmp32);
4287                 break;
4288             case OS_SINGLE:
4289                 gen_helper_f32_to_f64(src, cpu_env, tmp32);
4290                 break;
4291             }
4292         }
4293     } else {
4294         /* Source register.  */
4295         src = FREG(ext, 10);
4296     }
4297     dest = FREG(ext, 7);
4298     res = tcg_temp_new_i64();
4299     if (opmode != 0x3a)
4300         tcg_gen_mov_f64(res, dest);
4301     round = 1;
4302     set_dest = 1;
4303     switch (opmode) {
4304     case 0: case 0x40: case 0x44: /* fmove */
4305         tcg_gen_mov_f64(res, src);
4306         break;
4307     case 1: /* fint */
4308         gen_helper_iround_f64(res, cpu_env, src);
4309         round = 0;
4310         break;
4311     case 3: /* fintrz */
4312         gen_helper_itrunc_f64(res, cpu_env, src);
4313         round = 0;
4314         break;
4315     case 4: case 0x41: case 0x45: /* fsqrt */
4316         gen_helper_sqrt_f64(res, cpu_env, src);
4317         break;
4318     case 0x18: case 0x58: case 0x5c: /* fabs */
4319         gen_helper_abs_f64(res, src);
4320         break;
4321     case 0x1a: case 0x5a: case 0x5e: /* fneg */
4322         gen_helper_chs_f64(res, src);
4323         break;
4324     case 0x20: case 0x60: case 0x64: /* fdiv */
4325         gen_helper_div_f64(res, cpu_env, res, src);
4326         break;
4327     case 0x22: case 0x62: case 0x66: /* fadd */
4328         gen_helper_add_f64(res, cpu_env, res, src);
4329         break;
4330     case 0x23: case 0x63: case 0x67: /* fmul */
4331         gen_helper_mul_f64(res, cpu_env, res, src);
4332         break;
4333     case 0x28: case 0x68: case 0x6c: /* fsub */
4334         gen_helper_sub_f64(res, cpu_env, res, src);
4335         break;
4336     case 0x38: /* fcmp */
4337         gen_helper_sub_cmp_f64(res, cpu_env, res, src);
4338         set_dest = 0;
4339         round = 0;
4340         break;
4341     case 0x3a: /* ftst */
4342         tcg_gen_mov_f64(res, src);
4343         set_dest = 0;
4344         round = 0;
4345         break;
4346     default:
4347         goto undef;
4348     }
4349     if (ext & (1 << 14)) {
4350         tcg_temp_free_i64(src);
4351     }
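         /* The fsOP forms (bit 6 set, bit 2 clear) always round the result to
            single precision; the plain forms round only when the FPCR
            precision control requests it.  */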
4352     if (round) {
4353         if (opmode & 0x40) {
4354             if ((opmode & 0x4) != 0)
4355                 round = 0;
4356         } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
4357             round = 0;
4358         }
4359     }
4360     if (round) {
4361         TCGv tmp = tcg_temp_new_i32();
4362         gen_helper_f64_to_f32(tmp, cpu_env, res);
4363         gen_helper_f32_to_f64(res, cpu_env, tmp);
4364         tcg_temp_free_i32(tmp);
4365     }
4366     tcg_gen_mov_f64(QREG_FP_RESULT, res);
4367     if (set_dest) {
4368         tcg_gen_mov_f64(dest, res);
4369     }
4370     tcg_temp_free_i64(res);
4371     return;
4372 undef:
4373     /* FIXME: Is this right for offset addressing modes?  */
4374     s->pc -= 2;
4375     disas_undef_fpu(env, s, insn);
4376 }
4377 
4378 DISAS_INSN(fbcc)
4379 {
4380     uint32_t offset;
4381     uint32_t addr;
4382     TCGv flag;
4383     TCGLabel *l1;
4384 
4385     addr = s->pc;
4386     offset = cpu_ldsw_code(env, s->pc);
4387     s->pc += 2;
4388     if (insn & (1 << 6)) {
4389         offset = (offset << 16) | read_im16(env, s);
4390     }
4391 
4392     l1 = gen_new_label();
4393     /* TODO: Raise BSUN exception.  */
4394     flag = tcg_temp_new();
4395     gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
4396     /* Jump to l1 if condition is true.  */
4397     switch (insn & 0xf) {
4398     case 0: /* f */
4399         break;
4400     case 1: /* eq (=0) */
4401         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
4402         break;
4403     case 2: /* ogt (=1) */
4404         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
4405         break;
4406     case 3: /* oge (=0 or =1) */
4407         tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
4408         break;
4409     case 4: /* olt (=-1) */
4410         tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
4411         break;
4412     case 5: /* ole (=-1 or =0) */
4413         tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
4414         break;
4415     case 6: /* ogl (=-1 or =1) */
4416         tcg_gen_andi_i32(flag, flag, 1);
4417         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4418         break;
4419     case 7: /* or (=2) */
4420         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
4421         break;
4422     case 8: /* un (<2) */
4423         tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
4424         break;
4425     case 9: /* ueq (=0 or =2) */
4426         tcg_gen_andi_i32(flag, flag, 1);
4427         tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
4428         break;
4429     case 10: /* ugt (>0) */
4430         tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
4431         break;
4432     case 11: /* uge (>=0) */
4433         tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
4434         break;
4435     case 12: /* ult (=-1 or =2) */
4436         tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
4437         break;
4438     case 13: /* ule (!=1) */
4439         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
4440         break;
4441     case 14: /* ne (!=0) */
4442         tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4443         break;
4444     case 15: /* t */
4445         tcg_gen_br(l1);
4446         break;
4447     }
4448     gen_jmp_tb(s, 0, s->pc);
4449     gen_set_label(l1);
4450     gen_jmp_tb(s, 1, addr + offset);
4451 }
4452 
4453 DISAS_INSN(frestore)
4454 {
4455     M68kCPU *cpu = m68k_env_get_cpu(env);
4456 
4457     /* TODO: Implement frestore.  */
4458     cpu_abort(CPU(cpu), "FRESTORE not implemented");
4459 }
4460 
4461 DISAS_INSN(fsave)
4462 {
4463     M68kCPU *cpu = m68k_env_get_cpu(env);
4464 
4465     /* TODO: Implement fsave.  */
4466     cpu_abort(CPU(cpu), "FSAVE not implemented");
4467 }
4468 
4469 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
4470 {
4471     TCGv tmp = tcg_temp_new();
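         /* Fractional mode keeps the halfword in the upper 16 bits of the
            operand; the signed and unsigned integer modes sign- or zero-extend
            it instead.  */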
4472     if (s->env->macsr & MACSR_FI) {
4473         if (upper)
4474             tcg_gen_andi_i32(tmp, val, 0xffff0000);
4475         else
4476             tcg_gen_shli_i32(tmp, val, 16);
4477     } else if (s->env->macsr & MACSR_SU) {
4478         if (upper)
4479             tcg_gen_sari_i32(tmp, val, 16);
4480         else
4481             tcg_gen_ext16s_i32(tmp, val);
4482     } else {
4483         if (upper)
4484             tcg_gen_shri_i32(tmp, val, 16);
4485         else
4486             tcg_gen_ext16u_i32(tmp, val);
4487     }
4488     return tmp;
4489 }
4490 
4491 static void gen_mac_clear_flags(void)
4492 {
4493     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
4494                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
4495 }
4496 
4497 DISAS_INSN(mac)
4498 {
4499     TCGv rx;
4500     TCGv ry;
4501     uint16_t ext;
4502     int acc;
4503     TCGv tmp;
4504     TCGv addr;
4505     TCGv loadval;
4506     int dual;
4507     TCGv saved_flags;
4508 
4509     if (!s->done_mac) {
4510         s->mactmp = tcg_temp_new_i64();
4511         s->done_mac = 1;
4512     }
4513 
4514     ext = read_im16(env, s);
4515 
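         /* The accumulator number comes from bit 7 of the opcode (low bit) and
            bit 4 of the extension word (high bit).  */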
4516     acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
4517     dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
4518     if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
4519         disas_undef(env, s, insn);
4520         return;
4521     }
4522     if (insn & 0x30) {
4523         /* MAC with load.  */
4524         tmp = gen_lea(env, s, insn, OS_LONG);
4525         addr = tcg_temp_new();
4526         tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
4527         /* Load the value now to ensure correct exception behavior.
4528            Perform writeback after reading the MAC inputs.  */
4529         loadval = gen_load(s, OS_LONG, addr, 0);
4530 
4531         acc ^= 1;
4532         rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
4533         ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
4534     } else {
4535         loadval = addr = NULL_QREG;
4536         rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4537         ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4538     }
4539 
4540     gen_mac_clear_flags();
4541 #if 0
4542     l1 = -1;
4543     /* Disabled because conditional branches clobber temporary vars.  */
4544     if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
4545         /* Skip the multiply if we know we will ignore it.  */
4546         l1 = gen_new_label();
4547         tmp = tcg_temp_new();
4548         tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
4549         gen_op_jmp_nz32(tmp, l1);
4550     }
4551 #endif
4552 
4553     if ((ext & 0x0800) == 0) {
4554         /* Word.  */
4555         rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
4556         ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
4557     }
4558     if (s->env->macsr & MACSR_FI) {
4559         gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
4560     } else {
4561         if (s->env->macsr & MACSR_SU)
4562             gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
4563         else
4564             gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
4565         switch ((ext >> 9) & 3) {
4566         case 1:
4567             tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
4568             break;
4569         case 3:
4570             tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
4571             break;
4572         }
4573     }
4574 
4575     if (dual) {
4576         /* Save the overflow flag from the multiply.  */
4577         saved_flags = tcg_temp_new();
4578         tcg_gen_mov_i32(saved_flags, QREG_MACSR);
4579     } else {
4580         saved_flags = NULL_QREG;
4581     }
4582 
4583 #if 0
4584     /* Disabled because conditional branches clobber temporary vars.  */
4585     if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
4586         /* Skip the accumulate if the value is already saturated.  */
4587         l1 = gen_new_label();
4588         tmp = tcg_temp_new();
4589         gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4590         gen_op_jmp_nz32(tmp, l1);
4591     }
4592 #endif
4593 
4594     if (insn & 0x100)
4595         tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4596     else
4597         tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4598 
4599     if (s->env->macsr & MACSR_FI)
4600         gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4601     else if (s->env->macsr & MACSR_SU)
4602         gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4603     else
4604         gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4605 
4606 #if 0
4607     /* Disabled because conditional branches clobber temporary vars.  */
4608     if (l1 != -1)
4609         gen_set_label(l1);
4610 #endif
4611 
4612     if (dual) {
4613         /* Dual accumulate variant.  */
4614         acc = (ext >> 2) & 3;
4615         /* Restore the overflow flag from the multiplier.  */
4616         tcg_gen_mov_i32(QREG_MACSR, saved_flags);
4617 #if 0
4618         /* Disabled because conditional branches clobber temporary vars.  */
4619         if ((s->env->macsr & MACSR_OMC) != 0) {
4620             /* Skip the accumulate if the value is already saturated.  */
4621             l1 = gen_new_label();
4622             tmp = tcg_temp_new();
4623             gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4624             gen_op_jmp_nz32(tmp, l1);
4625         }
4626 #endif
4627         if (ext & 2)
4628             tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4629         else
4630             tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4631         if (s->env->macsr & MACSR_FI)
4632             gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4633         else if (s->env->macsr & MACSR_SU)
4634             gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4635         else
4636             gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4637 #if 0
4638         /* Disabled because conditional branches clobber temporary vars.  */
4639         if (l1 != -1)
4640             gen_set_label(l1);
4641 #endif
4642     }
4643     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
4644 
4645     if (insn & 0x30) {
4646         TCGv rw;
4647         rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4648         tcg_gen_mov_i32(rw, loadval);
4649         /* FIXME: Should address writeback happen with the masked or
4650            unmasked value?  */
4651         switch ((insn >> 3) & 7) {
4652         case 3: /* Post-increment.  */
4653             tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
4654             break;
4655         case 4: /* Pre-decrement.  */
4656             tcg_gen_mov_i32(AREG(insn, 0), addr);
4657         }
4658     }
4659 }
4660 
4661 DISAS_INSN(from_mac)
4662 {
4663     TCGv rx;
4664     TCGv_i64 acc;
4665     int accnum;
4666 
4667     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4668     accnum = (insn >> 9) & 3;
4669     acc = MACREG(accnum);
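         /* Fractional and saturating modes need helpers; otherwise the low
            32 bits of the accumulator are copied out directly.  */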
4670     if (s->env->macsr & MACSR_FI) {
4671         gen_helper_get_macf(rx, cpu_env, acc);
4672     } else if ((s->env->macsr & MACSR_OMC) == 0) {
4673         tcg_gen_extrl_i64_i32(rx, acc);
4674     } else if (s->env->macsr & MACSR_SU) {
4675         gen_helper_get_macs(rx, acc);
4676     } else {
4677         gen_helper_get_macu(rx, acc);
4678     }
4679     if (insn & 0x40) {
4680         tcg_gen_movi_i64(acc, 0);
4681         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4682     }
4683 }
4684 
4685 DISAS_INSN(move_mac)
4686 {
4687     /* FIXME: This can be done without a helper.  */
4688     int src;
4689     TCGv dest;
4690     src = insn & 3;
4691     dest = tcg_const_i32((insn >> 9) & 3);
4692     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
4693     gen_mac_clear_flags();
4694     gen_helper_mac_set_flags(cpu_env, dest);
4695 }
4696 
4697 DISAS_INSN(from_macsr)
4698 {
4699     TCGv reg;
4700 
4701     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4702     tcg_gen_mov_i32(reg, QREG_MACSR);
4703 }
4704 
4705 DISAS_INSN(from_mask)
4706 {
4707     TCGv reg;
4708     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4709     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
4710 }
4711 
4712 DISAS_INSN(from_mext)
4713 {
4714     TCGv reg;
4715     TCGv acc;
4716     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4717     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4718     if (s->env->macsr & MACSR_FI)
4719         gen_helper_get_mac_extf(reg, cpu_env, acc);
4720     else
4721         gen_helper_get_mac_exti(reg, cpu_env, acc);
4722 }
4723 
4724 DISAS_INSN(macsr_to_ccr)
4725 {
4726     TCGv tmp = tcg_temp_new();
4727     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
4728     gen_helper_set_sr(cpu_env, tmp);
4729     tcg_temp_free(tmp);
4730     set_cc_op(s, CC_OP_FLAGS);
4731 }
4732 
4733 DISAS_INSN(to_mac)
4734 {
4735     TCGv_i64 acc;
4736     TCGv val;
4737     int accnum;
4738     accnum = (insn >> 9) & 3;
4739     acc = MACREG(accnum);
4740     SRC_EA(env, val, OS_LONG, 0, NULL);
4741     if (s->env->macsr & MACSR_FI) {
4742         tcg_gen_ext_i32_i64(acc, val);
4743         tcg_gen_shli_i64(acc, acc, 8);
4744     } else if (s->env->macsr & MACSR_SU) {
4745         tcg_gen_ext_i32_i64(acc, val);
4746     } else {
4747         tcg_gen_extu_i32_i64(acc, val);
4748     }
4749     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4750     gen_mac_clear_flags();
4751     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
4752 }
4753 
4754 DISAS_INSN(to_macsr)
4755 {
4756     TCGv val;
4757     SRC_EA(env, val, OS_LONG, 0, NULL);
4758     gen_helper_set_macsr(cpu_env, val);
4759     gen_lookup_tb(s);
4760 }
4761 
4762 DISAS_INSN(to_mask)
4763 {
4764     TCGv val;
4765     SRC_EA(env, val, OS_LONG, 0, NULL);
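         /* Only the low 16 bits of MASK come from the source; the upper half
            always reads as ones.  */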
4766     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
4767 }
4768 
4769 DISAS_INSN(to_mext)
4770 {
4771     TCGv val;
4772     TCGv acc;
4773     SRC_EA(env, val, OS_LONG, 0, NULL);
4774     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4775     if (s->env->macsr & MACSR_FI)
4776         gen_helper_set_mac_extf(cpu_env, val, acc);
4777     else if (s->env->macsr & MACSR_SU)
4778         gen_helper_set_mac_exts(cpu_env, val, acc);
4779     else
4780         gen_helper_set_mac_extu(cpu_env, val, acc);
4781 }
4782 
4783 static disas_proc opcode_table[65536];
4784 
4785 static void
4786 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
4787 {
4788   int i;
4789   int from;
4790   int to;
4791 
4792   /* Sanity check.  All set bits must be included in the mask.  */
4793   if (opcode & ~mask) {
4794       fprintf(stderr,
4795               "qemu internal error: bogus opcode definition %04x/%04x\n",
4796               opcode, mask);
4797       abort();
4798   }
4799   /* This could probably be cleverer.  For now just optimize the case where
4800      the top bits are known.  */
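       /* E.g. trap is registered with opcode 4e40 and mask fff0: the highest
          clear mask bit is bit 3, so entries 0x4e40..0x4e4f are scanned and
          each one matching the mask gets the handler.  */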
4801   /* Find the first zero bit in the mask.  */
4802   i = 0x8000;
4803   while ((i & mask) != 0)
4804       i >>= 1;
4805   /* Iterate over all combinations of this and lower bits.  */
4806   if (i == 0)
4807       i = 1;
4808   else
4809       i <<= 1;
4810   from = opcode & ~(i - 1);
4811   to = from + i;
4812   for (i = from; i < to; i++) {
4813       if ((i & mask) == opcode)
4814           opcode_table[i] = proc;
4815   }
4816 }
4817 
4818 /* Register m68k opcode handlers.  Order is important.
4819    Later insns override earlier ones.  */
4820 void register_m68k_insns (CPUM68KState *env)
4821 {
4822     /* Build the opcode table only once to avoid
4823        multithreading issues. */
4824     if (opcode_table[0] != NULL) {
4825         return;
4826     }
4827 
4828     /* Use BASE() for instructions available
4829      * to both CF_ISA_A and M68000.
4830      */
4831 #define BASE(name, opcode, mask) \
4832     register_opcode(disas_##name, 0x##opcode, 0x##mask)
4833 #define INSN(name, opcode, mask, feature) do { \
4834     if (m68k_feature(env, M68K_FEATURE_##feature)) \
4835         BASE(name, opcode, mask); \
4836     } while(0)
4837     BASE(undef,     0000, 0000);
4838     INSN(arith_im,  0080, fff8, CF_ISA_A);
4839     INSN(arith_im,  0000, ff00, M68000);
4840     INSN(undef,     00c0, ffc0, M68000);
4841     INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
4842     BASE(bitop_reg, 0100, f1c0);
4843     BASE(bitop_reg, 0140, f1c0);
4844     BASE(bitop_reg, 0180, f1c0);
4845     BASE(bitop_reg, 01c0, f1c0);
4846     INSN(arith_im,  0280, fff8, CF_ISA_A);
4847     INSN(arith_im,  0200, ff00, M68000);
4848     INSN(undef,     02c0, ffc0, M68000);
4849     INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
4850     INSN(arith_im,  0480, fff8, CF_ISA_A);
4851     INSN(arith_im,  0400, ff00, M68000);
4852     INSN(undef,     04c0, ffc0, M68000);
4853     INSN(arith_im,  0600, ff00, M68000);
4854     INSN(undef,     06c0, ffc0, M68000);
4855     INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
4856     INSN(arith_im,  0680, fff8, CF_ISA_A);
4857     INSN(arith_im,  0c00, ff38, CF_ISA_A);
4858     INSN(arith_im,  0c00, ff00, M68000);
4859     BASE(bitop_im,  0800, ffc0);
4860     BASE(bitop_im,  0840, ffc0);
4861     BASE(bitop_im,  0880, ffc0);
4862     BASE(bitop_im,  08c0, ffc0);
4863     INSN(arith_im,  0a80, fff8, CF_ISA_A);
4864     INSN(arith_im,  0a00, ff00, M68000);
4865     INSN(cas,       0ac0, ffc0, CAS);
4866     INSN(cas,       0cc0, ffc0, CAS);
4867     INSN(cas,       0ec0, ffc0, CAS);
4868     INSN(cas2w,     0cfc, ffff, CAS);
4869     INSN(cas2l,     0efc, ffff, CAS);
4870     BASE(move,      1000, f000);
4871     BASE(move,      2000, f000);
4872     BASE(move,      3000, f000);
4873     INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
4874     INSN(negx,      4080, fff8, CF_ISA_A);
4875     INSN(negx,      4000, ff00, M68000);
4876     INSN(undef,     40c0, ffc0, M68000);
4877     INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
4878     INSN(move_from_sr, 40c0, ffc0, M68000);
4879     BASE(lea,       41c0, f1c0);
4880     BASE(clr,       4200, ff00);
4881     BASE(undef,     42c0, ffc0);
4882     INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
4883     INSN(move_from_ccr, 42c0, ffc0, M68000);
4884     INSN(neg,       4480, fff8, CF_ISA_A);
4885     INSN(neg,       4400, ff00, M68000);
4886     INSN(undef,     44c0, ffc0, M68000);
4887     BASE(move_to_ccr, 44c0, ffc0);
4888     INSN(not,       4680, fff8, CF_ISA_A);
4889     INSN(not,       4600, ff00, M68000);
4890     INSN(undef,     46c0, ffc0, M68000);
4891     INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
4892     INSN(nbcd,      4800, ffc0, M68000);
4893     INSN(linkl,     4808, fff8, M68000);
4894     BASE(pea,       4840, ffc0);
4895     BASE(swap,      4840, fff8);
4896     INSN(bkpt,      4848, fff8, BKPT);
4897     INSN(movem,     48d0, fbf8, CF_ISA_A);
4898     INSN(movem,     48e8, fbf8, CF_ISA_A);
4899     INSN(movem,     4880, fb80, M68000);
4900     BASE(ext,       4880, fff8);
4901     BASE(ext,       48c0, fff8);
4902     BASE(ext,       49c0, fff8);
4903     BASE(tst,       4a00, ff00);
4904     INSN(tas,       4ac0, ffc0, CF_ISA_B);
4905     INSN(tas,       4ac0, ffc0, M68000);
4906     INSN(halt,      4ac8, ffff, CF_ISA_A);
4907     INSN(pulse,     4acc, ffff, CF_ISA_A);
4908     BASE(illegal,   4afc, ffff);
4909     INSN(mull,      4c00, ffc0, CF_ISA_A);
4910     INSN(mull,      4c00, ffc0, LONG_MULDIV);
4911     INSN(divl,      4c40, ffc0, CF_ISA_A);
4912     INSN(divl,      4c40, ffc0, LONG_MULDIV);
4913     INSN(sats,      4c80, fff8, CF_ISA_B);
4914     BASE(trap,      4e40, fff0);
4915     BASE(link,      4e50, fff8);
4916     BASE(unlk,      4e58, fff8);
4917     INSN(move_to_usp, 4e60, fff8, USP);
4918     INSN(move_from_usp, 4e68, fff8, USP);
4919     BASE(nop,       4e71, ffff);
4920     BASE(stop,      4e72, ffff);
4921     BASE(rte,       4e73, ffff);
4922     INSN(rtd,       4e74, ffff, RTD);
4923     BASE(rts,       4e75, ffff);
4924     INSN(movec,     4e7b, ffff, CF_ISA_A);
4925     BASE(jump,      4e80, ffc0);
4926     BASE(jump,      4ec0, ffc0);
4927     INSN(addsubq,   5000, f080, M68000);
4928     BASE(addsubq,   5080, f0c0);
4929     INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
4930     INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
4931     INSN(dbcc,      50c8, f0f8, M68000);
4932     INSN(tpf,       51f8, fff8, CF_ISA_A);
4933 
4934     /* Branch instructions.  */
4935     BASE(branch,    6000, f000);
4936     /* Disable long branch instructions, then add back the ones we want.  */
4937     BASE(undef,     60ff, f0ff); /* All long branches.  */
4938     INSN(branch,    60ff, f0ff, CF_ISA_B);
4939     INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
4940     INSN(branch,    60ff, ffff, BRAL);
4941     INSN(branch,    60ff, f0ff, BCCL);
4942 
4943     BASE(moveq,     7000, f100);
4944     INSN(mvzs,      7100, f100, CF_ISA_B);
4945     BASE(or,        8000, f000);
4946     BASE(divw,      80c0, f0c0);
4947     INSN(sbcd_reg,  8100, f1f8, M68000);
4948     INSN(sbcd_mem,  8108, f1f8, M68000);
4949     BASE(addsub,    9000, f000);
4950     INSN(undef,     90c0, f0c0, CF_ISA_A);
4951     INSN(subx_reg,  9180, f1f8, CF_ISA_A);
4952     INSN(subx_reg,  9100, f138, M68000);
4953     INSN(subx_mem,  9108, f138, M68000);
4954     INSN(suba,      91c0, f1c0, CF_ISA_A);
4955     INSN(suba,      90c0, f0c0, M68000);
4956 
4957     BASE(undef_mac, a000, f000);
4958     INSN(mac,       a000, f100, CF_EMAC);
4959     INSN(from_mac,  a180, f9b0, CF_EMAC);
4960     INSN(move_mac,  a110, f9fc, CF_EMAC);
4961     INSN(from_macsr,a980, f9f0, CF_EMAC);
4962     INSN(from_mask, ad80, fff0, CF_EMAC);
4963     INSN(from_mext, ab80, fbf0, CF_EMAC);
4964     INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
4965     INSN(to_mac,    a100, f9c0, CF_EMAC);
4966     INSN(to_macsr,  a900, ffc0, CF_EMAC);
4967     INSN(to_mext,   ab00, fbc0, CF_EMAC);
4968     INSN(to_mask,   ad00, ffc0, CF_EMAC);
4969 
4970     INSN(mov3q,     a140, f1c0, CF_ISA_B);
4971     INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
4972     INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
4973     INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
4974     INSN(cmp,       b080, f1c0, CF_ISA_A);
4975     INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
4976     INSN(cmp,       b000, f100, M68000);
4977     INSN(eor,       b100, f100, M68000);
4978     INSN(cmpm,      b108, f138, M68000);
4979     INSN(cmpa,      b0c0, f0c0, M68000);
4980     INSN(eor,       b180, f1c0, CF_ISA_A);
4981     BASE(and,       c000, f000);
4982     INSN(exg_dd,    c140, f1f8, M68000);
4983     INSN(exg_aa,    c148, f1f8, M68000);
4984     INSN(exg_da,    c188, f1f8, M68000);
4985     BASE(mulw,      c0c0, f0c0);
4986     INSN(abcd_reg,  c100, f1f8, M68000);
4987     INSN(abcd_mem,  c108, f1f8, M68000);
4988     BASE(addsub,    d000, f000);
4989     INSN(undef,     d0c0, f0c0, CF_ISA_A);
4990     INSN(addx_reg,      d180, f1f8, CF_ISA_A);
4991     INSN(addx_reg,  d100, f138, M68000);
4992     INSN(addx_mem,  d108, f138, M68000);
4993     INSN(adda,      d1c0, f1c0, CF_ISA_A);
4994     INSN(adda,      d0c0, f0c0, M68000);
4995     INSN(shift_im,  e080, f0f0, CF_ISA_A);
4996     INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
4997     INSN(shift8_im, e000, f0f0, M68000);
4998     INSN(shift16_im, e040, f0f0, M68000);
4999     INSN(shift_im,  e080, f0f0, M68000);
5000     INSN(shift8_reg, e020, f0f0, M68000);
5001     INSN(shift16_reg, e060, f0f0, M68000);
5002     INSN(shift_reg, e0a0, f0f0, M68000);
5003     INSN(shift_mem, e0c0, fcc0, M68000);
5004     INSN(rotate_im, e090, f0f0, M68000);
5005     INSN(rotate8_im, e010, f0f0, M68000);
5006     INSN(rotate16_im, e050, f0f0, M68000);
5007     INSN(rotate_reg, e0b0, f0f0, M68000);
5008     INSN(rotate8_reg, e030, f0f0, M68000);
5009     INSN(rotate16_reg, e070, f0f0, M68000);
5010     INSN(rotate_mem, e4c0, fcc0, M68000);
5011     INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
5012     INSN(bfext_reg, e9c0, fdf8, BITFIELD);
5013     INSN(bfins_mem, efc0, ffc0, BITFIELD);
5014     INSN(bfins_reg, efc0, fff8, BITFIELD);
5015     INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
5016     INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
5017     INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
5018     INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
5019     INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
5020     INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
5021     INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
5022     INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
5023     INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
5024     INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
5025     INSN(undef_fpu, f000, f000, CF_ISA_A);
5026     INSN(fpu,       f200, ffc0, CF_FPU);
5027     INSN(fbcc,      f280, ffc0, CF_FPU);
5028     INSN(frestore,  f340, ffc0, CF_FPU);
5029     INSN(fsave,     f340, ffc0, CF_FPU);
5030     INSN(intouch,   f340, ffc0, CF_ISA_A);
5031     INSN(cpushl,    f428, ff38, CF_ISA_A);
5032     INSN(wddata,    fb00, ff00, CF_ISA_A);
5033     INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
5034 #undef INSN
5035 }
5036 
5037 /* ??? Some of this implementation is not exception safe.  We should always
5038    write back the result to memory before setting the condition codes.  */
5039 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
5040 {
5041     uint16_t insn = read_im16(env, s);
5042     opcode_table[insn](env, s, insn);
5043     do_writebacks(s);
5044 }
5045 
5046 /* generate intermediate code for basic block 'tb'.  */
5047 void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
5048 {
5049     M68kCPU *cpu = m68k_env_get_cpu(env);
5050     CPUState *cs = CPU(cpu);
5051     DisasContext dc1, *dc = &dc1;
5052     target_ulong pc_start;
5053     int pc_offset;
5054     int num_insns;
5055     int max_insns;
5056 
5057     /* generate intermediate code */
5058     pc_start = tb->pc;
5059 
5060     dc->tb = tb;
5061 
5062     dc->env = env;
5063     dc->is_jmp = DISAS_NEXT;
5064     dc->pc = pc_start;
5065     dc->cc_op = CC_OP_DYNAMIC;
5066     dc->cc_op_synced = 1;
5067     dc->singlestep_enabled = cs->singlestep_enabled;
5068     dc->fpcr = env->fpcr;
5069     dc->user = (env->sr & SR_S) == 0;
5070     dc->done_mac = 0;
5071     dc->writeback_mask = 0;
5072     num_insns = 0;
5073     max_insns = tb->cflags & CF_COUNT_MASK;
5074     if (max_insns == 0) {
5075         max_insns = CF_COUNT_MASK;
5076     }
5077     if (max_insns > TCG_MAX_INSNS) {
5078         max_insns = TCG_MAX_INSNS;
5079     }
5080 
5081     gen_tb_start(tb);
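         /* Translate one instruction at a time until the block ends: a jump,
            a full op buffer, single-stepping, the page-size limit, or the
            instruction budget.  */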
5082     do {
5083         pc_offset = dc->pc - pc_start;
5084         gen_throws_exception = NULL;
5085         tcg_gen_insn_start(dc->pc, dc->cc_op);
5086         num_insns++;
5087 
5088         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5089             gen_exception(dc, dc->pc, EXCP_DEBUG);
5090             dc->is_jmp = DISAS_JUMP;
5091             /* The address covered by the breakpoint must be included in
5092                [tb->pc, tb->pc + tb->size) in order for it to be
5093                properly cleared -- thus we increment the PC here so that
5094                the logic setting tb->size below does the right thing.  */
5095             dc->pc += 2;
5096             break;
5097         }
5098 
5099         if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5100             gen_io_start();
5101         }
5102 
5103         dc->insn_pc = dc->pc;
5104         disas_m68k_insn(env, dc);
5105     } while (!dc->is_jmp && !tcg_op_buf_full() &&
5106              !cs->singlestep_enabled &&
5107              !singlestep &&
5108              (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
5109              num_insns < max_insns);
5110 
5111     if (tb->cflags & CF_LAST_IO)
5112         gen_io_end();
5113     if (unlikely(cs->singlestep_enabled)) {
5114         /* Make sure the pc is updated, and raise a debug exception.  */
5115         if (!dc->is_jmp) {
5116             update_cc_op(dc);
5117             tcg_gen_movi_i32(QREG_PC, dc->pc);
5118         }
5119         gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
5120     } else {
5121         switch(dc->is_jmp) {
5122         case DISAS_NEXT:
5123             update_cc_op(dc);
5124             gen_jmp_tb(dc, 0, dc->pc);
5125             break;
5126         default:
5127         case DISAS_JUMP:
5128         case DISAS_UPDATE:
5129             update_cc_op(dc);
5130             /* indicate that the hash table must be used to find the next TB */
5131             tcg_gen_exit_tb(0);
5132             break;
5133         case DISAS_TB_JUMP:
5134             /* nothing more to generate */
5135             break;
5136         }
5137     }
5138     gen_tb_end(tb, num_insns);
5139 
5140 #ifdef DEBUG_DISAS
5141     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5142         && qemu_log_in_addr_range(pc_start)) {
5143         qemu_log_lock();
5144         qemu_log("----------------\n");
5145         qemu_log("IN: %s\n", lookup_symbol(pc_start));
5146         log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
5147         qemu_log("\n");
5148         qemu_log_unlock();
5149     }
5150 #endif
5151     tb->size = dc->pc - pc_start;
5152     tb->icount = num_insns;
5153 }
5154 
5155 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
5156                          int flags)
5157 {
5158     M68kCPU *cpu = M68K_CPU(cs);
5159     CPUM68KState *env = &cpu->env;
5160     int i;
5161     uint16_t sr;
5162     CPU_DoubleU u;
5163     for (i = 0; i < 8; i++)
5164       {
5165         u.d = env->fregs[i];
5166         cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
5167                     i, env->dregs[i], i, env->aregs[i],
5168                     i, u.l.upper, u.l.lower, *(double *)&u.d);
5169       }
5170     cpu_fprintf (f, "PC = %08x   ", env->pc);
5171     sr = env->sr | cpu_m68k_get_ccr(env);
5172     cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
5173                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
5174                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
5175     cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
5176 }
5177 
5178 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
5179                           target_ulong *data)
5180 {
5181     int cc_op = data[1];
5182     env->pc = data[0];
5183     if (cc_op != CC_OP_DYNAMIC) {
5184         env->cc_op = cc_op;
5185     }
5186 }
5187