xref: /openbmc/qemu/target/m68k/translate.c (revision 1c2adb95)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35 
36 //#define DEBUG_DISPATCH 1
37 
38 #define DEFO32(name, offset) static TCGv QREG_##name;
39 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
40 #include "qregs.def"
41 #undef DEFO32
42 #undef DEFO64
43 
44 static TCGv_i32 cpu_halted;
45 static TCGv_i32 cpu_exception_index;
46 
47 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
48 static TCGv cpu_dregs[8];
49 static TCGv cpu_aregs[8];
50 static TCGv_i64 cpu_macc[4];
51 
52 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
53 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
54 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
55 #define MACREG(acc)     cpu_macc[acc]
56 #define QREG_SP         get_areg(s, 7)
57 
58 static TCGv NULL_QREG;
59 #define IS_NULL_QREG(t) (t == NULL_QREG)
60 /* Used to distinguish stores from bad addressing modes.  */
61 static TCGv store_dummy;
62 
63 #include "exec/gen-icount.h"
64 
65 void m68k_tcg_init(void)
66 {
67     char *p;
68     int i;
69 
70 #define DEFO32(name, offset) \
71     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
72         offsetof(CPUM68KState, offset), #name);
73 #define DEFO64(name, offset) \
74     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
75         offsetof(CPUM68KState, offset), #name);
76 #include "qregs.def"
77 #undef DEFO32
78 #undef DEFO64
79 
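    /*
     * cpu_env points at the CPUM68KState embedded in M68kCPU; the CPUState
     * fields (halted, exception_index) precede it, hence the negative
     * offset adjustments below.
     */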
80     cpu_halted = tcg_global_mem_new_i32(cpu_env,
81                                         -offsetof(M68kCPU, env) +
82                                         offsetof(CPUState, halted), "HALTED");
83     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
84                                                  -offsetof(M68kCPU, env) +
85                                                  offsetof(CPUState, exception_index),
86                                                  "EXCEPTION");
87 
88     p = cpu_reg_names;
89     for (i = 0; i < 8; i++) {
90         sprintf(p, "D%d", i);
91         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
92                                           offsetof(CPUM68KState, dregs[i]), p);
93         p += 3;
94         sprintf(p, "A%d", i);
95         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
96                                           offsetof(CPUM68KState, aregs[i]), p);
97         p += 3;
98     }
99     for (i = 0; i < 4; i++) {
100         sprintf(p, "ACC%d", i);
101         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
102                                          offsetof(CPUM68KState, macc[i]), p);
103         p += 5;
104     }
105 
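    /*
     * NULL_QREG and store_dummy are only used as sentinel values:
     * IS_NULL_QREG() marks an invalid addressing mode and store_dummy
     * marks a store result.  They are never loaded or stored, so the
     * bogus negative env offsets are never dereferenced.
     */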
106     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
107     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
108 }
109 
110 /* internal defines */
111 typedef struct DisasContext {
112     CPUM68KState *env;
113     target_ulong insn_pc; /* Start of the current instruction.  */
114     target_ulong pc;
115     int is_jmp;
116     CCOp cc_op; /* Current CC operation */
117     int cc_op_synced;
118     int user;
119     struct TranslationBlock *tb;
120     int singlestep_enabled;
121     TCGv_i64 mactmp;
122     int done_mac;
123     int writeback_mask;
124     TCGv writeback[8];
125 } DisasContext;
126 
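/*
 * Address register updates are delayed until the end of the instruction:
 * get_areg() returns any pending value, delay_set_areg() queues a new one
 * in writeback[]/writeback_mask, and do_writebacks() commits the queued
 * values to cpu_aregs[], so a fault taken mid-instruction does not see
 * partially updated address registers.
 */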
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129     if (s->writeback_mask & (1 << regno)) {
130         return s->writeback[regno];
131     } else {
132         return cpu_aregs[regno];
133     }
134 }
135 
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137                            TCGv val, bool give_temp)
138 {
139     if (s->writeback_mask & (1 << regno)) {
140         if (give_temp) {
141             tcg_temp_free(s->writeback[regno]);
142             s->writeback[regno] = val;
143         } else {
144             tcg_gen_mov_i32(s->writeback[regno], val);
145         }
146     } else {
147         s->writeback_mask |= 1 << regno;
148         if (give_temp) {
149             s->writeback[regno] = val;
150         } else {
151             TCGv tmp = tcg_temp_new();
152             s->writeback[regno] = tmp;
153             tcg_gen_mov_i32(tmp, val);
154         }
155     }
156 }
157 
158 static void do_writebacks(DisasContext *s)
159 {
160     unsigned mask = s->writeback_mask;
161     if (mask) {
162         s->writeback_mask = 0;
163         do {
164             unsigned regno = ctz32(mask);
165             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
166             tcg_temp_free(s->writeback[regno]);
167             mask &= mask - 1;
168         } while (mask);
169     }
170 }
171 
172 /* is_jmp field values */
173 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
174 #define DISAS_UPDATE    DISAS_TARGET_1 /* cpu state was modified dynamically */
175 #define DISAS_TB_JUMP   DISAS_TARGET_2 /* only pc was modified statically */
176 #define DISAS_JUMP_NEXT DISAS_TARGET_3
177 
178 #if defined(CONFIG_USER_ONLY)
179 #define IS_USER(s) 1
180 #else
181 #define IS_USER(s) s->user
182 #endif
183 
184 /* XXX: move that elsewhere */
185 /* ??? Fix exceptions.  */
186 static void *gen_throws_exception;
187 #define gen_last_qop NULL
188 
189 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
190 
191 #ifdef DEBUG_DISPATCH
192 #define DISAS_INSN(name)                                                \
193     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
194                                   uint16_t insn);                       \
195     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
196                              uint16_t insn)                             \
197     {                                                                   \
198         qemu_log("Dispatch " #name "\n");                               \
199         real_disas_##name(env, s, insn);                                \
200     }                                                                   \
201     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
202                                   uint16_t insn)
203 #else
204 #define DISAS_INSN(name)                                                \
205     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
206                              uint16_t insn)
207 #endif
208 
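/*
 * Condition codes are evaluated lazily: s->cc_op records which operation
 * last set them, and QREG_CC_N/Z/V/C/X hold either the real flags
 * (CC_OP_FLAGS) or the operands needed to recompute them.  cc_op_live[]
 * lists the flag bits each form keeps live, so set_cc_op() can discard
 * values that the new form makes dead; gen_flush_flags() converts back
 * to the explicit CC_OP_FLAGS representation when an insn needs it.
 */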
209 static const uint8_t cc_op_live[CC_OP_NB] = {
210     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
211     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
212     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
213     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
214     [CC_OP_LOGIC] = CCF_X | CCF_N
215 };
216 
217 static void set_cc_op(DisasContext *s, CCOp op)
218 {
219     CCOp old_op = s->cc_op;
220     int dead;
221 
222     if (old_op == op) {
223         return;
224     }
225     s->cc_op = op;
226     s->cc_op_synced = 0;
227 
228     /* Discard CC computation that will no longer be used.
229        Note that X and N are never dead.  */
230     dead = cc_op_live[old_op] & ~cc_op_live[op];
231     if (dead & CCF_C) {
232         tcg_gen_discard_i32(QREG_CC_C);
233     }
234     if (dead & CCF_Z) {
235         tcg_gen_discard_i32(QREG_CC_Z);
236     }
237     if (dead & CCF_V) {
238         tcg_gen_discard_i32(QREG_CC_V);
239     }
240 }
241 
242 /* Update the CPU env CC_OP state.  */
243 static void update_cc_op(DisasContext *s)
244 {
245     if (!s->cc_op_synced) {
246         s->cc_op_synced = 1;
247         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
248     }
249 }
250 
251 /* Generate a jump to an immediate address.  */
252 static void gen_jmp_im(DisasContext *s, uint32_t dest)
253 {
254     update_cc_op(s);
255     tcg_gen_movi_i32(QREG_PC, dest);
256     s->is_jmp = DISAS_JUMP;
257 }
258 
259 /* Generate a jump to the address in qreg DEST.  */
260 static void gen_jmp(DisasContext *s, TCGv dest)
261 {
262     update_cc_op(s);
263     tcg_gen_mov_i32(QREG_PC, dest);
264     s->is_jmp = DISAS_JUMP;
265 }
266 
267 static void gen_raise_exception(int nr)
268 {
269     TCGv_i32 tmp = tcg_const_i32(nr);
270 
271     gen_helper_raise_exception(cpu_env, tmp);
272     tcg_temp_free_i32(tmp);
273 }
274 
275 static void gen_exception(DisasContext *s, uint32_t where, int nr)
276 {
277     update_cc_op(s);
278     gen_jmp_im(s, where);
279     gen_raise_exception(nr);
280 }
281 
282 static inline void gen_addr_fault(DisasContext *s)
283 {
284     gen_exception(s, s->insn_pc, EXCP_ADDRESS);
285 }
286 
287 /* Generate a load from the specified address.  Narrow values are
288    sign or zero extended to full register width, according to SIGN.  */
289 static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
290 {
291     TCGv tmp;
292     int index = IS_USER(s);
293     tmp = tcg_temp_new_i32();
294     switch(opsize) {
295     case OS_BYTE:
296         if (sign)
297             tcg_gen_qemu_ld8s(tmp, addr, index);
298         else
299             tcg_gen_qemu_ld8u(tmp, addr, index);
300         break;
301     case OS_WORD:
302         if (sign)
303             tcg_gen_qemu_ld16s(tmp, addr, index);
304         else
305             tcg_gen_qemu_ld16u(tmp, addr, index);
306         break;
307     case OS_LONG:
308         tcg_gen_qemu_ld32u(tmp, addr, index);
309         break;
310     default:
311         g_assert_not_reached();
312     }
313     gen_throws_exception = gen_last_qop;
314     return tmp;
315 }
316 
317 /* Generate a store.  */
318 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
319 {
320     int index = IS_USER(s);
321     switch(opsize) {
322     case OS_BYTE:
323         tcg_gen_qemu_st8(val, addr, index);
324         break;
325     case OS_WORD:
326         tcg_gen_qemu_st16(val, addr, index);
327         break;
328     case OS_LONG:
329         tcg_gen_qemu_st32(val, addr, index);
330         break;
331     default:
332         g_assert_not_reached();
333     }
334     gen_throws_exception = gen_last_qop;
335 }
336 
337 typedef enum {
338     EA_STORE,
339     EA_LOADU,
340     EA_LOADS
341 } ea_what;
342 
343 /* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
344    EA_LOADS, otherwise generate a store.  */
345 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
346                      ea_what what)
347 {
348     if (what == EA_STORE) {
349         gen_store(s, opsize, addr, val);
350         return store_dummy;
351     } else {
352         return gen_load(s, opsize, addr, what == EA_LOADS);
353     }
354 }
355 
356 /* Read a 16-bit immediate constant */
357 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
358 {
359     uint16_t im;
360     im = cpu_lduw_code(env, s->pc);
361     s->pc += 2;
362     return im;
363 }
364 
365 /* Read an 8-bit immediate constant */
366 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
367 {
368     return read_im16(env, s);
369 }
370 
371 /* Read a 32-bit immediate constant.  */
372 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
373 {
374     uint32_t im;
375     im = read_im16(env, s) << 16;
376     im |= 0xffff & read_im16(env, s);
377     return im;
378 }
379 
380 /* Read a 64-bit immediate constant.  */
381 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
382 {
383     uint64_t im;
384     im = (uint64_t)read_im32(env, s) << 32;
385     im |= (uint64_t)read_im32(env, s);
386     return im;
387 }
388 
389 /* Calculate an address index.  */
390 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
391 {
392     TCGv add;
393     int scale;
394 
395     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
396     if ((ext & 0x800) == 0) {
397         tcg_gen_ext16s_i32(tmp, add);
398         add = tmp;
399     }
400     scale = (ext >> 9) & 3;
401     if (scale != 0) {
402         tcg_gen_shli_i32(tmp, add, scale);
403         add = tmp;
404     }
405     return add;
406 }
407 
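/*
 * Layout of the indexed-mode extension word, as decoded by
 * gen_addr_index() and gen_lea_indexed():
 *
 *   bit  15     index register type (0 = Dn, 1 = An)
 *   bits 14-12  index register number
 *   bit  11     index size (0 = sign-extended word, 1 = long)
 *   bits 10-9   scale (the index is shifted left by this amount)
 *   bit  8      0 = brief format (8-bit displacement in bits 7-0),
 *               1 = full format (68020+), in which case:
 *   bit  7        base register suppress
 *   bit  6        index suppress
 *   bits 5-4      base displacement size (01 = null, 10 = word, 11 = long)
 *   bits 2-0      memory indirection and outer displacement size
 */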
408 /* Handle a base + index + displacement effective address.
409    A NULL_QREG base means pc-relative.  */
410 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
411 {
412     uint32_t offset;
413     uint16_t ext;
414     TCGv add;
415     TCGv tmp;
416     uint32_t bd, od;
417 
418     offset = s->pc;
419     ext = read_im16(env, s);
420 
421     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
422         return NULL_QREG;
423 
424     if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
425         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
426         ext &= ~(3 << 9);
427     }
428 
429     if (ext & 0x100) {
430         /* full extension word format */
431         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
432             return NULL_QREG;
433 
434         if ((ext & 0x30) > 0x10) {
435             /* base displacement */
436             if ((ext & 0x30) == 0x20) {
437                 bd = (int16_t)read_im16(env, s);
438             } else {
439                 bd = read_im32(env, s);
440             }
441         } else {
442             bd = 0;
443         }
444         tmp = tcg_temp_new();
445         if ((ext & 0x44) == 0) {
446             /* pre-index */
447             add = gen_addr_index(s, ext, tmp);
448         } else {
449             add = NULL_QREG;
450         }
451         if ((ext & 0x80) == 0) {
452             /* base not suppressed */
453             if (IS_NULL_QREG(base)) {
454                 base = tcg_const_i32(offset + bd);
455                 bd = 0;
456             }
457             if (!IS_NULL_QREG(add)) {
458                 tcg_gen_add_i32(tmp, add, base);
459                 add = tmp;
460             } else {
461                 add = base;
462             }
463         }
464         if (!IS_NULL_QREG(add)) {
465             if (bd != 0) {
466                 tcg_gen_addi_i32(tmp, add, bd);
467                 add = tmp;
468             }
469         } else {
470             add = tcg_const_i32(bd);
471         }
472         if ((ext & 3) != 0) {
473             /* memory indirect */
474             base = gen_load(s, OS_LONG, add, 0);
475             if ((ext & 0x44) == 4) {
476                 add = gen_addr_index(s, ext, tmp);
477                 tcg_gen_add_i32(tmp, add, base);
478                 add = tmp;
479             } else {
480                 add = base;
481             }
482             if ((ext & 3) > 1) {
483                 /* outer displacement */
484                 if ((ext & 3) == 2) {
485                     od = (int16_t)read_im16(env, s);
486                 } else {
487                     od = read_im32(env, s);
488                 }
489             } else {
490                 od = 0;
491             }
492             if (od != 0) {
493                 tcg_gen_addi_i32(tmp, add, od);
494                 add = tmp;
495             }
496         }
497     } else {
498         /* brief extension word format */
499         tmp = tcg_temp_new();
500         add = gen_addr_index(s, ext, tmp);
501         if (!IS_NULL_QREG(base)) {
502             tcg_gen_add_i32(tmp, add, base);
503             if ((int8_t)ext)
504                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
505         } else {
506             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
507         }
508         add = tmp;
509     }
510     return add;
511 }
512 
513 /* Sign or zero extend a value.  */
514 
515 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
516 {
517     switch (opsize) {
518     case OS_BYTE:
519         if (sign) {
520             tcg_gen_ext8s_i32(res, val);
521         } else {
522             tcg_gen_ext8u_i32(res, val);
523         }
524         break;
525     case OS_WORD:
526         if (sign) {
527             tcg_gen_ext16s_i32(res, val);
528         } else {
529             tcg_gen_ext16u_i32(res, val);
530         }
531         break;
532     case OS_LONG:
533         tcg_gen_mov_i32(res, val);
534         break;
535     default:
536         g_assert_not_reached();
537     }
538 }
539 
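/*
 * Deferred flag conventions: the CC_OP_ADDx/SUBx forms keep the
 * sign-extended result in QREG_CC_N and the source operand in QREG_CC_V,
 * with X already computed at operation time; the CC_OP_CMPx forms keep
 * the two compared operands in QREG_CC_N and QREG_CC_V; CC_OP_LOGIC keeps
 * the sign-extended result in QREG_CC_N.  gen_flush_flags() recovers the
 * real CCR flags from these (see also gen_update_cc_add/gen_update_cc_cmp).
 */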
540 /* Evaluate all the CC flags.  */
541 
542 static void gen_flush_flags(DisasContext *s)
543 {
544     TCGv t0, t1;
545 
546     switch (s->cc_op) {
547     case CC_OP_FLAGS:
548         return;
549 
550     case CC_OP_ADDB:
551     case CC_OP_ADDW:
552     case CC_OP_ADDL:
553         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
554         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
555         /* Compute signed overflow for addition.  */
556         t0 = tcg_temp_new();
557         t1 = tcg_temp_new();
558         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
559         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
560         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
561         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
562         tcg_temp_free(t0);
563         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
564         tcg_temp_free(t1);
565         break;
566 
567     case CC_OP_SUBB:
568     case CC_OP_SUBW:
569     case CC_OP_SUBL:
570         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
571         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
572         /* Compute signed overflow for subtraction.  */
573         t0 = tcg_temp_new();
574         t1 = tcg_temp_new();
575         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
576         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
577         tcg_gen_xor_i32(t1, QREG_CC_N, t0);
578         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
579         tcg_temp_free(t0);
580         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
581         tcg_temp_free(t1);
582         break;
583 
584     case CC_OP_CMPB:
585     case CC_OP_CMPW:
586     case CC_OP_CMPL:
587         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
588         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
589         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
590         /* Compute signed overflow for subtraction.  */
591         t0 = tcg_temp_new();
592         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
593         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
594         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
595         tcg_temp_free(t0);
596         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
597         break;
598 
599     case CC_OP_LOGIC:
600         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
601         tcg_gen_movi_i32(QREG_CC_C, 0);
602         tcg_gen_movi_i32(QREG_CC_V, 0);
603         break;
604 
605     case CC_OP_DYNAMIC:
606         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
607         s->cc_op_synced = 1;
608         break;
609 
610     default:
611         t0 = tcg_const_i32(s->cc_op);
612         gen_helper_flush_flags(cpu_env, t0);
613         tcg_temp_free(t0);
614         s->cc_op_synced = 1;
615         break;
616     }
617 
618     /* Note that the flush_flags helper also writes CC_OP_FLAGS to env->cc_op.  */
619     s->cc_op = CC_OP_FLAGS;
620 }
621 
622 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
623 {
624     TCGv tmp;
625 
626     if (opsize == OS_LONG) {
627         tmp = val;
628     } else {
629         tmp = tcg_temp_new();
630         gen_ext(tmp, val, opsize, sign);
631     }
632 
633     return tmp;
634 }
635 
636 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
637 {
638     gen_ext(QREG_CC_N, val, opsize, 1);
639     set_cc_op(s, CC_OP_LOGIC);
640 }
641 
642 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
643 {
644     tcg_gen_mov_i32(QREG_CC_N, dest);
645     tcg_gen_mov_i32(QREG_CC_V, src);
646     set_cc_op(s, CC_OP_CMPB + opsize);
647 }
648 
649 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
650 {
651     gen_ext(QREG_CC_N, dest, opsize, 1);
652     tcg_gen_mov_i32(QREG_CC_V, src);
653 }
654 
655 static inline int opsize_bytes(int opsize)
656 {
657     switch (opsize) {
658     case OS_BYTE: return 1;
659     case OS_WORD: return 2;
660     case OS_LONG: return 4;
661     case OS_SINGLE: return 4;
662     case OS_DOUBLE: return 8;
663     case OS_EXTENDED: return 12;
664     case OS_PACKED: return 12;
665     default:
666         g_assert_not_reached();
667     }
668 }
669 
670 static inline int insn_opsize(int insn)
671 {
672     switch ((insn >> 6) & 3) {
673     case 0: return OS_BYTE;
674     case 1: return OS_WORD;
675     case 2: return OS_LONG;
676     default:
677         g_assert_not_reached();
678     }
679 }
680 
681 static inline int ext_opsize(int ext, int pos)
682 {
683     switch ((ext >> pos) & 7) {
684     case 0: return OS_LONG;
685     case 1: return OS_SINGLE;
686     case 2: return OS_EXTENDED;
687     case 3: return OS_PACKED;
688     case 4: return OS_WORD;
689     case 5: return OS_DOUBLE;
690     case 6: return OS_BYTE;
691     default:
692         g_assert_not_reached();
693     }
694 }
695 
696 /* Assign value to a register.  If the width is less than the register width
697    only the low part of the register is set.  */
698 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
699 {
700     TCGv tmp;
701     switch (opsize) {
702     case OS_BYTE:
703         tcg_gen_andi_i32(reg, reg, 0xffffff00);
704         tmp = tcg_temp_new();
705         tcg_gen_ext8u_i32(tmp, val);
706         tcg_gen_or_i32(reg, reg, tmp);
707         tcg_temp_free(tmp);
708         break;
709     case OS_WORD:
710         tcg_gen_andi_i32(reg, reg, 0xffff0000);
711         tmp = tcg_temp_new();
712         tcg_gen_ext16u_i32(tmp, val);
713         tcg_gen_or_i32(reg, reg, tmp);
714         tcg_temp_free(tmp);
715         break;
716     case OS_LONG:
717     case OS_SINGLE:
718         tcg_gen_mov_i32(reg, val);
719         break;
720     default:
721         g_assert_not_reached();
722     }
723 }
724 
725 /* Generate code for an "effective address".  Does not adjust the base
726    register for autoincrement addressing modes.  */
727 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
728                          int mode, int reg0, int opsize)
729 {
730     TCGv reg;
731     TCGv tmp;
732     uint16_t ext;
733     uint32_t offset;
734 
735     switch (mode) {
736     case 0: /* Data register direct.  */
737     case 1: /* Address register direct.  */
738         return NULL_QREG;
739     case 3: /* Indirect postincrement.  */
740         if (opsize == OS_UNSIZED) {
741             return NULL_QREG;
742         }
743         /* fallthru */
744     case 2: /* Indirect register */
745         return get_areg(s, reg0);
746     case 4: /* Indirect predecrement.  */
747         if (opsize == OS_UNSIZED) {
748             return NULL_QREG;
749         }
750         reg = get_areg(s, reg0);
751         tmp = tcg_temp_new();
752         if (reg0 == 7 && opsize == OS_BYTE &&
753             m68k_feature(s->env, M68K_FEATURE_M68000)) {
754             tcg_gen_subi_i32(tmp, reg, 2);
755         } else {
756             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
757         }
758         return tmp;
759     case 5: /* Indirect displacement.  */
760         reg = get_areg(s, reg0);
761         tmp = tcg_temp_new();
762         ext = read_im16(env, s);
763         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
764         return tmp;
765     case 6: /* Indirect index + displacement.  */
766         reg = get_areg(s, reg0);
767         return gen_lea_indexed(env, s, reg);
768     case 7: /* Other */
769         switch (reg0) {
770         case 0: /* Absolute short.  */
771             offset = (int16_t)read_im16(env, s);
772             return tcg_const_i32(offset);
773         case 1: /* Absolute long.  */
774             offset = read_im32(env, s);
775             return tcg_const_i32(offset);
776         case 2: /* pc displacement  */
777             offset = s->pc;
778             offset += (int16_t)read_im16(env, s);
779             return tcg_const_i32(offset);
780         case 3: /* pc index+displacement.  */
781             return gen_lea_indexed(env, s, NULL_QREG);
782         case 4: /* Immediate.  */
783         default:
784             return NULL_QREG;
785         }
786     }
787     /* Should never happen.  */
788     return NULL_QREG;
789 }
790 
791 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
792                     int opsize)
793 {
794     int mode = extract32(insn, 3, 3);
795     int reg0 = REG(insn, 0);
796     return gen_lea_mode(env, s, mode, reg0, opsize);
797 }
798 
799 /* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
800    this is a write, otherwise it is a read (EA_LOADS == sign extend,
801    EA_LOADU == zero extend).  ADDRP is non-null for readwrite operands.  */
802 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
803                         int opsize, TCGv val, TCGv *addrp, ea_what what)
804 {
805     TCGv reg, tmp, result;
806     int32_t offset;
807 
808     switch (mode) {
809     case 0: /* Data register direct.  */
810         reg = cpu_dregs[reg0];
811         if (what == EA_STORE) {
812             gen_partset_reg(opsize, reg, val);
813             return store_dummy;
814         } else {
815             return gen_extend(reg, opsize, what == EA_LOADS);
816         }
817     case 1: /* Address register direct.  */
818         reg = get_areg(s, reg0);
819         if (what == EA_STORE) {
820             tcg_gen_mov_i32(reg, val);
821             return store_dummy;
822         } else {
823             return gen_extend(reg, opsize, what == EA_LOADS);
824         }
825     case 2: /* Indirect register */
826         reg = get_areg(s, reg0);
827         return gen_ldst(s, opsize, reg, val, what);
828     case 3: /* Indirect postincrement.  */
829         reg = get_areg(s, reg0);
830         result = gen_ldst(s, opsize, reg, val, what);
831         if (what == EA_STORE || !addrp) {
832             TCGv tmp = tcg_temp_new();
833             if (reg0 == 7 && opsize == OS_BYTE &&
834                 m68k_feature(s->env, M68K_FEATURE_M68000)) {
835                 tcg_gen_addi_i32(tmp, reg, 2);
836             } else {
837                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
838             }
839             delay_set_areg(s, reg0, tmp, true);
840         }
841         return result;
842     case 4: /* Indirect predecrement.  */
843         if (addrp && what == EA_STORE) {
844             tmp = *addrp;
845         } else {
846             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
847             if (IS_NULL_QREG(tmp)) {
848                 return tmp;
849             }
850             if (addrp) {
851                 *addrp = tmp;
852             }
853         }
854         result = gen_ldst(s, opsize, tmp, val, what);
855         if (what == EA_STORE || !addrp) {
856             delay_set_areg(s, reg0, tmp, false);
857         }
858         return result;
859     case 5: /* Indirect displacement.  */
860     case 6: /* Indirect index + displacement.  */
861     do_indirect:
862         if (addrp && what == EA_STORE) {
863             tmp = *addrp;
864         } else {
865             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
866             if (IS_NULL_QREG(tmp)) {
867                 return tmp;
868             }
869             if (addrp) {
870                 *addrp = tmp;
871             }
872         }
873         return gen_ldst(s, opsize, tmp, val, what);
874     case 7: /* Other */
875         switch (reg0) {
876         case 0: /* Absolute short.  */
877         case 1: /* Absolute long.  */
878         case 2: /* pc displacement  */
879         case 3: /* pc index+displacement.  */
880             goto do_indirect;
881         case 4: /* Immediate.  */
882             /* Sign extend values for consistency.  */
883             switch (opsize) {
884             case OS_BYTE:
885                 if (what == EA_LOADS) {
886                     offset = (int8_t)read_im8(env, s);
887                 } else {
888                     offset = read_im8(env, s);
889                 }
890                 break;
891             case OS_WORD:
892                 if (what == EA_LOADS) {
893                     offset = (int16_t)read_im16(env, s);
894                 } else {
895                     offset = read_im16(env, s);
896                 }
897                 break;
898             case OS_LONG:
899                 offset = read_im32(env, s);
900                 break;
901             default:
902                 g_assert_not_reached();
903             }
904             return tcg_const_i32(offset);
905         default:
906             return NULL_QREG;
907         }
908     }
909     /* Should never happen.  */
910     return NULL_QREG;
911 }
912 
913 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
914                    int opsize, TCGv val, TCGv *addrp, ea_what what)
915 {
916     int mode = extract32(insn, 3, 3);
917     int reg0 = REG(insn, 0);
918     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
919 }
920 
921 static TCGv_ptr gen_fp_ptr(int freg)
922 {
923     TCGv_ptr fp = tcg_temp_new_ptr();
924     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
925     return fp;
926 }
927 
928 static TCGv_ptr gen_fp_result_ptr(void)
929 {
930     TCGv_ptr fp = tcg_temp_new_ptr();
931     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
932     return fp;
933 }
934 
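/*
 * FP registers hold 80-bit extended-precision values: FPReg.l.upper is
 * the 16-bit sign/exponent word and FPReg.l.lower the 64-bit mantissa,
 * which is why register moves are done as a 16-bit plus a 64-bit access.
 */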
935 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
936 {
937     TCGv t32;
938     TCGv_i64 t64;
939 
940     t32 = tcg_temp_new();
941     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
942     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
943     tcg_temp_free(t32);
944 
945     t64 = tcg_temp_new_i64();
946     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
947     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
948     tcg_temp_free_i64(t64);
949 }
950 
951 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
952 {
953     TCGv tmp;
954     TCGv_i64 t64;
955     int index = IS_USER(s);
956 
957     t64 = tcg_temp_new_i64();
958     tmp = tcg_temp_new();
959     switch (opsize) {
960     case OS_BYTE:
961         tcg_gen_qemu_ld8s(tmp, addr, index);
962         gen_helper_exts32(cpu_env, fp, tmp);
963         break;
964     case OS_WORD:
965         tcg_gen_qemu_ld16s(tmp, addr, index);
966         gen_helper_exts32(cpu_env, fp, tmp);
967         break;
968     case OS_LONG:
969         tcg_gen_qemu_ld32u(tmp, addr, index);
970         gen_helper_exts32(cpu_env, fp, tmp);
971         break;
972     case OS_SINGLE:
973         tcg_gen_qemu_ld32u(tmp, addr, index);
974         gen_helper_extf32(cpu_env, fp, tmp);
975         break;
976     case OS_DOUBLE:
977         tcg_gen_qemu_ld64(t64, addr, index);
978         gen_helper_extf64(cpu_env, fp, t64);
979         tcg_temp_free_i64(t64);
980         break;
981     case OS_EXTENDED:
982         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
983             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
984             break;
985         }
986         tcg_gen_qemu_ld32u(tmp, addr, index);
987         tcg_gen_shri_i32(tmp, tmp, 16);
988         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
989         tcg_gen_addi_i32(tmp, addr, 4);
990         tcg_gen_qemu_ld64(t64, tmp, index);
991         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
992         break;
993     case OS_PACKED:
994         /* unimplemented data type on 68040/ColdFire
995          * FIXME if needed for another FPU
996          */
997         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
998         break;
999     default:
1000         g_assert_not_reached();
1001     }
1002     tcg_temp_free(tmp);
1003     tcg_temp_free_i64(t64);
1004     gen_throws_exception = gen_last_qop;
1005 }
1006 
1007 static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
1008 {
1009     TCGv tmp;
1010     TCGv_i64 t64;
1011     int index = IS_USER(s);
1012 
1013     t64 = tcg_temp_new_i64();
1014     tmp = tcg_temp_new();
1015     switch (opsize) {
1016     case OS_BYTE:
1017         gen_helper_reds32(tmp, cpu_env, fp);
1018         tcg_gen_qemu_st8(tmp, addr, index);
1019         break;
1020     case OS_WORD:
1021         gen_helper_reds32(tmp, cpu_env, fp);
1022         tcg_gen_qemu_st16(tmp, addr, index);
1023         break;
1024     case OS_LONG:
1025         gen_helper_reds32(tmp, cpu_env, fp);
1026         tcg_gen_qemu_st32(tmp, addr, index);
1027         break;
1028     case OS_SINGLE:
1029         gen_helper_redf32(tmp, cpu_env, fp);
1030         tcg_gen_qemu_st32(tmp, addr, index);
1031         break;
1032     case OS_DOUBLE:
1033         gen_helper_redf64(t64, cpu_env, fp);
1034         tcg_gen_qemu_st64(t64, addr, index);
1035         break;
1036     case OS_EXTENDED:
1037         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1038             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1039             break;
1040         }
1041         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1042         tcg_gen_shli_i32(tmp, tmp, 16);
1043         tcg_gen_qemu_st32(tmp, addr, index);
1044         tcg_gen_addi_i32(tmp, addr, 4);
1045         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1046         tcg_gen_qemu_st64(t64, tmp, index);
1047         break;
1048     case OS_PACKED:
1049         /* unimplemented data type on 68040/ColdFire
1050          * FIXME if needed for another FPU
1051          */
1052         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1053         break;
1054     default:
1055         g_assert_not_reached();
1056     }
1057     tcg_temp_free(tmp);
1058     tcg_temp_free_i64(t64);
1059     gen_throws_exception = gen_last_qop;
1060 }
1061 
1062 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1063                         TCGv_ptr fp, ea_what what)
1064 {
1065     if (what == EA_STORE) {
1066         gen_store_fp(s, opsize, addr, fp);
1067     } else {
1068         gen_load_fp(s, opsize, addr, fp);
1069     }
1070 }
1071 
1072 static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1073                           int reg0, int opsize, TCGv_ptr fp, ea_what what)
1074 {
1075     TCGv reg, addr, tmp;
1076     TCGv_i64 t64;
1077 
1078     switch (mode) {
1079     case 0: /* Data register direct.  */
1080         reg = cpu_dregs[reg0];
1081         if (what == EA_STORE) {
1082             switch (opsize) {
1083             case OS_BYTE:
1084             case OS_WORD:
1085             case OS_LONG:
1086                 gen_helper_reds32(reg, cpu_env, fp);
1087                 break;
1088             case OS_SINGLE:
1089                 gen_helper_redf32(reg, cpu_env, fp);
1090                 break;
1091             default:
1092                 g_assert_not_reached();
1093             }
1094         } else {
1095             tmp = tcg_temp_new();
1096             switch (opsize) {
1097             case OS_BYTE:
1098                 tcg_gen_ext8s_i32(tmp, reg);
1099                 gen_helper_exts32(cpu_env, fp, tmp);
1100                 break;
1101             case OS_WORD:
1102                 tcg_gen_ext16s_i32(tmp, reg);
1103                 gen_helper_exts32(cpu_env, fp, tmp);
1104                 break;
1105             case OS_LONG:
1106                 gen_helper_exts32(cpu_env, fp, reg);
1107                 break;
1108             case OS_SINGLE:
1109                 gen_helper_extf32(cpu_env, fp, reg);
1110                 break;
1111             default:
1112                 g_assert_not_reached();
1113             }
1114             tcg_temp_free(tmp);
1115         }
1116         return 0;
1117     case 1: /* Address register direct.  */
1118         return -1;
1119     case 2: /* Indirect register */
1120         addr = get_areg(s, reg0);
1121         gen_ldst_fp(s, opsize, addr, fp, what);
1122         return 0;
1123     case 3: /* Indirect postincrement.  */
1124         addr = cpu_aregs[reg0];
1125         gen_ldst_fp(s, opsize, addr, fp, what);
1126         tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1127         return 0;
1128     case 4: /* Indirect predecrement.  */
1129         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1130         if (IS_NULL_QREG(addr)) {
1131             return -1;
1132         }
1133         gen_ldst_fp(s, opsize, addr, fp, what);
1134         tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1135         return 0;
1136     case 5: /* Indirect displacement.  */
1137     case 6: /* Indirect index + displacement.  */
1138     do_indirect:
1139         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1140         if (IS_NULL_QREG(addr)) {
1141             return -1;
1142         }
1143         gen_ldst_fp(s, opsize, addr, fp, what);
1144         return 0;
1145     case 7: /* Other */
1146         switch (reg0) {
1147         case 0: /* Absolute short.  */
1148         case 1: /* Absolute long.  */
1149         case 2: /* pc displacement  */
1150         case 3: /* pc index+displacement.  */
1151             goto do_indirect;
1152         case 4: /* Immediate.  */
1153             if (what == EA_STORE) {
1154                 return -1;
1155             }
1156             switch (opsize) {
1157             case OS_BYTE:
1158                 tmp = tcg_const_i32((int8_t)read_im8(env, s));
1159                 gen_helper_exts32(cpu_env, fp, tmp);
1160                 tcg_temp_free(tmp);
1161                 break;
1162             case OS_WORD:
1163                 tmp = tcg_const_i32((int16_t)read_im16(env, s));
1164                 gen_helper_exts32(cpu_env, fp, tmp);
1165                 tcg_temp_free(tmp);
1166                 break;
1167             case OS_LONG:
1168                 tmp = tcg_const_i32(read_im32(env, s));
1169                 gen_helper_exts32(cpu_env, fp, tmp);
1170                 tcg_temp_free(tmp);
1171                 break;
1172             case OS_SINGLE:
1173                 tmp = tcg_const_i32(read_im32(env, s));
1174                 gen_helper_extf32(cpu_env, fp, tmp);
1175                 tcg_temp_free(tmp);
1176                 break;
1177             case OS_DOUBLE:
1178                 t64 = tcg_const_i64(read_im64(env, s));
1179                 gen_helper_extf64(cpu_env, fp, t64);
1180                 tcg_temp_free_i64(t64);
1181                 break;
1182             case OS_EXTENDED:
1183                 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1184                     gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1185                     break;
1186                 }
1187                 tmp = tcg_const_i32(read_im32(env, s) >> 16);
1188                 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1189                 tcg_temp_free(tmp);
1190                 t64 = tcg_const_i64(read_im64(env, s));
1191                 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1192                 tcg_temp_free_i64(t64);
1193                 break;
1194             case OS_PACKED:
1195                 /* unimplemented data type on 68040/ColdFire
1196                  * FIXME if needed for another FPU
1197                  */
1198                 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1199                 break;
1200             default:
1201                 g_assert_not_reached();
1202             }
1203             return 0;
1204         default:
1205             return -1;
1206         }
1207     }
1208     return -1;
1209 }
1210 
1211 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1212                        int opsize, TCGv_ptr fp, ea_what what)
1213 {
1214     int mode = extract32(insn, 3, 3);
1215     int reg0 = REG(insn, 0);
1216     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what);
1217 }
1218 
1219 typedef struct {
1220     TCGCond tcond;
1221     bool g1;
1222     bool g2;
1223     TCGv v1;
1224     TCGv v2;
1225 } DisasCompare;
1226 
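/*
 * Convert an m68k condition code (0..15) into a DisasCompare.  Each
 * even/odd pair of codes tests the same predicate: the code below
 * computes the condition for the odd-numbered member of the pair, and
 * the final tcg_invert_cond() flips it for the even-numbered one.
 * g1/g2 mark v1/v2 as TCG globals that free_cond() must not free.
 */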
1227 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1228 {
1229     TCGv tmp, tmp2;
1230     TCGCond tcond;
1231     CCOp op = s->cc_op;
1232 
1233     /* The CC_OP_CMP form can handle most normal comparisons directly.  */
1234     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1235         c->g1 = c->g2 = 1;
1236         c->v1 = QREG_CC_N;
1237         c->v2 = QREG_CC_V;
1238         switch (cond) {
1239         case 2: /* HI */
1240         case 3: /* LS */
1241             tcond = TCG_COND_LEU;
1242             goto done;
1243         case 4: /* CC */
1244         case 5: /* CS */
1245             tcond = TCG_COND_LTU;
1246             goto done;
1247         case 6: /* NE */
1248         case 7: /* EQ */
1249             tcond = TCG_COND_EQ;
1250             goto done;
1251         case 10: /* PL */
1252         case 11: /* MI */
1253             c->g1 = c->g2 = 0;
1254             c->v2 = tcg_const_i32(0);
1255             c->v1 = tmp = tcg_temp_new();
1256             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1257             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1258             /* fallthru */
1259         case 12: /* GE */
1260         case 13: /* LT */
1261             tcond = TCG_COND_LT;
1262             goto done;
1263         case 14: /* GT */
1264         case 15: /* LE */
1265             tcond = TCG_COND_LE;
1266             goto done;
1267         }
1268     }
1269 
1270     c->g1 = 1;
1271     c->g2 = 0;
1272     c->v2 = tcg_const_i32(0);
1273 
1274     switch (cond) {
1275     case 0: /* T */
1276     case 1: /* F */
1277         c->v1 = c->v2;
1278         tcond = TCG_COND_NEVER;
1279         goto done;
1280     case 14: /* GT (!(Z || (N ^ V))) */
1281     case 15: /* LE (Z || (N ^ V)) */
1282         /* Logic operations clear V, which simplifies LE to (Z || N),
1283            and since Z and N are co-located, this becomes a normal
1284            comparison vs N.  */
1285         if (op == CC_OP_LOGIC) {
1286             c->v1 = QREG_CC_N;
1287             tcond = TCG_COND_LE;
1288             goto done;
1289         }
1290         break;
1291     case 12: /* GE (!(N ^ V)) */
1292     case 13: /* LT (N ^ V) */
1293         /* Logic operations clear V, which simplifies this to N.  */
1294         if (op != CC_OP_LOGIC) {
1295             break;
1296         }
1297         /* fallthru */
1298     case 10: /* PL (!N) */
1299     case 11: /* MI (N) */
1300         /* Several cases represent N normally.  */
1301         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1302             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1303             op == CC_OP_LOGIC) {
1304             c->v1 = QREG_CC_N;
1305             tcond = TCG_COND_LT;
1306             goto done;
1307         }
1308         break;
1309     case 6: /* NE (!Z) */
1310     case 7: /* EQ (Z) */
1311         /* Some cases fold Z into N.  */
1312         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1313             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1314             op == CC_OP_LOGIC) {
1315             tcond = TCG_COND_EQ;
1316             c->v1 = QREG_CC_N;
1317             goto done;
1318         }
1319         break;
1320     case 4: /* CC (!C) */
1321     case 5: /* CS (C) */
1322         /* Some cases fold C into X.  */
1323         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1324             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1325             tcond = TCG_COND_NE;
1326             c->v1 = QREG_CC_X;
1327             goto done;
1328         }
1329         /* fallthru */
1330     case 8: /* VC (!V) */
1331     case 9: /* VS (V) */
1332         /* Logic operations clear V and C.  */
1333         if (op == CC_OP_LOGIC) {
1334             tcond = TCG_COND_NEVER;
1335             c->v1 = c->v2;
1336             goto done;
1337         }
1338         break;
1339     }
1340 
1341     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1342     gen_flush_flags(s);
1343 
1344     switch (cond) {
1345     case 0: /* T */
1346     case 1: /* F */
1347     default:
1348         /* Invalid, or handled above.  */
1349         abort();
1350     case 2: /* HI (!C && !Z) -> !(C || Z) */
1351     case 3: /* LS (C || Z) */
1352         c->v1 = tmp = tcg_temp_new();
1353         c->g1 = 0;
1354         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1355         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1356         tcond = TCG_COND_NE;
1357         break;
1358     case 4: /* CC (!C) */
1359     case 5: /* CS (C) */
1360         c->v1 = QREG_CC_C;
1361         tcond = TCG_COND_NE;
1362         break;
1363     case 6: /* NE (!Z) */
1364     case 7: /* EQ (Z) */
1365         c->v1 = QREG_CC_Z;
1366         tcond = TCG_COND_EQ;
1367         break;
1368     case 8: /* VC (!V) */
1369     case 9: /* VS (V) */
1370         c->v1 = QREG_CC_V;
1371         tcond = TCG_COND_LT;
1372         break;
1373     case 10: /* PL (!N) */
1374     case 11: /* MI (N) */
1375         c->v1 = QREG_CC_N;
1376         tcond = TCG_COND_LT;
1377         break;
1378     case 12: /* GE (!(N ^ V)) */
1379     case 13: /* LT (N ^ V) */
1380         c->v1 = tmp = tcg_temp_new();
1381         c->g1 = 0;
1382         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1383         tcond = TCG_COND_LT;
1384         break;
1385     case 14: /* GT (!(Z || (N ^ V))) */
1386     case 15: /* LE (Z || (N ^ V)) */
1387         c->v1 = tmp = tcg_temp_new();
1388         c->g1 = 0;
1389         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1390         tcg_gen_neg_i32(tmp, tmp);
1391         tmp2 = tcg_temp_new();
1392         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1393         tcg_gen_or_i32(tmp, tmp, tmp2);
1394         tcg_temp_free(tmp2);
1395         tcond = TCG_COND_LT;
1396         break;
1397     }
1398 
1399  done:
1400     if ((cond & 1) == 0) {
1401         tcond = tcg_invert_cond(tcond);
1402     }
1403     c->tcond = tcond;
1404 }
1405 
1406 static void free_cond(DisasCompare *c)
1407 {
1408     if (!c->g1) {
1409         tcg_temp_free(c->v1);
1410     }
1411     if (!c->g2) {
1412         tcg_temp_free(c->v2);
1413     }
1414 }
1415 
1416 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1417 {
1418     DisasCompare c;
1419 
1420     gen_cc_cond(&c, s, cond);
1421     update_cc_op(s);
1422     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1423     free_cond(&c);
1424 }
1425 
1426 /* Force a TB lookup after an instruction that changes the CPU state.  */
1427 static void gen_lookup_tb(DisasContext *s)
1428 {
1429     update_cc_op(s);
1430     tcg_gen_movi_i32(QREG_PC, s->pc);
1431     s->is_jmp = DISAS_UPDATE;
1432 }
1433 
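/*
 * SRC_EA/DEST_EA wrap gen_ea() for the common case: load (sign- or
 * zero-extended) or store the operand described by the instruction's EA
 * field, and bail out via gen_addr_fault() if the addressing mode is not
 * valid for the instruction.
 */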
1434 #define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1435         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1436                         op_sign ? EA_LOADS : EA_LOADU);                 \
1437         if (IS_NULL_QREG(result)) {                                     \
1438             gen_addr_fault(s);                                          \
1439             return;                                                     \
1440         }                                                               \
1441     } while (0)
1442 
1443 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1444         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1445         if (IS_NULL_QREG(ea_result)) {                                  \
1446             gen_addr_fault(s);                                          \
1447             return;                                                     \
1448         }                                                               \
1449     } while (0)
1450 
1451 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1452 {
1453 #ifndef CONFIG_USER_ONLY
1454     return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1455            (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1456 #else
1457     return true;
1458 #endif
1459 }
1460 
1461 /* Generate a jump to an immediate address, chaining TBs when possible.  */
1462 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1463 {
1464     if (unlikely(s->singlestep_enabled)) {
1465         gen_exception(s, dest, EXCP_DEBUG);
1466     } else if (use_goto_tb(s, dest)) {
1467         tcg_gen_goto_tb(n);
1468         tcg_gen_movi_i32(QREG_PC, dest);
1469         tcg_gen_exit_tb((uintptr_t)s->tb + n);
1470     } else {
1471         gen_jmp_im(s, dest);
1472         tcg_gen_exit_tb(0);
1473     }
1474     s->is_jmp = DISAS_TB_JUMP;
1475 }
1476 
1477 DISAS_INSN(scc)
1478 {
1479     DisasCompare c;
1480     int cond;
1481     TCGv tmp;
1482 
1483     cond = (insn >> 8) & 0xf;
1484     gen_cc_cond(&c, s, cond);
1485 
1486     tmp = tcg_temp_new();
1487     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1488     free_cond(&c);
1489 
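    /* Scc sets the destination byte to all ones when the condition is
       true, so turn the 0/1 setcond result into 0/-1 before storing.  */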
1490     tcg_gen_neg_i32(tmp, tmp);
1491     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1492     tcg_temp_free(tmp);
1493 }
1494 
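/*
 * DBcc: if the condition holds, fall through to the next instruction;
 * otherwise decrement the low word of Dn and branch back by the 16-bit
 * displacement unless the counter has reached -1.
 */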
1495 DISAS_INSN(dbcc)
1496 {
1497     TCGLabel *l1;
1498     TCGv reg;
1499     TCGv tmp;
1500     int16_t offset;
1501     uint32_t base;
1502 
1503     reg = DREG(insn, 0);
1504     base = s->pc;
1505     offset = (int16_t)read_im16(env, s);
1506     l1 = gen_new_label();
1507     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1508 
1509     tmp = tcg_temp_new();
1510     tcg_gen_ext16s_i32(tmp, reg);
1511     tcg_gen_addi_i32(tmp, tmp, -1);
1512     gen_partset_reg(OS_WORD, reg, tmp);
1513     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1514     gen_jmp_tb(s, 1, base + offset);
1515     gen_set_label(l1);
1516     gen_jmp_tb(s, 0, s->pc);
1517 }
1518 
1519 DISAS_INSN(undef_mac)
1520 {
1521     gen_exception(s, s->pc - 2, EXCP_LINEA);
1522 }
1523 
1524 DISAS_INSN(undef_fpu)
1525 {
1526     gen_exception(s, s->pc - 2, EXCP_LINEF);
1527 }
1528 
1529 DISAS_INSN(undef)
1530 {
1531     /* ??? This covers both instructions that are as yet unimplemented
1532        for the 680x0 series and those that are implemented but actually
1533        illegal for CPU32 or pre-68020.  */
1534     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1535                   insn, s->pc - 2);
1536     gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1537 }
1538 
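/* mulu.w/muls.w <EA>,Dn: 16 x 16 -> 32-bit product replacing all of Dn;
   bit 8 of the opcode selects the signed form.  */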
1539 DISAS_INSN(mulw)
1540 {
1541     TCGv reg;
1542     TCGv tmp;
1543     TCGv src;
1544     int sign;
1545 
1546     sign = (insn & 0x100) != 0;
1547     reg = DREG(insn, 9);
1548     tmp = tcg_temp_new();
1549     if (sign)
1550         tcg_gen_ext16s_i32(tmp, reg);
1551     else
1552         tcg_gen_ext16u_i32(tmp, reg);
1553     SRC_EA(env, src, OS_WORD, sign, NULL);
1554     tcg_gen_mul_i32(tmp, tmp, src);
1555     tcg_gen_mov_i32(reg, tmp);
1556     gen_logic_cc(s, tmp, OS_LONG);
1557     tcg_temp_free(tmp);
1558 }
1559 
1560 DISAS_INSN(divw)
1561 {
1562     int sign;
1563     TCGv src;
1564     TCGv destr;
1565 
1566     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1567 
1568     sign = (insn & 0x100) != 0;
1569 
1570     /* dest.l / src.w */
1571 
1572     SRC_EA(env, src, OS_WORD, sign, NULL);
1573     destr = tcg_const_i32(REG(insn, 9));
1574     if (sign) {
1575         gen_helper_divsw(cpu_env, destr, src);
1576     } else {
1577         gen_helper_divuw(cpu_env, destr, src);
1578     }
1579     tcg_temp_free(destr);
1580 
1581     set_cc_op(s, CC_OP_FLAGS);
1582 }
1583 
1584 DISAS_INSN(divl)
1585 {
1586     TCGv num, reg, den;
1587     int sign;
1588     uint16_t ext;
1589 
1590     ext = read_im16(env, s);
1591 
1592     sign = (ext & 0x0800) != 0;
1593 
1594     if (ext & 0x400) {
1595         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1596             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1597             return;
1598         }
1599 
1600         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1601 
1602         SRC_EA(env, den, OS_LONG, 0, NULL);
1603         num = tcg_const_i32(REG(ext, 12));
1604         reg = tcg_const_i32(REG(ext, 0));
1605         if (sign) {
1606             gen_helper_divsll(cpu_env, num, reg, den);
1607         } else {
1608             gen_helper_divull(cpu_env, num, reg, den);
1609         }
1610         tcg_temp_free(reg);
1611         tcg_temp_free(num);
1612         set_cc_op(s, CC_OP_FLAGS);
1613         return;
1614     }
1615 
1616     /* divX.l <EA>, Dq        32/32 -> 32q     */
1617     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1618 
1619     SRC_EA(env, den, OS_LONG, 0, NULL);
1620     num = tcg_const_i32(REG(ext, 12));
1621     reg = tcg_const_i32(REG(ext, 0));
1622     if (sign) {
1623         gen_helper_divsl(cpu_env, num, reg, den);
1624     } else {
1625         gen_helper_divul(cpu_env, num, reg, den);
1626     }
1627     tcg_temp_free(reg);
1628     tcg_temp_free(num);
1629 
1630     set_cc_op(s, CC_OP_FLAGS);
1631 }
1632 
1633 static void bcd_add(TCGv dest, TCGv src)
1634 {
1635     TCGv t0, t1;
1636 
1637     /*  dest10 = dest10 + src10 + X
1638      *
1639      *        t1 = src
1640      *        t2 = t1 + 0x066
1641      *        t3 = t2 + dest + X
1642      *        t4 = t2 ^ dest
1643      *        t5 = t3 ^ t4
1644      *        t6 = ~t5 & 0x110
1645      *        t7 = (t6 >> 2) | (t6 >> 3)
1646      *        return t3 - t7
1647      */
1648 
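    /* Worked example: dest = 0x38, src = 0x29, X = 0:
     *   t0 = 0x29 + 0x66 = 0x8f,  t1 = 0x8f + 0x38 = 0xc7
     *   carries = (0x8f ^ 0x38) ^ 0xc7 = 0x70  (low digit carried, high did not)
     *   correction = 3 * (~(0x70 >> 3) & 0x22) = 0x60
     *   result = 0xc7 - 0x60 = 0x67, i.e. 38 + 29 = 67 in BCD
     */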
1649     /* t1 = (src + 0x066) + dest + X
1650      *    = result with a possible excess 0x6 in each digit
1651      */
1652 
1653     t0 = tcg_const_i32(0x066);
1654     tcg_gen_add_i32(t0, t0, src);
1655 
1656     t1 = tcg_temp_new();
1657     tcg_gen_add_i32(t1, t0, dest);
1658     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1659 
1660     /* we will remove the excess 0x6 where there is no carry */
1661 
1662     /* t0 = (src + 0x0066) ^ dest
1663      *    = t1 without carries
1664      */
1665 
1666     tcg_gen_xor_i32(t0, t0, dest);
1667 
1668     /* extract the carries
1669      * t0 = t0 ^ t1
1670      *    = only the carries
1671      */
1672 
1673     tcg_gen_xor_i32(t0, t0, t1);
1674 
1675     /* generate 0x1 where there is no carry
1676      * and for each 0x10, generate a 0x6
1677      */
1678 
1679     tcg_gen_shri_i32(t0, t0, 3);
1680     tcg_gen_not_i32(t0, t0);
1681     tcg_gen_andi_i32(t0, t0, 0x22);
1682     tcg_gen_add_i32(dest, t0, t0);
1683     tcg_gen_add_i32(dest, dest, t0);
1684     tcg_temp_free(t0);
1685 
1686     /* remove the excess 0x6
1687      * for digits that have not generated a carry
1688      */
1689 
1690     tcg_gen_sub_i32(dest, t1, dest);
1691     tcg_temp_free(t1);
1692 }
1693 
1694 static void bcd_sub(TCGv dest, TCGv src)
1695 {
1696     TCGv t0, t1, t2;
1697 
1698     /*  dest10 = dest10 - src10 - X
1699      *         = bcd_add(dest + 1 - X, 0x199 - src)
1700      */
1701 
1702     /* t0 = 0x066 + (0x199 - src) */
1703 
1704     t0 = tcg_temp_new();
1705     tcg_gen_subfi_i32(t0, 0x1ff, src);
1706 
1707     /* t1 = t0 + dest + 1 - X*/
1708 
1709     t1 = tcg_temp_new();
1710     tcg_gen_add_i32(t1, t0, dest);
1711     tcg_gen_addi_i32(t1, t1, 1);
1712     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1713 
1714     /* t2 = t0 ^ dest */
1715 
1716     t2 = tcg_temp_new();
1717     tcg_gen_xor_i32(t2, t0, dest);
1718 
1719     /* t0 = t1 ^ t2 */
1720 
1721     tcg_gen_xor_i32(t0, t1, t2);
1722 
1723     /* t2 = ~t0 & 0x110
1724      * t0 = (t2 >> 2) | (t2 >> 3)
1725      *
1726      * to fit on 8bit operands, changed in:
1727      *
1728      * t2 = ~(t0 >> 3) & 0x22
1729      * t0 = t2 + t2
1730      * t0 = t0 + t2
1731      */
1732 
1733     tcg_gen_shri_i32(t2, t0, 3);
1734     tcg_gen_not_i32(t2, t2);
1735     tcg_gen_andi_i32(t2, t2, 0x22);
1736     tcg_gen_add_i32(t0, t2, t2);
1737     tcg_gen_add_i32(t0, t0, t2);
1738     tcg_temp_free(t2);
1739 
1740     /* return t1 - t0 */
1741 
1742     tcg_gen_sub_i32(dest, t1, t0);
1743     tcg_temp_free(t0);
1744     tcg_temp_free(t1);
1745 }
1746 
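/*
 * ABCD/SBCD/NBCD flags: Z is sticky (it stays set only if the result byte
 * is also zero, hence the gen_flush_flags() calls in the callers), and C
 * and X are taken from bit 8 of the intermediate result.
 */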
1747 static void bcd_flags(TCGv val)
1748 {
1749     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1750     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1751 
1752     tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1753 
1754     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1755 }
1756 
1757 DISAS_INSN(abcd_reg)
1758 {
1759     TCGv src;
1760     TCGv dest;
1761 
1762     gen_flush_flags(s); /* !Z is sticky */
1763 
1764     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1765     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1766     bcd_add(dest, src);
1767     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1768 
1769     bcd_flags(dest);
1770 }
1771 
1772 DISAS_INSN(abcd_mem)
1773 {
1774     TCGv src, dest, addr;
1775 
1776     gen_flush_flags(s); /* !Z is sticky */
1777 
1778     /* Indirect pre-decrement load (mode 4) */
1779 
1780     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1781                       NULL_QREG, NULL, EA_LOADU);
1782     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1783                        NULL_QREG, &addr, EA_LOADU);
1784 
1785     bcd_add(dest, src);
1786 
1787     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1788 
1789     bcd_flags(dest);
1790 }
1791 
1792 DISAS_INSN(sbcd_reg)
1793 {
1794     TCGv src, dest;
1795 
1796     gen_flush_flags(s); /* !Z is sticky */
1797 
1798     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1799     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1800 
1801     bcd_sub(dest, src);
1802 
1803     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1804 
1805     bcd_flags(dest);
1806 }
1807 
1808 DISAS_INSN(sbcd_mem)
1809 {
1810     TCGv src, dest, addr;
1811 
1812     gen_flush_flags(s); /* !Z is sticky */
1813 
1814     /* Indirect pre-decrement load (mode 4) */
1815 
1816     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1817                       NULL_QREG, NULL, EA_LOADU);
1818     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1819                        NULL_QREG, &addr, EA_LOADU);
1820 
1821     bcd_sub(dest, src);
1822 
1823     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1824 
1825     bcd_flags(dest);
1826 }
1827 
1828 DISAS_INSN(nbcd)
1829 {
1830     TCGv src, dest;
1831     TCGv addr;
1832 
1833     gen_flush_flags(s); /* !Z is sticky */
1834 
1835     SRC_EA(env, src, OS_BYTE, 0, &addr);
1836 
1837     dest = tcg_const_i32(0);
1838     bcd_sub(dest, src);
1839 
1840     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1841 
1842     bcd_flags(dest);
1843 
1844     tcg_temp_free(dest);
1845 }
1846 
1847 DISAS_INSN(addsub)
1848 {
1849     TCGv reg;
1850     TCGv dest;
1851     TCGv src;
1852     TCGv tmp;
1853     TCGv addr;
1854     int add;
1855     int opsize;
1856 
1857     add = (insn & 0x4000) != 0;
1858     opsize = insn_opsize(insn);
1859     reg = gen_extend(DREG(insn, 9), opsize, 1);
1860     dest = tcg_temp_new();
1861     if (insn & 0x100) {
1862         SRC_EA(env, tmp, opsize, 1, &addr);
1863         src = reg;
1864     } else {
1865         tmp = reg;
1866         SRC_EA(env, src, opsize, 1, NULL);
1867     }
1868     if (add) {
1869         tcg_gen_add_i32(dest, tmp, src);
1870         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1871         set_cc_op(s, CC_OP_ADDB + opsize);
1872     } else {
1873         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1874         tcg_gen_sub_i32(dest, tmp, src);
1875         set_cc_op(s, CC_OP_SUBB + opsize);
1876     }
1877     gen_update_cc_add(dest, src, opsize);
1878     if (insn & 0x100) {
1879         DEST_EA(env, insn, opsize, dest, &addr);
1880     } else {
1881         gen_partset_reg(opsize, DREG(insn, 9), dest);
1882     }
1883     tcg_temp_free(dest);
1884 }
1885 
1886 /* Reverse the order of the bits in REG.  */
1887 DISAS_INSN(bitrev)
1888 {
1889     TCGv reg;
1890     reg = DREG(insn, 0);
1891     gen_helper_bitrev(reg, reg);
1892 }
1893 
1894 DISAS_INSN(bitop_reg)
1895 {
1896     int opsize;
1897     int op;
1898     TCGv src1;
1899     TCGv src2;
1900     TCGv tmp;
1901     TCGv addr;
1902     TCGv dest;
1903 
1904     if ((insn & 0x38) != 0)
1905         opsize = OS_BYTE;
1906     else
1907         opsize = OS_LONG;
1908     op = (insn >> 6) & 3;
1909     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1910 
1911     gen_flush_flags(s);
1912     src2 = tcg_temp_new();
1913     if (opsize == OS_BYTE)
1914         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1915     else
1916         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1917 
1918     tmp = tcg_const_i32(1);
1919     tcg_gen_shl_i32(tmp, tmp, src2);
1920     tcg_temp_free(src2);
1921 
1922     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1923 
1924     dest = tcg_temp_new();
1925     switch (op) {
1926     case 1: /* bchg */
1927         tcg_gen_xor_i32(dest, src1, tmp);
1928         break;
1929     case 2: /* bclr */
1930         tcg_gen_andc_i32(dest, src1, tmp);
1931         break;
1932     case 3: /* bset */
1933         tcg_gen_or_i32(dest, src1, tmp);
1934         break;
1935     default: /* btst */
1936         break;
1937     }
1938     tcg_temp_free(tmp);
1939     if (op) {
1940         DEST_EA(env, insn, opsize, dest, &addr);
1941     }
1942     tcg_temp_free(dest);
1943 }
1944 
1945 DISAS_INSN(sats)
1946 {
1947     TCGv reg;
1948     reg = DREG(insn, 0);
1949     gen_flush_flags(s);
1950     gen_helper_sats(reg, reg, QREG_CC_V);
1951     gen_logic_cc(s, reg, OS_LONG);
1952 }
1953 
1954 static void gen_push(DisasContext *s, TCGv val)
1955 {
1956     TCGv tmp;
1957 
1958     tmp = tcg_temp_new();
1959     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1960     gen_store(s, OS_LONG, tmp, val);
1961     tcg_gen_mov_i32(QREG_SP, tmp);
1962     tcg_temp_free(tmp);
1963 }
1964 
1965 static TCGv mreg(int reg)
1966 {
1967     if (reg < 8) {
1968         /* Dx */
1969         return cpu_dregs[reg];
1970     }
1971     /* Ax */
1972     return cpu_aregs[reg & 7];
1973 }
1974 
1975 DISAS_INSN(movem)
1976 {
1977     TCGv addr, incr, tmp, r[16];
1978     int is_load = (insn & 0x0400) != 0;
1979     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
1980     uint16_t mask = read_im16(env, s);
1981     int mode = extract32(insn, 3, 3);
1982     int reg0 = REG(insn, 0);
1983     int i;
1984 
1985     tmp = cpu_aregs[reg0];
1986 
1987     switch (mode) {
1988     case 0: /* data register direct */
1989     case 1: /* addr register direct */
1990     do_addr_fault:
1991         gen_addr_fault(s);
1992         return;
1993 
1994     case 2: /* indirect */
1995         break;
1996 
1997     case 3: /* indirect post-increment */
1998         if (!is_load) {
1999             /* post-increment is not allowed */
2000             goto do_addr_fault;
2001         }
2002         break;
2003 
2004     case 4: /* indirect pre-decrement */
2005         if (is_load) {
2006             /* pre-decrement is not allowed */
2007             goto do_addr_fault;
2008         }
2009         /* We want a bare copy of the address reg, without the pre-decrement
2010            adjustment that gen_lea would apply.  */
2011         break;
2012 
2013     default:
2014         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2015         if (IS_NULL_QREG(tmp)) {
2016             goto do_addr_fault;
2017         }
2018         break;
2019     }
2020 
2021     addr = tcg_temp_new();
2022     tcg_gen_mov_i32(addr, tmp);
2023     incr = tcg_const_i32(opsize_bytes(opsize));
2024 
2025     if (is_load) {
2026         /* memory to register */
2027         for (i = 0; i < 16; i++) {
2028             if (mask & (1 << i)) {
2029                 r[i] = gen_load(s, opsize, addr, 1);
2030                 tcg_gen_add_i32(addr, addr, incr);
2031             }
2032         }
2033         for (i = 0; i < 16; i++) {
2034             if (mask & (1 << i)) {
2035                 tcg_gen_mov_i32(mreg(i), r[i]);
2036                 tcg_temp_free(r[i]);
2037             }
2038         }
2039         if (mode == 3) {
2040             /* post-increment: movem (An)+,X */
2041             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2042         }
2043     } else {
2044         /* register to memory */
2045         if (mode == 4) {
2046             /* pre-decrement: movem X,-(An) */
2047             for (i = 15; i >= 0; i--) {
2048                 if ((mask << i) & 0x8000) {
2049                     tcg_gen_sub_i32(addr, addr, incr);
2050                     if (reg0 + 8 == i &&
2051                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2052                         /* M68020+: if the addressing register is the
2053                          * register moved to memory, the value written
2054                          * is the initial value decremented by the size of
2055                          * the operation, regardless of how many actual
2056                          * stores have been performed until this point.
2057                          * M68000/M68010: the value is the initial value.
2058                          */
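                        /* e.g. for movem.l d0/a0,-(a0) on a 68020, the
                         * value stored in the a0 slot is the original a0
                         * minus 4, no matter how many registers have
                         * already been pushed.
                         */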
2059                         tmp = tcg_temp_new();
2060                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2061                         gen_store(s, opsize, addr, tmp);
2062                         tcg_temp_free(tmp);
2063                     } else {
2064                         gen_store(s, opsize, addr, mreg(i));
2065                     }
2066                 }
2067             }
2068             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2069         } else {
2070             for (i = 0; i < 16; i++) {
2071                 if (mask & (1 << i)) {
2072                     gen_store(s, opsize, addr, mreg(i));
2073                     tcg_gen_add_i32(addr, addr, incr);
2074                 }
2075             }
2076         }
2077     }
2078 
2079     tcg_temp_free(incr);
2080     tcg_temp_free(addr);
2081 }
2082 
2083 DISAS_INSN(bitop_im)
2084 {
2085     int opsize;
2086     int op;
2087     TCGv src1;
2088     uint32_t mask;
2089     int bitnum;
2090     TCGv tmp;
2091     TCGv addr;
2092 
2093     if ((insn & 0x38) != 0)
2094         opsize = OS_BYTE;
2095     else
2096         opsize = OS_LONG;
2097     op = (insn >> 6) & 3;
2098 
2099     bitnum = read_im16(env, s);
2100     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2101         if (bitnum & 0xfe00) {
2102             disas_undef(env, s, insn);
2103             return;
2104         }
2105     } else {
2106         if (bitnum & 0xff00) {
2107             disas_undef(env, s, insn);
2108             return;
2109         }
2110     }
2111 
2112     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2113 
2114     gen_flush_flags(s);
2115     if (opsize == OS_BYTE)
2116         bitnum &= 7;
2117     else
2118         bitnum &= 31;
2119     mask = 1 << bitnum;
2120 
2121     tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2122 
2123     if (op) {
2124         tmp = tcg_temp_new();
2125         switch (op) {
2126         case 1: /* bchg */
2127             tcg_gen_xori_i32(tmp, src1, mask);
2128             break;
2129         case 2: /* bclr */
2130             tcg_gen_andi_i32(tmp, src1, ~mask);
2131             break;
2132         case 3: /* bset */
2133             tcg_gen_ori_i32(tmp, src1, mask);
2134             break;
2135         default: /* btst */
2136             break;
2137         }
2138         DEST_EA(env, insn, opsize, tmp, &addr);
2139         tcg_temp_free(tmp);
2140     }
2141 }
2142 
2143 DISAS_INSN(arith_im)
2144 {
2145     int op;
2146     TCGv im;
2147     TCGv src1;
2148     TCGv dest;
2149     TCGv addr;
2150     int opsize;
2151 
2152     op = (insn >> 9) & 7;
2153     opsize = insn_opsize(insn);
2154     switch (opsize) {
2155     case OS_BYTE:
2156         im = tcg_const_i32((int8_t)read_im8(env, s));
2157         break;
2158     case OS_WORD:
2159         im = tcg_const_i32((int16_t)read_im16(env, s));
2160         break;
2161     case OS_LONG:
2162         im = tcg_const_i32(read_im32(env, s));
2163         break;
2164     default:
2165        abort();
2166     }
2167     SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2168     dest = tcg_temp_new();
2169     switch (op) {
2170     case 0: /* ori */
2171         tcg_gen_or_i32(dest, src1, im);
2172         gen_logic_cc(s, dest, opsize);
2173         break;
2174     case 1: /* andi */
2175         tcg_gen_and_i32(dest, src1, im);
2176         gen_logic_cc(s, dest, opsize);
2177         break;
2178     case 2: /* subi */
2179         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2180         tcg_gen_sub_i32(dest, src1, im);
2181         gen_update_cc_add(dest, im, opsize);
2182         set_cc_op(s, CC_OP_SUBB + opsize);
2183         break;
2184     case 3: /* addi */
2185         tcg_gen_add_i32(dest, src1, im);
2186         gen_update_cc_add(dest, im, opsize);
2187         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2188         set_cc_op(s, CC_OP_ADDB + opsize);
2189         break;
2190     case 5: /* eori */
2191         tcg_gen_xor_i32(dest, src1, im);
2192         gen_logic_cc(s, dest, opsize);
2193         break;
2194     case 6: /* cmpi */
2195         gen_update_cc_cmp(s, src1, im, opsize);
2196         break;
2197     default:
2198         abort();
2199     }
2200     tcg_temp_free(im);
2201     if (op != 6) {
2202         DEST_EA(env, insn, opsize, dest, &addr);
2203     }
2204     tcg_temp_free(dest);
2205 }
2206 
2207 DISAS_INSN(cas)
2208 {
2209     int opsize;
2210     TCGv addr;
2211     uint16_t ext;
2212     TCGv load;
2213     TCGv cmp;
2214     TCGMemOp opc;
2215 
2216     switch ((insn >> 9) & 3) {
2217     case 1:
2218         opsize = OS_BYTE;
2219         opc = MO_SB;
2220         break;
2221     case 2:
2222         opsize = OS_WORD;
2223         opc = MO_TESW;
2224         break;
2225     case 3:
2226         opsize = OS_LONG;
2227         opc = MO_TESL;
2228         break;
2229     default:
2230         g_assert_not_reached();
2231     }
2232 
2233     ext = read_im16(env, s);
2234 
2235     /* cas Dc,Du,<EA> */
2236 
2237     addr = gen_lea(env, s, insn, opsize);
2238     if (IS_NULL_QREG(addr)) {
2239         gen_addr_fault(s);
2240         return;
2241     }
2242 
2243     cmp = gen_extend(DREG(ext, 0), opsize, 1);
2244 
2245     /* if  <EA> == Dc then
2246      *     <EA> = Du
2247      *     Dc = <EA> (because <EA> == Dc)
2248      * else
2249      *     Dc = <EA>
2250      */
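    /* tcg_gen_atomic_cmpxchg_i32 returns the value that was in memory, so
     * copying it back into Dc covers both branches: on success it already
     * equals Dc, on failure Dc receives the current <EA> value.
     */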
2251 
2252     load = tcg_temp_new();
2253     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2254                                IS_USER(s), opc);
2255     /* update flags before Dc is overwritten with the loaded value */
2256     gen_update_cc_cmp(s, load, cmp, opsize);
2257     gen_partset_reg(opsize, DREG(ext, 0), load);
2258 
2259     tcg_temp_free(load);
2260 
2261     switch (extract32(insn, 3, 3)) {
2262     case 3: /* Indirect postincrement.  */
2263         tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2264         break;
2265     case 4: /* Indirect predecrement.  */
2266         tcg_gen_mov_i32(AREG(insn, 0), addr);
2267         break;
2268     }
2269 }
2270 
2271 DISAS_INSN(cas2w)
2272 {
2273     uint16_t ext1, ext2;
2274     TCGv addr1, addr2;
2275     TCGv regs;
2276 
2277     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2278 
2279     ext1 = read_im16(env, s);
2280 
2281     if (ext1 & 0x8000) {
2282         /* Address Register */
2283         addr1 = AREG(ext1, 12);
2284     } else {
2285         /* Data Register */
2286         addr1 = DREG(ext1, 12);
2287     }
2288 
2289     ext2 = read_im16(env, s);
2290     if (ext2 & 0x8000) {
2291         /* Address Register */
2292         addr2 = AREG(ext2, 12);
2293     } else {
2294         /* Data Register */
2295         addr2 = DREG(ext2, 12);
2296     }
2297 
2298     /* if (R1) == Dc1 && (R2) == Dc2 then
2299      *     (R1) = Du1
2300      *     (R2) = Du2
2301      * else
2302      *     Dc1 = (R1)
2303      *     Dc2 = (R2)
2304      */
2305 
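    /* Pack the four register numbers for the helper:
     * bits 2:0 = Du2, bits 5:3 = Du1, bits 8:6 = Dc2, bits 11:9 = Dc1.
     */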
2306     regs = tcg_const_i32(REG(ext2, 6) |
2307                          (REG(ext1, 6) << 3) |
2308                          (REG(ext2, 0) << 6) |
2309                          (REG(ext1, 0) << 9));
2310     if (tb_cflags(s->tb) & CF_PARALLEL) {
2311         gen_helper_exit_atomic(cpu_env);
2312     } else {
2313         gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2314     }
2315     tcg_temp_free(regs);
2316 
2317     /* Note that the cas2w helper has also assigned env->cc_op.  */
2318     s->cc_op = CC_OP_CMPW;
2319     s->cc_op_synced = 1;
2320 }
2321 
2322 DISAS_INSN(cas2l)
2323 {
2324     uint16_t ext1, ext2;
2325     TCGv addr1, addr2, regs;
2326 
2327     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2328 
2329     ext1 = read_im16(env, s);
2330 
2331     if (ext1 & 0x8000) {
2332         /* Address Register */
2333         addr1 = AREG(ext1, 12);
2334     } else {
2335         /* Data Register */
2336         addr1 = DREG(ext1, 12);
2337     }
2338 
2339     ext2 = read_im16(env, s);
2340     if (ext2 & 0x8000) {
2341         /* Address Register */
2342         addr2 = AREG(ext2, 12);
2343     } else {
2344         /* Data Register */
2345         addr2 = DREG(ext2, 12);
2346     }
2347 
2348     /* if (R1) == Dc1 && (R2) == Dc2 then
2349      *     (R1) = Du1
2350      *     (R2) = Du2
2351      * else
2352      *     Dc1 = (R1)
2353      *     Dc2 = (R2)
2354      */
2355 
2356     regs = tcg_const_i32(REG(ext2, 6) |
2357                          (REG(ext1, 6) << 3) |
2358                          (REG(ext2, 0) << 6) |
2359                          (REG(ext1, 0) << 9));
2360     if (tb_cflags(s->tb) & CF_PARALLEL) {
2361         gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2362     } else {
2363         gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2364     }
2365     tcg_temp_free(regs);
2366 
2367     /* Note that the cas2l helper has also assigned env->cc_op.  */
2368     s->cc_op = CC_OP_CMPL;
2369     s->cc_op_synced = 1;
2370 }
2371 
2372 DISAS_INSN(byterev)
2373 {
2374     TCGv reg;
2375 
2376     reg = DREG(insn, 0);
2377     tcg_gen_bswap32_i32(reg, reg);
2378 }
2379 
2380 DISAS_INSN(move)
2381 {
2382     TCGv src;
2383     TCGv dest;
2384     int op;
2385     int opsize;
2386 
2387     switch (insn >> 12) {
2388     case 1: /* move.b */
2389         opsize = OS_BYTE;
2390         break;
2391     case 2: /* move.l */
2392         opsize = OS_LONG;
2393         break;
2394     case 3: /* move.w */
2395         opsize = OS_WORD;
2396         break;
2397     default:
2398         abort();
2399     }
2400     SRC_EA(env, src, opsize, 1, NULL);
2401     op = (insn >> 6) & 7;
2402     if (op == 1) {
2403         /* movea */
2404         /* The value will already have been sign extended.  */
2405         dest = AREG(insn, 9);
2406         tcg_gen_mov_i32(dest, src);
2407     } else {
2408         /* normal move */
2409         uint16_t dest_ea;
2410         dest_ea = ((insn >> 9) & 7) | (op << 3);
2411         DEST_EA(env, dest_ea, opsize, src, NULL);
2412         /* This will be correct because loads sign extend.  */
2413         gen_logic_cc(s, src, opsize);
2414     }
2415 }
2416 
2417 DISAS_INSN(negx)
2418 {
2419     TCGv z;
2420     TCGv src;
2421     TCGv addr;
2422     int opsize;
2423 
2424     opsize = insn_opsize(insn);
2425     SRC_EA(env, src, opsize, 1, &addr);
2426 
2427     gen_flush_flags(s); /* compute old Z */
2428 
2429     /* Perform subtract with borrow.
2430      * (X, N) = -(src + X);
2431      */
2432 
2433     z = tcg_const_i32(0);
2434     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2435     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2436     tcg_temp_free(z);
2437     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2438 
2439     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2440 
2441     /* Compute signed-overflow for negation.  The normal formula for
2442      * subtraction is (res ^ src) & (src ^ dest), but with dest==0
2443      * this simplifies to res & src.
2444      */
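    /* e.g. negating a byte src = 0x80 with X = 0: res sign-extends to
     * 0xffffff80, so res & src keeps the sign bit set and V is raised.
     */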
2445 
2446     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2447 
2448     /* Copy the rest of the results into place.  */
2449     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2450     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2451 
2452     set_cc_op(s, CC_OP_FLAGS);
2453 
2454     /* result is in QREG_CC_N */
2455 
2456     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2457 }
2458 
2459 DISAS_INSN(lea)
2460 {
2461     TCGv reg;
2462     TCGv tmp;
2463 
2464     reg = AREG(insn, 9);
2465     tmp = gen_lea(env, s, insn, OS_LONG);
2466     if (IS_NULL_QREG(tmp)) {
2467         gen_addr_fault(s);
2468         return;
2469     }
2470     tcg_gen_mov_i32(reg, tmp);
2471 }
2472 
2473 DISAS_INSN(clr)
2474 {
2475     int opsize;
2476     TCGv zero;
2477 
2478     zero = tcg_const_i32(0);
2479 
2480     opsize = insn_opsize(insn);
2481     DEST_EA(env, insn, opsize, zero, NULL);
2482     gen_logic_cc(s, zero, opsize);
2483     tcg_temp_free(zero);
2484 }
2485 
2486 static TCGv gen_get_ccr(DisasContext *s)
2487 {
2488     TCGv dest;
2489 
2490     gen_flush_flags(s);
2491     update_cc_op(s);
2492     dest = tcg_temp_new();
2493     gen_helper_get_ccr(dest, cpu_env);
2494     return dest;
2495 }
2496 
2497 DISAS_INSN(move_from_ccr)
2498 {
2499     TCGv ccr;
2500 
2501     ccr = gen_get_ccr(s);
2502     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2503 }
2504 
2505 DISAS_INSN(neg)
2506 {
2507     TCGv src1;
2508     TCGv dest;
2509     TCGv addr;
2510     int opsize;
2511 
2512     opsize = insn_opsize(insn);
2513     SRC_EA(env, src1, opsize, 1, &addr);
2514     dest = tcg_temp_new();
2515     tcg_gen_neg_i32(dest, src1);
2516     set_cc_op(s, CC_OP_SUBB + opsize);
2517     gen_update_cc_add(dest, src1, opsize);
2518     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2519     DEST_EA(env, insn, opsize, dest, &addr);
2520     tcg_temp_free(dest);
2521 }
2522 
2523 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
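/* With CC_OP_FLAGS the flags live directly in the QREG_CC_* globals:
 * C and X in bit 0, N and V in the sign bit, and Z inverted, i.e. the
 * Z flag is set when QREG_CC_Z is zero.
 */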
2524 {
2525     if (ccr_only) {
2526         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2527         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2528         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2529         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2530         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2531     } else {
2532         gen_helper_set_sr(cpu_env, tcg_const_i32(val));
2533     }
2534     set_cc_op(s, CC_OP_FLAGS);
2535 }
2536 
2537 static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2538                        int ccr_only)
2539 {
2540     if ((insn & 0x38) == 0) {
2541         if (ccr_only) {
2542             gen_helper_set_ccr(cpu_env, DREG(insn, 0));
2543         } else {
2544             gen_helper_set_sr(cpu_env, DREG(insn, 0));
2545         }
2546         set_cc_op(s, CC_OP_FLAGS);
2547     } else if ((insn & 0x3f) == 0x3c) {
2548         uint16_t val;
2549         val = read_im16(env, s);
2550         gen_set_sr_im(s, val, ccr_only);
2551     } else {
2552         disas_undef(env, s, insn);
2553     }
2554 }
2555 
2556 
2557 DISAS_INSN(move_to_ccr)
2558 {
2559     gen_set_sr(env, s, insn, 1);
2560 }
2561 
2562 DISAS_INSN(not)
2563 {
2564     TCGv src1;
2565     TCGv dest;
2566     TCGv addr;
2567     int opsize;
2568 
2569     opsize = insn_opsize(insn);
2570     SRC_EA(env, src1, opsize, 1, &addr);
2571     dest = tcg_temp_new();
2572     tcg_gen_not_i32(dest, src1);
2573     DEST_EA(env, insn, opsize, dest, &addr);
2574     gen_logic_cc(s, dest, opsize);
2575 }
2576 
2577 DISAS_INSN(swap)
2578 {
2579     TCGv src1;
2580     TCGv src2;
2581     TCGv reg;
2582 
2583     src1 = tcg_temp_new();
2584     src2 = tcg_temp_new();
2585     reg = DREG(insn, 0);
2586     tcg_gen_shli_i32(src1, reg, 16);
2587     tcg_gen_shri_i32(src2, reg, 16);
2588     tcg_gen_or_i32(reg, src1, src2);
2589     tcg_temp_free(src2);
2590     tcg_temp_free(src1);
2591     gen_logic_cc(s, reg, OS_LONG);
2592 }
2593 
2594 DISAS_INSN(bkpt)
2595 {
2596     gen_exception(s, s->pc - 2, EXCP_DEBUG);
2597 }
2598 
2599 DISAS_INSN(pea)
2600 {
2601     TCGv tmp;
2602 
2603     tmp = gen_lea(env, s, insn, OS_LONG);
2604     if (IS_NULL_QREG(tmp)) {
2605         gen_addr_fault(s);
2606         return;
2607     }
2608     gen_push(s, tmp);
2609 }
2610 
2611 DISAS_INSN(ext)
2612 {
2613     int op;
2614     TCGv reg;
2615     TCGv tmp;
2616 
2617     reg = DREG(insn, 0);
2618     op = (insn >> 6) & 7;
2619     tmp = tcg_temp_new();
2620     if (op == 3)
2621         tcg_gen_ext16s_i32(tmp, reg);
2622     else
2623         tcg_gen_ext8s_i32(tmp, reg);
2624     if (op == 2)
2625         gen_partset_reg(OS_WORD, reg, tmp);
2626     else
2627         tcg_gen_mov_i32(reg, tmp);
2628     gen_logic_cc(s, tmp, OS_LONG);
2629     tcg_temp_free(tmp);
2630 }
2631 
2632 DISAS_INSN(tst)
2633 {
2634     int opsize;
2635     TCGv tmp;
2636 
2637     opsize = insn_opsize(insn);
2638     SRC_EA(env, tmp, opsize, 1, NULL);
2639     gen_logic_cc(s, tmp, opsize);
2640 }
2641 
2642 DISAS_INSN(pulse)
2643 {
2644     /* Implemented as a NOP.  */
2645 }
2646 
2647 DISAS_INSN(illegal)
2648 {
2649     gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2650 }
2651 
2652 /* ??? This should be atomic.  */
2653 DISAS_INSN(tas)
2654 {
2655     TCGv dest;
2656     TCGv src1;
2657     TCGv addr;
2658 
2659     dest = tcg_temp_new();
2660     SRC_EA(env, src1, OS_BYTE, 1, &addr);
2661     gen_logic_cc(s, src1, OS_BYTE);
2662     tcg_gen_ori_i32(dest, src1, 0x80);
2663     DEST_EA(env, insn, OS_BYTE, dest, &addr);
2664     tcg_temp_free(dest);
2665 }
2666 
2667 DISAS_INSN(mull)
2668 {
2669     uint16_t ext;
2670     TCGv src1;
2671     int sign;
2672 
2673     ext = read_im16(env, s);
2674 
2675     sign = ext & 0x800;
2676 
2677     if (ext & 0x400) {
2678         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2679             gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
2680             return;
2681         }
2682 
2683         SRC_EA(env, src1, OS_LONG, 0, NULL);
2684 
2685         if (sign) {
2686             tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2687         } else {
2688             tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2689         }
2690         /* if Dl == Dh, 68040 returns low word */
2691         tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2692         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2693         tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2694 
2695         tcg_gen_movi_i32(QREG_CC_V, 0);
2696         tcg_gen_movi_i32(QREG_CC_C, 0);
2697 
2698         set_cc_op(s, CC_OP_FLAGS);
2699         return;
2700     }
2701     SRC_EA(env, src1, OS_LONG, 0, NULL);
2702     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2703         tcg_gen_movi_i32(QREG_CC_C, 0);
2704         if (sign) {
2705             tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2706             /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2707             tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2708             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2709         } else {
2710             tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2711             /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2712             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2713         }
2714         tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2715         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2716 
2717         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2718 
2719         set_cc_op(s, CC_OP_FLAGS);
2720     } else {
2721         /* The upper 32 bits of the product are discarded, so
2722            muls.l and mulu.l are functionally equivalent.  */
2723         tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2724         gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2725     }
2726 }
2727 
2728 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2729 {
2730     TCGv reg;
2731     TCGv tmp;
2732 
2733     reg = AREG(insn, 0);
2734     tmp = tcg_temp_new();
2735     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2736     gen_store(s, OS_LONG, tmp, reg);
2737     if ((insn & 7) != 7) {
2738         tcg_gen_mov_i32(reg, tmp);
2739     }
2740     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2741     tcg_temp_free(tmp);
2742 }
2743 
2744 DISAS_INSN(link)
2745 {
2746     int16_t offset;
2747 
2748     offset = read_im16(env, s);
2749     gen_link(s, insn, offset);
2750 }
2751 
2752 DISAS_INSN(linkl)
2753 {
2754     int32_t offset;
2755 
2756     offset = read_im32(env, s);
2757     gen_link(s, insn, offset);
2758 }
2759 
2760 DISAS_INSN(unlk)
2761 {
2762     TCGv src;
2763     TCGv reg;
2764     TCGv tmp;
2765 
2766     src = tcg_temp_new();
2767     reg = AREG(insn, 0);
2768     tcg_gen_mov_i32(src, reg);
2769     tmp = gen_load(s, OS_LONG, src, 0);
2770     tcg_gen_mov_i32(reg, tmp);
2771     tcg_gen_addi_i32(QREG_SP, src, 4);
2772     tcg_temp_free(src);
2773 }
2774 
2775 DISAS_INSN(nop)
2776 {
2777 }
2778 
2779 DISAS_INSN(rtd)
2780 {
2781     TCGv tmp;
2782     int16_t offset = read_im16(env, s);
2783 
2784     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2785     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2786     gen_jmp(s, tmp);
2787 }
2788 
2789 DISAS_INSN(rts)
2790 {
2791     TCGv tmp;
2792 
2793     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2794     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2795     gen_jmp(s, tmp);
2796 }
2797 
2798 DISAS_INSN(jump)
2799 {
2800     TCGv tmp;
2801 
2802     /* Load the target address first to ensure correct exception
2803        behavior.  */
2804     tmp = gen_lea(env, s, insn, OS_LONG);
2805     if (IS_NULL_QREG(tmp)) {
2806         gen_addr_fault(s);
2807         return;
2808     }
2809     if ((insn & 0x40) == 0) {
2810         /* jsr */
2811         gen_push(s, tcg_const_i32(s->pc));
2812     }
2813     gen_jmp(s, tmp);
2814 }
2815 
2816 DISAS_INSN(addsubq)
2817 {
2818     TCGv src;
2819     TCGv dest;
2820     TCGv val;
2821     int imm;
2822     TCGv addr;
2823     int opsize;
2824 
2825     if ((insn & 070) == 010) {
2826         /* Operation on address register is always long.  */
2827         opsize = OS_LONG;
2828     } else {
2829         opsize = insn_opsize(insn);
2830     }
2831     SRC_EA(env, src, opsize, 1, &addr);
2832     imm = (insn >> 9) & 7;
2833     if (imm == 0) {
2834         imm = 8;
2835     }
2836     val = tcg_const_i32(imm);
2837     dest = tcg_temp_new();
2838     tcg_gen_mov_i32(dest, src);
2839     if ((insn & 0x38) == 0x08) {
2840         /* Don't update condition codes if the destination is an
2841            address register.  */
2842         if (insn & 0x0100) {
2843             tcg_gen_sub_i32(dest, dest, val);
2844         } else {
2845             tcg_gen_add_i32(dest, dest, val);
2846         }
2847     } else {
2848         if (insn & 0x0100) {
2849             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2850             tcg_gen_sub_i32(dest, dest, val);
2851             set_cc_op(s, CC_OP_SUBB + opsize);
2852         } else {
2853             tcg_gen_add_i32(dest, dest, val);
2854             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2855             set_cc_op(s, CC_OP_ADDB + opsize);
2856         }
2857         gen_update_cc_add(dest, val, opsize);
2858     }
2859     tcg_temp_free(val);
2860     DEST_EA(env, insn, opsize, dest, &addr);
2861     tcg_temp_free(dest);
2862 }
2863 
2864 DISAS_INSN(tpf)
2865 {
2866     switch (insn & 7) {
2867     case 2: /* One extension word.  */
2868         s->pc += 2;
2869         break;
2870     case 3: /* Two extension words.  */
2871         s->pc += 4;
2872         break;
2873     case 4: /* No extension words.  */
2874         break;
2875     default:
2876         disas_undef(env, s, insn);
2877     }
2878 }
2879 
2880 DISAS_INSN(branch)
2881 {
2882     int32_t offset;
2883     uint32_t base;
2884     int op;
2885     TCGLabel *l1;
2886 
2887     base = s->pc;
2888     op = (insn >> 8) & 0xf;
2889     offset = (int8_t)insn;
2890     if (offset == 0) {
2891         offset = (int16_t)read_im16(env, s);
2892     } else if (offset == -1) {
2893         offset = read_im32(env, s);
2894     }
2895     if (op == 1) {
2896         /* bsr */
2897         gen_push(s, tcg_const_i32(s->pc));
2898     }
2899     if (op > 1) {
2900         /* Bcc */
2901         l1 = gen_new_label();
2902         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2903         gen_jmp_tb(s, 1, base + offset);
2904         gen_set_label(l1);
2905         gen_jmp_tb(s, 0, s->pc);
2906     } else {
2907         /* Unconditional branch.  */
2908         gen_jmp_tb(s, 0, base + offset);
2909     }
2910 }
2911 
2912 DISAS_INSN(moveq)
2913 {
2914     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2915     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2916 }
2917 
2918 DISAS_INSN(mvzs)
2919 {
2920     int opsize;
2921     TCGv src;
2922     TCGv reg;
2923 
2924     if (insn & 0x40)
2925         opsize = OS_WORD;
2926     else
2927         opsize = OS_BYTE;
2928     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2929     reg = DREG(insn, 9);
2930     tcg_gen_mov_i32(reg, src);
2931     gen_logic_cc(s, src, opsize);
2932 }
2933 
2934 DISAS_INSN(or)
2935 {
2936     TCGv reg;
2937     TCGv dest;
2938     TCGv src;
2939     TCGv addr;
2940     int opsize;
2941 
2942     opsize = insn_opsize(insn);
2943     reg = gen_extend(DREG(insn, 9), opsize, 0);
2944     dest = tcg_temp_new();
2945     if (insn & 0x100) {
2946         SRC_EA(env, src, opsize, 0, &addr);
2947         tcg_gen_or_i32(dest, src, reg);
2948         DEST_EA(env, insn, opsize, dest, &addr);
2949     } else {
2950         SRC_EA(env, src, opsize, 0, NULL);
2951         tcg_gen_or_i32(dest, src, reg);
2952         gen_partset_reg(opsize, DREG(insn, 9), dest);
2953     }
2954     gen_logic_cc(s, dest, opsize);
2955     tcg_temp_free(dest);
2956 }
2957 
2958 DISAS_INSN(suba)
2959 {
2960     TCGv src;
2961     TCGv reg;
2962 
2963     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2964     reg = AREG(insn, 9);
2965     tcg_gen_sub_i32(reg, reg, src);
2966 }
2967 
2968 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2969 {
2970     TCGv tmp;
2971 
2972     gen_flush_flags(s); /* compute old Z */
2973 
2974     /* Perform subtract with borrow.
2975      * (X, N) = dest - (src + X);
2976      */
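    /* The first add2 forms src + X as a 33-bit value with its carry in
     * QREG_CC_X; the second sub2 subtracts that value from dest, leaving
     * the borrow in QREG_CC_X.
     */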
2977 
2978     tmp = tcg_const_i32(0);
2979     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2980     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2981     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2982     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2983 
2984     /* Compute signed-overflow for subtraction.  */
2985 
2986     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2987     tcg_gen_xor_i32(tmp, dest, src);
2988     tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2989     tcg_temp_free(tmp);
2990 
2991     /* Copy the rest of the results into place.  */
2992     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2993     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2994 
2995     set_cc_op(s, CC_OP_FLAGS);
2996 
2997     /* result is in QREG_CC_N */
2998 }
2999 
3000 DISAS_INSN(subx_reg)
3001 {
3002     TCGv dest;
3003     TCGv src;
3004     int opsize;
3005 
3006     opsize = insn_opsize(insn);
3007 
3008     src = gen_extend(DREG(insn, 0), opsize, 1);
3009     dest = gen_extend(DREG(insn, 9), opsize, 1);
3010 
3011     gen_subx(s, src, dest, opsize);
3012 
3013     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3014 }
3015 
3016 DISAS_INSN(subx_mem)
3017 {
3018     TCGv src;
3019     TCGv addr_src;
3020     TCGv dest;
3021     TCGv addr_dest;
3022     int opsize;
3023 
3024     opsize = insn_opsize(insn);
3025 
3026     addr_src = AREG(insn, 0);
3027     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3028     src = gen_load(s, opsize, addr_src, 1);
3029 
3030     addr_dest = AREG(insn, 9);
3031     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3032     dest = gen_load(s, opsize, addr_dest, 1);
3033 
3034     gen_subx(s, src, dest, opsize);
3035 
3036     gen_store(s, opsize, addr_dest, QREG_CC_N);
3037 }
3038 
3039 DISAS_INSN(mov3q)
3040 {
3041     TCGv src;
3042     int val;
3043 
3044     val = (insn >> 9) & 7;
3045     if (val == 0)
3046         val = -1;
3047     src = tcg_const_i32(val);
3048     gen_logic_cc(s, src, OS_LONG);
3049     DEST_EA(env, insn, OS_LONG, src, NULL);
3050     tcg_temp_free(src);
3051 }
3052 
3053 DISAS_INSN(cmp)
3054 {
3055     TCGv src;
3056     TCGv reg;
3057     int opsize;
3058 
3059     opsize = insn_opsize(insn);
3060     SRC_EA(env, src, opsize, 1, NULL);
3061     reg = gen_extend(DREG(insn, 9), opsize, 1);
3062     gen_update_cc_cmp(s, reg, src, opsize);
3063 }
3064 
3065 DISAS_INSN(cmpa)
3066 {
3067     int opsize;
3068     TCGv src;
3069     TCGv reg;
3070 
3071     if (insn & 0x100) {
3072         opsize = OS_LONG;
3073     } else {
3074         opsize = OS_WORD;
3075     }
3076     SRC_EA(env, src, opsize, 1, NULL);
3077     reg = AREG(insn, 9);
3078     gen_update_cc_cmp(s, reg, src, OS_LONG);
3079 }
3080 
3081 DISAS_INSN(cmpm)
3082 {
3083     int opsize = insn_opsize(insn);
3084     TCGv src, dst;
3085 
3086     /* Post-increment load (mode 3) from Ay.  */
3087     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3088                       NULL_QREG, NULL, EA_LOADS);
3089     /* Post-increment load (mode 3) from Ax.  */
3090     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3091                       NULL_QREG, NULL, EA_LOADS);
3092 
3093     gen_update_cc_cmp(s, dst, src, opsize);
3094 }
3095 
3096 DISAS_INSN(eor)
3097 {
3098     TCGv src;
3099     TCGv dest;
3100     TCGv addr;
3101     int opsize;
3102 
3103     opsize = insn_opsize(insn);
3104 
3105     SRC_EA(env, src, opsize, 0, &addr);
3106     dest = tcg_temp_new();
3107     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3108     gen_logic_cc(s, dest, opsize);
3109     DEST_EA(env, insn, opsize, dest, &addr);
3110     tcg_temp_free(dest);
3111 }
3112 
3113 static void do_exg(TCGv reg1, TCGv reg2)
3114 {
3115     TCGv temp = tcg_temp_new();
3116     tcg_gen_mov_i32(temp, reg1);
3117     tcg_gen_mov_i32(reg1, reg2);
3118     tcg_gen_mov_i32(reg2, temp);
3119     tcg_temp_free(temp);
3120 }
3121 
3122 DISAS_INSN(exg_dd)
3123 {
3124     /* exchange Dx and Dy */
3125     do_exg(DREG(insn, 9), DREG(insn, 0));
3126 }
3127 
3128 DISAS_INSN(exg_aa)
3129 {
3130     /* exchange Ax and Ay */
3131     do_exg(AREG(insn, 9), AREG(insn, 0));
3132 }
3133 
3134 DISAS_INSN(exg_da)
3135 {
3136     /* exchange Dx and Ay */
3137     do_exg(DREG(insn, 9), AREG(insn, 0));
3138 }
3139 
3140 DISAS_INSN(and)
3141 {
3142     TCGv src;
3143     TCGv reg;
3144     TCGv dest;
3145     TCGv addr;
3146     int opsize;
3147 
3148     dest = tcg_temp_new();
3149 
3150     opsize = insn_opsize(insn);
3151     reg = DREG(insn, 9);
3152     if (insn & 0x100) {
3153         SRC_EA(env, src, opsize, 0, &addr);
3154         tcg_gen_and_i32(dest, src, reg);
3155         DEST_EA(env, insn, opsize, dest, &addr);
3156     } else {
3157         SRC_EA(env, src, opsize, 0, NULL);
3158         tcg_gen_and_i32(dest, src, reg);
3159         gen_partset_reg(opsize, reg, dest);
3160     }
3161     gen_logic_cc(s, dest, opsize);
3162     tcg_temp_free(dest);
3163 }
3164 
3165 DISAS_INSN(adda)
3166 {
3167     TCGv src;
3168     TCGv reg;
3169 
3170     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3171     reg = AREG(insn, 9);
3172     tcg_gen_add_i32(reg, reg, src);
3173 }
3174 
3175 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3176 {
3177     TCGv tmp;
3178 
3179     gen_flush_flags(s); /* compute old Z */
3180 
3181     /* Perform addition with carry.
3182      * (X, N) = src + dest + X;
3183      */
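    /* The first add2 adds X to dest, the second adds src; the carries
     * accumulate in QREG_CC_X, which ends up holding the carry out of
     * the full src + dest + X sum.
     */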
3184 
3185     tmp = tcg_const_i32(0);
3186     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3187     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3188     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3189 
3190     /* Compute signed-overflow for addition.  */
3191 
3192     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3193     tcg_gen_xor_i32(tmp, dest, src);
3194     tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3195     tcg_temp_free(tmp);
3196 
3197     /* Copy the rest of the results into place.  */
3198     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3199     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3200 
3201     set_cc_op(s, CC_OP_FLAGS);
3202 
3203     /* result is in QREG_CC_N */
3204 }
3205 
3206 DISAS_INSN(addx_reg)
3207 {
3208     TCGv dest;
3209     TCGv src;
3210     int opsize;
3211 
3212     opsize = insn_opsize(insn);
3213 
3214     dest = gen_extend(DREG(insn, 9), opsize, 1);
3215     src = gen_extend(DREG(insn, 0), opsize, 1);
3216 
3217     gen_addx(s, src, dest, opsize);
3218 
3219     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3220 }
3221 
3222 DISAS_INSN(addx_mem)
3223 {
3224     TCGv src;
3225     TCGv addr_src;
3226     TCGv dest;
3227     TCGv addr_dest;
3228     int opsize;
3229 
3230     opsize = insn_opsize(insn);
3231 
3232     addr_src = AREG(insn, 0);
3233     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3234     src = gen_load(s, opsize, addr_src, 1);
3235 
3236     addr_dest = AREG(insn, 9);
3237     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3238     dest = gen_load(s, opsize, addr_dest, 1);
3239 
3240     gen_addx(s, src, dest, opsize);
3241 
3242     gen_store(s, opsize, addr_dest, QREG_CC_N);
3243 }
3244 
3245 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3246 {
3247     int count = (insn >> 9) & 7;
3248     int logical = insn & 8;
3249     int left = insn & 0x100;
3250     int bits = opsize_bytes(opsize) * 8;
3251     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3252 
3253     if (count == 0) {
3254         count = 8;
3255     }
3256 
3257     tcg_gen_movi_i32(QREG_CC_V, 0);
3258     if (left) {
3259         tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3260         tcg_gen_shli_i32(QREG_CC_N, reg, count);
3261 
3262         /* Note that ColdFire always clears V (done above),
3263            while M68000 sets it if the most significant bit is changed at
3264            any time during the shift operation.  */
3265         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3266             /* if shift count >= bits, V is (reg != 0) */
3267             if (count >= bits) {
3268                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3269             } else {
3270                 TCGv t0 = tcg_temp_new();
3271                 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3272                 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3273                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3274                 tcg_temp_free(t0);
3275             }
3276             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3277         }
3278     } else {
3279         tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3280         if (logical) {
3281             tcg_gen_shri_i32(QREG_CC_N, reg, count);
3282         } else {
3283             tcg_gen_sari_i32(QREG_CC_N, reg, count);
3284         }
3285     }
3286 
3287     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3288     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3289     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3290     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3291 
3292     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3293     set_cc_op(s, CC_OP_FLAGS);
3294 }
3295 
3296 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3297 {
3298     int logical = insn & 8;
3299     int left = insn & 0x100;
3300     int bits = opsize_bytes(opsize) * 8;
3301     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3302     TCGv s32;
3303     TCGv_i64 t64, s64;
3304 
3305     t64 = tcg_temp_new_i64();
3306     s64 = tcg_temp_new_i64();
3307     s32 = tcg_temp_new();
3308 
3309     /* Note that m68k truncates the shift count modulo 64, not 32.
3310        In addition, a 64-bit shift makes it easy to find "the last
3311        bit shifted out", for the carry flag.  */
3312     tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3313     tcg_gen_extu_i32_i64(s64, s32);
3314     tcg_gen_extu_i32_i64(t64, reg);
3315 
3316     /* Optimistically set V=0.  Also used as a zero source below.  */
3317     tcg_gen_movi_i32(QREG_CC_V, 0);
3318     if (left) {
3319         tcg_gen_shl_i64(t64, t64, s64);
3320 
3321         if (opsize == OS_LONG) {
3322             tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3323             /* Note that C=0 if shift count is 0, and we get that for free.  */
3324         } else {
3325             TCGv zero = tcg_const_i32(0);
3326             tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3327             tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3328             tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3329                                 s32, zero, zero, QREG_CC_C);
3330             tcg_temp_free(zero);
3331         }
3332         tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3333 
3334         /* X = C, but only if the shift count was non-zero.  */
3335         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3336                             QREG_CC_C, QREG_CC_X);
3337 
3338         /* M68000 sets V if the most significant bit is changed at
3339          * any time during the shift operation.  Do this via creating
3340          * an extension of the sign bit, comparing, and discarding
3341          * the bits below the sign bit.  I.e.
3342          *     int64_t s = (intN_t)reg;
3343          *     int64_t t = (int64_t)(intN_t)reg << count;
3344          *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3345          */
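        /* e.g. for a byte operand 0x40 shifted left by 2:
         *     s = 0x40, t = 0x100, (s ^ t) & (-1 << 7) = 0x100 != 0,
         * so V is set (the sign bit changed during the shift).
         */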
3346         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3347             TCGv_i64 tt = tcg_const_i64(32);
3348             /* if shift is greater than 32, use 32 */
3349             tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3350             tcg_temp_free_i64(tt);
3351             /* Sign extend the input to 64 bits; re-do the shift.  */
3352             tcg_gen_ext_i32_i64(t64, reg);
3353             tcg_gen_shl_i64(s64, t64, s64);
3354             /* Clear all bits that are unchanged.  */
3355             tcg_gen_xor_i64(t64, t64, s64);
3356             /* Ignore the bits below the sign bit.  */
3357             tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3358             /* If any bits remain set, we have overflow.  */
3359             tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3360             tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3361             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3362         }
3363     } else {
3364         tcg_gen_shli_i64(t64, t64, 32);
3365         if (logical) {
3366             tcg_gen_shr_i64(t64, t64, s64);
3367         } else {
3368             tcg_gen_sar_i64(t64, t64, s64);
3369         }
3370         tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3371 
3372         /* Note that C=0 if shift count is 0, and we get that for free.  */
3373         tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3374 
3375         /* X = C, but only if the shift count was non-zero.  */
3376         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3377                             QREG_CC_C, QREG_CC_X);
3378     }
3379     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3380     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3381 
3382     tcg_temp_free(s32);
3383     tcg_temp_free_i64(s64);
3384     tcg_temp_free_i64(t64);
3385 
3386     /* Write back the result.  */
3387     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3388     set_cc_op(s, CC_OP_FLAGS);
3389 }
3390 
3391 DISAS_INSN(shift8_im)
3392 {
3393     shift_im(s, insn, OS_BYTE);
3394 }
3395 
3396 DISAS_INSN(shift16_im)
3397 {
3398     shift_im(s, insn, OS_WORD);
3399 }
3400 
3401 DISAS_INSN(shift_im)
3402 {
3403     shift_im(s, insn, OS_LONG);
3404 }
3405 
3406 DISAS_INSN(shift8_reg)
3407 {
3408     shift_reg(s, insn, OS_BYTE);
3409 }
3410 
3411 DISAS_INSN(shift16_reg)
3412 {
3413     shift_reg(s, insn, OS_WORD);
3414 }
3415 
3416 DISAS_INSN(shift_reg)
3417 {
3418     shift_reg(s, insn, OS_LONG);
3419 }
3420 
3421 DISAS_INSN(shift_mem)
3422 {
3423     int logical = insn & 8;
3424     int left = insn & 0x100;
3425     TCGv src;
3426     TCGv addr;
3427 
3428     SRC_EA(env, src, OS_WORD, !logical, &addr);
3429     tcg_gen_movi_i32(QREG_CC_V, 0);
3430     if (left) {
3431         tcg_gen_shri_i32(QREG_CC_C, src, 15);
3432         tcg_gen_shli_i32(QREG_CC_N, src, 1);
3433 
3434         /* Note that ColdFire always clears V,
3435            while M68000 sets it if the most significant bit is changed at
3436            any time during the shift operation.  */
3437         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3438             src = gen_extend(src, OS_WORD, 1);
3439             tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3440         }
3441     } else {
3442         tcg_gen_mov_i32(QREG_CC_C, src);
3443         if (logical) {
3444             tcg_gen_shri_i32(QREG_CC_N, src, 1);
3445         } else {
3446             tcg_gen_sari_i32(QREG_CC_N, src, 1);
3447         }
3448     }
3449 
3450     gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3451     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3452     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3453     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3454 
3455     DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3456     set_cc_op(s, CC_OP_FLAGS);
3457 }
3458 
3459 static void rotate(TCGv reg, TCGv shift, int left, int size)
3460 {
3461     switch (size) {
3462     case 8:
3463         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
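        /* e.g. 0x000000b4 * 0x01010101 = 0xb4b4b4b4, so a 32-bit rotate
         * by any count leaves the correctly rotated 8-bit value in the
         * low byte.  */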
3464         tcg_gen_ext8u_i32(reg, reg);
3465         tcg_gen_muli_i32(reg, reg, 0x01010101);
3466         goto do_long;
3467     case 16:
3468         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3469         tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3470         goto do_long;
3471     do_long:
3472     default:
3473         if (left) {
3474             tcg_gen_rotl_i32(reg, reg, shift);
3475         } else {
3476             tcg_gen_rotr_i32(reg, reg, shift);
3477         }
3478     }
3479 
3480     /* compute flags */
3481 
3482     switch (size) {
3483     case 8:
3484         tcg_gen_ext8s_i32(reg, reg);
3485         break;
3486     case 16:
3487         tcg_gen_ext16s_i32(reg, reg);
3488         break;
3489     default:
3490         break;
3491     }
3492 
3493     /* QREG_CC_X is not affected */
3494 
3495     tcg_gen_mov_i32(QREG_CC_N, reg);
3496     tcg_gen_mov_i32(QREG_CC_Z, reg);
3497 
3498     if (left) {
3499         tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3500     } else {
3501         tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3502     }
3503 
3504     tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3505 }
3506 
3507 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3508 {
3509     switch (size) {
3510     case 8:
3511         tcg_gen_ext8s_i32(reg, reg);
3512         break;
3513     case 16:
3514         tcg_gen_ext16s_i32(reg, reg);
3515         break;
3516     default:
3517         break;
3518     }
3519     tcg_gen_mov_i32(QREG_CC_N, reg);
3520     tcg_gen_mov_i32(QREG_CC_Z, reg);
3521     tcg_gen_mov_i32(QREG_CC_X, X);
3522     tcg_gen_mov_i32(QREG_CC_C, X);
3523     tcg_gen_movi_i32(QREG_CC_V, 0);
3524 }
3525 
3526 /* Result of rotate_x() is valid if 0 <= shift <= size */
3527 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3528 {
3529     TCGv X, shl, shr, shx, sz, zero;
3530 
3531     sz = tcg_const_i32(size);
3532 
3533     shr = tcg_temp_new();
3534     shl = tcg_temp_new();
3535     shx = tcg_temp_new();
3536     if (left) {
3537         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3538         tcg_gen_movi_i32(shr, size + 1);
3539         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3540         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3541         /* shx = shx < 0 ? size : shx; */
3542         zero = tcg_const_i32(0);
3543         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3544         tcg_temp_free(zero);
3545     } else {
3546         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3547         tcg_gen_movi_i32(shl, size + 1);
3548         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3549         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3550     }
3551 
3552     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
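    /* e.g. an 8-bit rotate-left through X of reg = 0xb2 by shift = 3 with
     * X = 1 gives shl = 3, shr = 6, shx = 2, so
     * reg = 0x590 | 0x02 | 0x04 = 0x596: the low byte 0x96 is the rotated
     * value and bit 8, the new X, is 1.
     */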
3553 
3554     tcg_gen_shl_i32(shl, reg, shl);
3555     tcg_gen_shr_i32(shr, reg, shr);
3556     tcg_gen_or_i32(reg, shl, shr);
3557     tcg_temp_free(shl);
3558     tcg_temp_free(shr);
3559     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3560     tcg_gen_or_i32(reg, reg, shx);
3561     tcg_temp_free(shx);
3562 
3563     /* X = (reg >> size) & 1 */
3564 
3565     X = tcg_temp_new();
3566     tcg_gen_shr_i32(X, reg, sz);
3567     tcg_gen_andi_i32(X, X, 1);
3568     tcg_temp_free(sz);
3569 
3570     return X;
3571 }
3572 
3573 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3574 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3575 {
3576     TCGv_i64 t0, shift64;
3577     TCGv X, lo, hi, zero;
3578 
3579     shift64 = tcg_temp_new_i64();
3580     tcg_gen_extu_i32_i64(shift64, shift);
3581 
3582     t0 = tcg_temp_new_i64();
3583 
3584     X = tcg_temp_new();
3585     lo = tcg_temp_new();
3586     hi = tcg_temp_new();
3587 
3588     if (left) {
3589         /* create [reg:X:..] */
3590 
3591         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3592         tcg_gen_concat_i32_i64(t0, lo, reg);
3593 
3594         /* rotate */
3595 
3596         tcg_gen_rotl_i64(t0, t0, shift64);
3597         tcg_temp_free_i64(shift64);
3598 
3599         /* result is [reg:..:reg:X] */
3600 
3601         tcg_gen_extr_i64_i32(lo, hi, t0);
3602         tcg_gen_andi_i32(X, lo, 1);
3603 
3604         tcg_gen_shri_i32(lo, lo, 1);
3605     } else {
3606         /* create [..:X:reg] */
3607 
3608         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3609 
3610         tcg_gen_rotr_i64(t0, t0, shift64);
3611         tcg_temp_free_i64(shift64);
3612 
3613         /* result is value: [X:reg:..:reg] */
3614 
3615         tcg_gen_extr_i64_i32(lo, hi, t0);
3616 
3617         /* extract X */
3618 
3619         tcg_gen_shri_i32(X, hi, 31);
3620 
3621         /* extract result */
3622 
3623         tcg_gen_shli_i32(hi, hi, 1);
3624     }
3625     tcg_temp_free_i64(t0);
3626     tcg_gen_or_i32(lo, lo, hi);
3627     tcg_temp_free(hi);
3628 
3629     /* if shift == 0, register and X are not affected */
3630 
3631     zero = tcg_const_i32(0);
3632     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3633     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3634     tcg_temp_free(zero);
3635     tcg_temp_free(lo);
3636 
3637     return X;
3638 }
3639 
3640 DISAS_INSN(rotate_im)
3641 {
3642     TCGv shift;
3643     int tmp;
3644     int left = (insn & 0x100);
3645 
3646     tmp = (insn >> 9) & 7;
3647     if (tmp == 0) {
3648         tmp = 8;
3649     }
3650 
3651     shift = tcg_const_i32(tmp);
3652     if (insn & 8) {
3653         rotate(DREG(insn, 0), shift, left, 32);
3654     } else {
3655         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3656         rotate_x_flags(DREG(insn, 0), X, 32);
3657         tcg_temp_free(X);
3658     }
3659     tcg_temp_free(shift);
3660 
3661     set_cc_op(s, CC_OP_FLAGS);
3662 }
3663 
3664 DISAS_INSN(rotate8_im)
3665 {
3666     int left = (insn & 0x100);
3667     TCGv reg;
3668     TCGv shift;
3669     int tmp;
3670 
3671     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3672 
3673     tmp = (insn >> 9) & 7;
3674     if (tmp == 0) {
3675         tmp = 8;
3676     }
3677 
3678     shift = tcg_const_i32(tmp);
3679     if (insn & 8) {
3680         rotate(reg, shift, left, 8);
3681     } else {
3682         TCGv X = rotate_x(reg, shift, left, 8);
3683         rotate_x_flags(reg, X, 8);
3684         tcg_temp_free(X);
3685     }
3686     tcg_temp_free(shift);
3687     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3688     set_cc_op(s, CC_OP_FLAGS);
3689 }
3690 
3691 DISAS_INSN(rotate16_im)
3692 {
3693     int left = (insn & 0x100);
3694     TCGv reg;
3695     TCGv shift;
3696     int tmp;
3697 
3698     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3699     tmp = (insn >> 9) & 7;
3700     if (tmp == 0) {
3701         tmp = 8;
3702     }
3703 
3704     shift = tcg_const_i32(tmp);
3705     if (insn & 8) {
3706         rotate(reg, shift, left, 16);
3707     } else {
3708         TCGv X = rotate_x(reg, shift, left, 16);
3709         rotate_x_flags(reg, X, 16);
3710         tcg_temp_free(X);
3711     }
3712     tcg_temp_free(shift);
3713     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3714     set_cc_op(s, CC_OP_FLAGS);
3715 }
3716 
3717 DISAS_INSN(rotate_reg)
3718 {
3719     TCGv reg;
3720     TCGv src;
3721     TCGv t0, t1;
3722     int left = (insn & 0x100);
3723 
3724     reg = DREG(insn, 0);
3725     src = DREG(insn, 9);
3726     /* shift in [0..63] */
3727     t0 = tcg_temp_new();
3728     tcg_gen_andi_i32(t0, src, 63);
3729     t1 = tcg_temp_new_i32();
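    /*
     * The count comes from a register: plain rotates use it modulo the
     * operand size, while rotates through X use it modulo size + 1 because
     * X takes part in the rotation.  For example, a count of 40 becomes
     * 40 & 31 = 8 for ROL.L but 40 % 33 = 7 for ROXL.L.
     */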
3730     if (insn & 8) {
3731         tcg_gen_andi_i32(t1, src, 31);
3732         rotate(reg, t1, left, 32);
3733         /* if shift == 0, clear C */
3734         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3735                             t0, QREG_CC_V /* 0 */,
3736                             QREG_CC_V /* 0 */, QREG_CC_C);
3737     } else {
3738         TCGv X;
3739         /* modulo 33 */
3740         tcg_gen_movi_i32(t1, 33);
3741         tcg_gen_remu_i32(t1, t0, t1);
3742         X = rotate32_x(DREG(insn, 0), t1, left);
3743         rotate_x_flags(DREG(insn, 0), X, 32);
3744         tcg_temp_free(X);
3745     }
3746     tcg_temp_free(t1);
3747     tcg_temp_free(t0);
3748     set_cc_op(s, CC_OP_FLAGS);
3749 }
3750 
3751 DISAS_INSN(rotate8_reg)
3752 {
3753     TCGv reg;
3754     TCGv src;
3755     TCGv t0, t1;
3756     int left = (insn & 0x100);
3757 
3758     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3759     src = DREG(insn, 9);
3760     /* shift in [0..63] */
3761     t0 = tcg_temp_new_i32();
3762     tcg_gen_andi_i32(t0, src, 63);
3763     t1 = tcg_temp_new_i32();
3764     if (insn & 8) {
3765         tcg_gen_andi_i32(t1, src, 7);
3766         rotate(reg, t1, left, 8);
3767         /* if shift == 0, clear C */
3768         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3769                             t0, QREG_CC_V /* 0 */,
3770                             QREG_CC_V /* 0 */, QREG_CC_C);
3771     } else {
3772         TCGv X;
3773         /* modulo 9 */
3774         tcg_gen_movi_i32(t1, 9);
3775         tcg_gen_remu_i32(t1, t0, t1);
3776         X = rotate_x(reg, t1, left, 8);
3777         rotate_x_flags(reg, X, 8);
3778         tcg_temp_free(X);
3779     }
3780     tcg_temp_free(t1);
3781     tcg_temp_free(t0);
3782     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3783     set_cc_op(s, CC_OP_FLAGS);
3784 }
3785 
3786 DISAS_INSN(rotate16_reg)
3787 {
3788     TCGv reg;
3789     TCGv src;
3790     TCGv t0, t1;
3791     int left = (insn & 0x100);
3792 
3793     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3794     src = DREG(insn, 9);
3795     /* shift in [0..63] */
3796     t0 = tcg_temp_new_i32();
3797     tcg_gen_andi_i32(t0, src, 63);
3798     t1 = tcg_temp_new_i32();
3799     if (insn & 8) {
3800         tcg_gen_andi_i32(t1, src, 15);
3801         rotate(reg, t1, left, 16);
3802         /* if shift == 0, clear C */
3803         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3804                             t0, QREG_CC_V /* 0 */,
3805                             QREG_CC_V /* 0 */, QREG_CC_C);
3806     } else {
3807         TCGv X;
3808         /* modulo 17 */
3809         tcg_gen_movi_i32(t1, 17);
3810         tcg_gen_remu_i32(t1, t0, t1);
3811         X = rotate_x(reg, t1, left, 16);
3812         rotate_x_flags(reg, X, 16);
3813         tcg_temp_free(X);
3814     }
3815     tcg_temp_free(t1);
3816     tcg_temp_free(t0);
3817     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3818     set_cc_op(s, CC_OP_FLAGS);
3819 }
3820 
3821 DISAS_INSN(rotate_mem)
3822 {
3823     TCGv src;
3824     TCGv addr;
3825     TCGv shift;
3826     int left = (insn & 0x100);
3827 
3828     SRC_EA(env, src, OS_WORD, 0, &addr);
3829 
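    /* The memory form always rotates a 16-bit operand by exactly one bit. */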
3830     shift = tcg_const_i32(1);
3831     if (insn & 0x0200) {
3832         rotate(src, shift, left, 16);
3833     } else {
3834         TCGv X = rotate_x(src, shift, left, 16);
3835         rotate_x_flags(src, X, 16);
3836         tcg_temp_free(X);
3837     }
3838     tcg_temp_free(shift);
3839     DEST_EA(env, insn, OS_WORD, src, &addr);
3840     set_cc_op(s, CC_OP_FLAGS);
3841 }
3842 
3843 DISAS_INSN(bfext_reg)
3844 {
3845     int ext = read_im16(env, s);
3846     int is_sign = insn & 0x200;
3847     TCGv src = DREG(insn, 0);
3848     TCGv dst = DREG(ext, 12);
3849     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3850     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3851     int pos = 32 - ofs - len;        /* little bit-endian */
3852     TCGv tmp = tcg_temp_new();
3853     TCGv shift;
3854 
3855     /* In general, we're going to rotate the field so that it's at the
3856        top of the word and then right-shift by the complement of the
3857        width to extend the field.  */
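    /* Worked example: ofs counts from the most significant bit, so with
       ofs = 4 and len = 8 the field occupies bits 27..20 and
       pos = 32 - 4 - 8 = 20 is the bit position (s)extract expects.  */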
3858     if (ext & 0x20) {
3859         /* Variable width.  */
3860         if (ext & 0x800) {
3861             /* Variable offset.  */
3862             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3863             tcg_gen_rotl_i32(tmp, src, tmp);
3864         } else {
3865             tcg_gen_rotli_i32(tmp, src, ofs);
3866         }
3867 
3868         shift = tcg_temp_new();
3869         tcg_gen_neg_i32(shift, DREG(ext, 0));
3870         tcg_gen_andi_i32(shift, shift, 31);
3871         tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
3872         if (is_sign) {
3873             tcg_gen_mov_i32(dst, QREG_CC_N);
3874         } else {
3875             tcg_gen_shr_i32(dst, tmp, shift);
3876         }
3877         tcg_temp_free(shift);
3878     } else {
3879         /* Immediate width.  */
3880         if (ext & 0x800) {
3881             /* Variable offset */
3882             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3883             tcg_gen_rotl_i32(tmp, src, tmp);
3884             src = tmp;
3885             pos = 32 - len;
3886         } else {
3887             /* Immediate offset.  If the field doesn't wrap around the
3888                end of the word, rely on (s)extract completely.  */
3889             if (pos < 0) {
3890                 tcg_gen_rotli_i32(tmp, src, ofs);
3891                 src = tmp;
3892                 pos = 32 - len;
3893             }
3894         }
3895 
3896         tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
3897         if (is_sign) {
3898             tcg_gen_mov_i32(dst, QREG_CC_N);
3899         } else {
3900             tcg_gen_extract_i32(dst, src, pos, len);
3901         }
3902     }
3903 
3904     tcg_temp_free(tmp);
3905     set_cc_op(s, CC_OP_LOGIC);
3906 }
3907 
3908 DISAS_INSN(bfext_mem)
3909 {
3910     int ext = read_im16(env, s);
3911     int is_sign = insn & 0x200;
3912     TCGv dest = DREG(ext, 12);
3913     TCGv addr, len, ofs;
3914 
3915     addr = gen_lea(env, s, insn, OS_UNSIZED);
3916     if (IS_NULL_QREG(addr)) {
3917         gen_addr_fault(s);
3918         return;
3919     }
3920 
3921     if (ext & 0x20) {
3922         len = DREG(ext, 0);
3923     } else {
3924         len = tcg_const_i32(extract32(ext, 0, 5));
3925     }
3926     if (ext & 0x800) {
3927         ofs = DREG(ext, 6);
3928     } else {
3929         ofs = tcg_const_i32(extract32(ext, 6, 5));
3930     }
3931 
3932     if (is_sign) {
3933         gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
3934         tcg_gen_mov_i32(QREG_CC_N, dest);
3935     } else {
3936         TCGv_i64 tmp = tcg_temp_new_i64();
3937         gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
3938         tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
3939         tcg_temp_free_i64(tmp);
3940     }
3941     set_cc_op(s, CC_OP_LOGIC);
3942 
3943     if (!(ext & 0x20)) {
3944         tcg_temp_free(len);
3945     }
3946     if (!(ext & 0x800)) {
3947         tcg_temp_free(ofs);
3948     }
3949 }
3950 
3951 DISAS_INSN(bfop_reg)
3952 {
3953     int ext = read_im16(env, s);
3954     TCGv src = DREG(insn, 0);
3955     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3956     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3957     TCGv mask, tofs, tlen;
3958 
3959     TCGV_UNUSED(tofs);
3960     TCGV_UNUSED(tlen);
3961     if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
3962         tofs = tcg_temp_new();
3963         tlen = tcg_temp_new();
3964     }
3965 
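    /*
     * Sketch of the mask logic below: ~(0x7fffffff >> (len - 1)) is a mask
     * of the top "len" bits.  CC_N receives the field rotated up to the top
     * of the word (for the N and Z flags), while "mask" ends up with the
     * field bits clear and everything else set, so bfclr is an AND with it,
     * bfset an OR with its complement (orc) and bfchg an XOR with its
     * complement (eqv).
     */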
3966     if ((ext & 0x820) == 0) {
3967         /* Immediate width and offset.  */
3968         uint32_t maski = 0x7fffffffu >> (len - 1);
3969         if (ofs + len <= 32) {
3970             tcg_gen_shli_i32(QREG_CC_N, src, ofs);
3971         } else {
3972             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
3973         }
3974         tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
3975         mask = tcg_const_i32(ror32(maski, ofs));
3976         if (!TCGV_IS_UNUSED(tofs)) {
3977             tcg_gen_movi_i32(tofs, ofs);
3978             tcg_gen_movi_i32(tlen, len);
3979         }
3980     } else {
3981         TCGv tmp = tcg_temp_new();
3982         if (ext & 0x20) {
3983             /* Variable width */
3984             tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
3985             tcg_gen_andi_i32(tmp, tmp, 31);
3986             mask = tcg_const_i32(0x7fffffffu);
3987             tcg_gen_shr_i32(mask, mask, tmp);
3988             if (!TCGV_IS_UNUSED(tlen)) {
3989                 tcg_gen_addi_i32(tlen, tmp, 1);
3990             }
3991         } else {
3992             /* Immediate width */
3993             mask = tcg_const_i32(0x7fffffffu >> (len - 1));
3994             if (!TCGV_IS_UNUSED(tlen)) {
3995                 tcg_gen_movi_i32(tlen, len);
3996             }
3997         }
3998         if (ext & 0x800) {
3999             /* Variable offset */
4000             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4001             tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4002             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4003             tcg_gen_rotr_i32(mask, mask, tmp);
4004             if (!TCGV_IS_UNUSED(tofs)) {
4005                 tcg_gen_mov_i32(tofs, tmp);
4006             }
4007         } else {
4008             /* Immediate offset (and variable width) */
4009             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4010             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4011             tcg_gen_rotri_i32(mask, mask, ofs);
4012             if (!TCGV_IS_UNUSED(tofs)) {
4013                 tcg_gen_movi_i32(tofs, ofs);
4014             }
4015         }
4016         tcg_temp_free(tmp);
4017     }
4018     set_cc_op(s, CC_OP_LOGIC);
4019 
4020     switch (insn & 0x0f00) {
4021     case 0x0a00: /* bfchg */
4022         tcg_gen_eqv_i32(src, src, mask);
4023         break;
4024     case 0x0c00: /* bfclr */
4025         tcg_gen_and_i32(src, src, mask);
4026         break;
4027     case 0x0d00: /* bfffo */
4028         gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4029         tcg_temp_free(tlen);
4030         tcg_temp_free(tofs);
4031         break;
4032     case 0x0e00: /* bfset */
4033         tcg_gen_orc_i32(src, src, mask);
4034         break;
4035     case 0x0800: /* bftst */
4036         /* flags already set; no other work to do.  */
4037         break;
4038     default:
4039         g_assert_not_reached();
4040     }
4041     tcg_temp_free(mask);
4042 }
4043 
4044 DISAS_INSN(bfop_mem)
4045 {
4046     int ext = read_im16(env, s);
4047     TCGv addr, len, ofs;
4048     TCGv_i64 t64;
4049 
4050     addr = gen_lea(env, s, insn, OS_UNSIZED);
4051     if (IS_NULL_QREG(addr)) {
4052         gen_addr_fault(s);
4053         return;
4054     }
4055 
4056     if (ext & 0x20) {
4057         len = DREG(ext, 0);
4058     } else {
4059         len = tcg_const_i32(extract32(ext, 0, 5));
4060     }
4061     if (ext & 0x800) {
4062         ofs = DREG(ext, 6);
4063     } else {
4064         ofs = tcg_const_i32(extract32(ext, 6, 5));
4065     }
4066 
4067     switch (insn & 0x0f00) {
4068     case 0x0a00: /* bfchg */
4069         gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4070         break;
4071     case 0x0c00: /* bfclr */
4072         gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4073         break;
4074     case 0x0d00: /* bfffo */
4075         t64 = tcg_temp_new_i64();
4076         gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4077         tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4078         tcg_temp_free_i64(t64);
4079         break;
4080     case 0x0e00: /* bfset */
4081         gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4082         break;
4083     case 0x0800: /* bftst */
4084         gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4085         break;
4086     default:
4087         g_assert_not_reached();
4088     }
4089     set_cc_op(s, CC_OP_LOGIC);
4090 
4091     if (!(ext & 0x20)) {
4092         tcg_temp_free(len);
4093     }
4094     if (!(ext & 0x800)) {
4095         tcg_temp_free(ofs);
4096     }
4097 }
4098 
4099 DISAS_INSN(bfins_reg)
4100 {
4101     int ext = read_im16(env, s);
4102     TCGv dst = DREG(insn, 0);
4103     TCGv src = DREG(ext, 12);
4104     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4105     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4106     int pos = 32 - ofs - len;        /* little bit-endian */
4107     TCGv tmp;
4108 
4109     tmp = tcg_temp_new();
4110 
4111     if (ext & 0x20) {
4112         /* Variable width */
4113         tcg_gen_neg_i32(tmp, DREG(ext, 0));
4114         tcg_gen_andi_i32(tmp, tmp, 31);
4115         tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4116     } else {
4117         /* Immediate width */
4118         tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4119     }
4120     set_cc_op(s, CC_OP_LOGIC);
4121 
4122     /* Immediate width and offset */
4123     if ((ext & 0x820) == 0) {
4124         /* Check for suitability for deposit.  */
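        /* pos < 0 means the field wraps past bit 0 (e.g. ofs = 28, len = 8
           gives pos = -4), so it is inserted with mask-and-rotate instead. */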
4125         if (pos >= 0) {
4126             tcg_gen_deposit_i32(dst, dst, src, pos, len);
4127         } else {
4128             uint32_t maski = -2U << (len - 1);
4129             uint32_t roti = (ofs + len) & 31;
4130             tcg_gen_andi_i32(tmp, src, ~maski);
4131             tcg_gen_rotri_i32(tmp, tmp, roti);
4132             tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4133             tcg_gen_or_i32(dst, dst, tmp);
4134         }
4135     } else {
4136         TCGv mask = tcg_temp_new();
4137         TCGv rot = tcg_temp_new();
4138 
4139         if (ext & 0x20) {
4140             /* Variable width */
4141             tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4142             tcg_gen_andi_i32(rot, rot, 31);
4143             tcg_gen_movi_i32(mask, -2);
4144             tcg_gen_shl_i32(mask, mask, rot);
4145             tcg_gen_mov_i32(rot, DREG(ext, 0));
4146             tcg_gen_andc_i32(tmp, src, mask);
4147         } else {
4148             /* Immediate width (variable offset) */
4149             uint32_t maski = -2U << (len - 1);
4150             tcg_gen_andi_i32(tmp, src, ~maski);
4151             tcg_gen_movi_i32(mask, maski);
4152             tcg_gen_movi_i32(rot, len & 31);
4153         }
4154         if (ext & 0x800) {
4155             /* Variable offset */
4156             tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4157         } else {
4158             /* Immediate offset (variable width) */
4159             tcg_gen_addi_i32(rot, rot, ofs);
4160         }
4161         tcg_gen_andi_i32(rot, rot, 31);
4162         tcg_gen_rotr_i32(mask, mask, rot);
4163         tcg_gen_rotr_i32(tmp, tmp, rot);
4164         tcg_gen_and_i32(dst, dst, mask);
4165         tcg_gen_or_i32(dst, dst, tmp);
4166 
4167         tcg_temp_free(rot);
4168         tcg_temp_free(mask);
4169     }
4170     tcg_temp_free(tmp);
4171 }
4172 
4173 DISAS_INSN(bfins_mem)
4174 {
4175     int ext = read_im16(env, s);
4176     TCGv src = DREG(ext, 12);
4177     TCGv addr, len, ofs;
4178 
4179     addr = gen_lea(env, s, insn, OS_UNSIZED);
4180     if (IS_NULL_QREG(addr)) {
4181         gen_addr_fault(s);
4182         return;
4183     }
4184 
4185     if (ext & 0x20) {
4186         len = DREG(ext, 0);
4187     } else {
4188         len = tcg_const_i32(extract32(ext, 0, 5));
4189     }
4190     if (ext & 0x800) {
4191         ofs = DREG(ext, 6);
4192     } else {
4193         ofs = tcg_const_i32(extract32(ext, 6, 5));
4194     }
4195 
4196     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4197     set_cc_op(s, CC_OP_LOGIC);
4198 
4199     if (!(ext & 0x20)) {
4200         tcg_temp_free(len);
4201     }
4202     if (!(ext & 0x800)) {
4203         tcg_temp_free(ofs);
4204     }
4205 }
4206 
4207 DISAS_INSN(ff1)
4208 {
4209     TCGv reg;
4210     reg = DREG(insn, 0);
4211     gen_logic_cc(s, reg, OS_LONG);
4212     gen_helper_ff1(reg, reg);
4213 }
4214 
4215 static TCGv gen_get_sr(DisasContext *s)
4216 {
4217     TCGv ccr;
4218     TCGv sr;
4219 
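    /* Combine the system half of SR held in QREG_SR (bits 15..5) with the
       lazily computed CCR in the low bits.  */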
4220     ccr = gen_get_ccr(s);
4221     sr = tcg_temp_new();
4222     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
4223     tcg_gen_or_i32(sr, sr, ccr);
4224     return sr;
4225 }
4226 
4227 DISAS_INSN(strldsr)
4228 {
4229     uint16_t ext;
4230     uint32_t addr;
4231 
4232     addr = s->pc - 2;
4233     ext = read_im16(env, s);
4234     if (ext != 0x46FC) {
4235         gen_exception(s, addr, EXCP_UNSUPPORTED);
4236         return;
4237     }
4238     ext = read_im16(env, s);
4239     if (IS_USER(s) || (ext & SR_S) == 0) {
4240         gen_exception(s, addr, EXCP_PRIVILEGE);
4241         return;
4242     }
4243     gen_push(s, gen_get_sr(s));
4244     gen_set_sr_im(s, ext, 0);
4245 }
4246 
4247 DISAS_INSN(move_from_sr)
4248 {
4249     TCGv sr;
4250 
4251     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4252         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4253         return;
4254     }
4255     sr = gen_get_sr(s);
4256     DEST_EA(env, insn, OS_WORD, sr, NULL);
4257 }
4258 
4259 DISAS_INSN(move_to_sr)
4260 {
4261     if (IS_USER(s)) {
4262         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4263         return;
4264     }
4265     gen_set_sr(env, s, insn, 0);
4266     gen_lookup_tb(s);
4267 }
4268 
4269 DISAS_INSN(move_from_usp)
4270 {
4271     if (IS_USER(s)) {
4272         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4273         return;
4274     }
4275     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4276                    offsetof(CPUM68KState, sp[M68K_USP]));
4277 }
4278 
4279 DISAS_INSN(move_to_usp)
4280 {
4281     if (IS_USER(s)) {
4282         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4283         return;
4284     }
4285     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4286                    offsetof(CPUM68KState, sp[M68K_USP]));
4287 }
4288 
4289 DISAS_INSN(halt)
4290 {
4291     gen_exception(s, s->pc, EXCP_HALT_INSN);
4292 }
4293 
4294 DISAS_INSN(stop)
4295 {
4296     uint16_t ext;
4297 
4298     if (IS_USER(s)) {
4299         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4300         return;
4301     }
4302 
4303     ext = read_im16(env, s);
4304 
4305     gen_set_sr_im(s, ext, 0);
4306     tcg_gen_movi_i32(cpu_halted, 1);
4307     gen_exception(s, s->pc, EXCP_HLT);
4308 }
4309 
4310 DISAS_INSN(rte)
4311 {
4312     if (IS_USER(s)) {
4313         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4314         return;
4315     }
4316     gen_exception(s, s->pc - 2, EXCP_RTE);
4317 }
4318 
4319 DISAS_INSN(movec)
4320 {
4321     uint16_t ext;
4322     TCGv reg;
4323 
4324     if (IS_USER(s)) {
4325         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4326         return;
4327     }
4328 
4329     ext = read_im16(env, s);
4330 
4331     if (ext & 0x8000) {
4332         reg = AREG(ext, 12);
4333     } else {
4334         reg = DREG(ext, 12);
4335     }
4336     gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4337     gen_lookup_tb(s);
4338 }
4339 
4340 DISAS_INSN(intouch)
4341 {
4342     if (IS_USER(s)) {
4343         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4344         return;
4345     }
4346     /* ICache fetch.  Implement as no-op.  */
4347 }
4348 
4349 DISAS_INSN(cpushl)
4350 {
4351     if (IS_USER(s)) {
4352         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4353         return;
4354     }
4355     /* Cache push/invalidate.  Implement as no-op.  */
4356 }
4357 
4358 DISAS_INSN(wddata)
4359 {
4360     gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4361 }
4362 
4363 DISAS_INSN(wdebug)
4364 {
4365     M68kCPU *cpu = m68k_env_get_cpu(env);
4366 
4367     if (IS_USER(s)) {
4368         gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4369         return;
4370     }
4371     /* TODO: Implement wdebug.  */
4372     cpu_abort(CPU(cpu), "WDEBUG not implemented");
4373 }
4374 
4375 DISAS_INSN(trap)
4376 {
4377     gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
4378 }
4379 
4380 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4381 {
4382     switch (reg) {
4383     case M68K_FPIAR:
4384         tcg_gen_movi_i32(res, 0);
4385         break;
4386     case M68K_FPSR:
4387         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4388         break;
4389     case M68K_FPCR:
4390         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4391         break;
4392     }
4393 }
4394 
4395 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4396 {
4397     switch (reg) {
4398     case M68K_FPIAR:
4399         break;
4400     case M68K_FPSR:
4401         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4402         break;
4403     case M68K_FPCR:
4404         gen_helper_set_fpcr(cpu_env, val);
4405         break;
4406     }
4407 }
4408 
4409 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4410 {
4411     int index = IS_USER(s);
4412     TCGv tmp;
4413 
4414     tmp = tcg_temp_new();
4415     gen_load_fcr(s, tmp, reg);
4416     tcg_gen_qemu_st32(tmp, addr, index);
4417     tcg_temp_free(tmp);
4418 }
4419 
4420 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4421 {
4422     int index = IS_USER(s);
4423     TCGv tmp;
4424 
4425     tmp = tcg_temp_new();
4426     tcg_gen_qemu_ld32u(tmp, addr, index);
4427     gen_store_fcr(s, tmp, reg);
4428     tcg_temp_free(tmp);
4429 }
4430 
4431 
4432 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4433                              uint32_t insn, uint32_t ext)
4434 {
4435     int mask = (ext >> 10) & 7;
4436     int is_write = (ext >> 13) & 1;
4437     int mode = extract32(insn, 3, 3);
4438     int i;
4439     TCGv addr, tmp;
4440 
4441     switch (mode) {
4442     case 0: /* Dn */
4443         if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4444             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4445             return;
4446         }
4447         if (is_write) {
4448             gen_load_fcr(s, DREG(insn, 0), mask);
4449         } else {
4450             gen_store_fcr(s, DREG(insn, 0), mask);
4451         }
4452         return;
4453     case 1: /* An, only with FPIAR */
4454         if (mask != M68K_FPIAR) {
4455             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4456             return;
4457         }
4458         if (is_write) {
4459             gen_load_fcr(s, AREG(insn, 0), mask);
4460         } else {
4461             gen_store_fcr(s, AREG(insn, 0), mask);
4462         }
4463         return;
4464     default:
4465         break;
4466     }
4467 
4468     tmp = gen_lea(env, s, insn, OS_LONG);
4469     if (IS_NULL_QREG(tmp)) {
4470         gen_addr_fault(s);
4471         return;
4472     }
4473 
4474     addr = tcg_temp_new();
4475     tcg_gen_mov_i32(addr, tmp);
4476 
4477     /* mask:
4478      *
4479      * 0b100 Floating-Point Control Register
4480      * 0b010 Floating-Point Status Register
4481      * 0b001 Floating-Point Instruction Address Register
4482      *
4483      */
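    /* Modes 3 and 4 are assumed to be the usual (An)+ and -(An) forms:
       the predecrement store walks the addresses downwards, and both forms
       write the final address back to An.  */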
4484 
4485     if (is_write && mode == 4) {
4486         for (i = 2; i >= 0; i--, mask >>= 1) {
4487             if (mask & 1) {
4488                 gen_qemu_store_fcr(s, addr, 1 << i);
4489                 if (mask != 1) {
4490                     tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4491                 }
4492             }
4493         }
4494         tcg_gen_mov_i32(AREG(insn, 0), addr);
4495     } else {
4496         for (i = 0; i < 3; i++, mask >>= 1) {
4497             if (mask & 1) {
4498                 if (is_write) {
4499                     gen_qemu_store_fcr(s, addr, 1 << i);
4500                 } else {
4501                     gen_qemu_load_fcr(s, addr, 1 << i);
4502                 }
4503                 if (mask != 1 || mode == 3) {
4504                     tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4505                 }
4506             }
4507         }
4508         if (mode == 3) {
4509             tcg_gen_mov_i32(AREG(insn, 0), addr);
4510         }
4511     }
4512     tcg_temp_free_i32(addr);
4513 }
4514 
4515 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4516                           uint32_t insn, uint32_t ext)
4517 {
4518     int opsize;
4519     TCGv addr, tmp;
4520     int mode = (ext >> 11) & 0x3;
4521     int is_load = ((ext & 0x2000) == 0);
4522 
4523     if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4524         opsize = OS_EXTENDED;
4525     } else {
4526         opsize = OS_DOUBLE;  /* FIXME */
4527     }
4528 
4529     addr = gen_lea(env, s, insn, opsize);
4530     if (IS_NULL_QREG(addr)) {
4531         gen_addr_fault(s);
4532         return;
4533     }
4534 
4535     tmp = tcg_temp_new();
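    /* Bit 0 of the mode selects a dynamic register list held in a data
       register (ext bits 4..6); otherwise the low byte of ext is the
       static list.  */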
4536     if (mode & 0x1) {
4537         /* Dynamic register list */
4538         tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4539     } else {
4540         /* Static register list */
4541         tcg_gen_movi_i32(tmp, ext & 0xff);
4542     }
4543 
4544     if (!is_load && (mode & 2) == 0) {
4545         /* predecrement addressing mode
4546          * only available when storing registers to memory
4547          */
4548         if (opsize == OS_EXTENDED) {
4549             gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4550         } else {
4551             gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4552         }
4553     } else {
4554         /* postincrement addressing mode */
4555         if (opsize == OS_EXTENDED) {
4556             if (is_load) {
4557                 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4558             } else {
4559                 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4560             }
4561         } else {
4562             if (is_load) {
4563                 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4564             } else {
4565                 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
4566             }
4567         }
4568     }
4569     if ((insn & 070) == 030 || (insn & 070) == 040) {
4570         tcg_gen_mov_i32(AREG(insn, 0), tmp);
4571     }
4572     tcg_temp_free(tmp);
4573 }
4574 
4575 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4576    immediately before the next FP instruction is executed.  */
4577 DISAS_INSN(fpu)
4578 {
4579     uint16_t ext;
4580     int opmode;
4581     int opsize;
4582     TCGv_ptr cpu_src, cpu_dest;
4583 
4584     ext = read_im16(env, s);
4585     opmode = ext & 0x7f;
4586     switch ((ext >> 13) & 7) {
4587     case 0:
4588         break;
4589     case 1:
4590         goto undef;
4591     case 2:
4592         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4593             /* fmovecr */
4594             TCGv rom_offset = tcg_const_i32(opmode);
4595             cpu_dest = gen_fp_ptr(REG(ext, 7));
4596             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4597             tcg_temp_free_ptr(cpu_dest);
4598             tcg_temp_free(rom_offset);
4599             return;
4600         }
4601         break;
4602     case 3: /* fmove out */
4603         cpu_src = gen_fp_ptr(REG(ext, 7));
4604         opsize = ext_opsize(ext, 10);
4605         if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_STORE) == -1) {
4606             gen_addr_fault(s);
4607         }
4608         gen_helper_ftst(cpu_env, cpu_src);
4609         tcg_temp_free_ptr(cpu_src);
4610         return;
4611     case 4: /* fmove to control register.  */
4612     case 5: /* fmove from control register.  */
4613         gen_op_fmove_fcr(env, s, insn, ext);
4614         return;
4615     case 6: /* fmovem */
4616     case 7:
4617         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4618             goto undef;
4619         }
4620         gen_op_fmovem(env, s, insn, ext);
4621         return;
4622     }
4623     if (ext & (1 << 14)) {
4624         /* Source effective address.  */
4625         opsize = ext_opsize(ext, 10);
4626         cpu_src = gen_fp_result_ptr();
4627         if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_LOADS) == -1) {
4628             gen_addr_fault(s);
4629             return;
4630         }
4631     } else {
4632         /* Source register.  */
4633         opsize = OS_EXTENDED;
4634         cpu_src = gen_fp_ptr(REG(ext, 10));
4635     }
4636     cpu_dest = gen_fp_ptr(REG(ext, 7));
4637     switch (opmode) {
4638     case 0: /* fmove */
4639         gen_fp_move(cpu_dest, cpu_src);
4640         break;
4641     case 0x40: /* fsmove */
4642         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4643         break;
4644     case 0x44: /* fdmove */
4645         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4646         break;
4647     case 1: /* fint */
4648         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4649         break;
4650     case 3: /* fintrz */
4651         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4652         break;
4653     case 4: /* fsqrt */
4654         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4655         break;
4656     case 0x41: /* fssqrt */
4657         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4658         break;
4659     case 0x45: /* fdsqrt */
4660         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4661         break;
4662     case 0x18: /* fabs */
4663         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
4664         break;
4665     case 0x58: /* fsabs */
4666         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
4667         break;
4668     case 0x5c: /* fdabs */
4669         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
4670         break;
4671     case 0x1a: /* fneg */
4672         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
4673         break;
4674     case 0x5a: /* fsneg */
4675         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
4676         break;
4677     case 0x5e: /* fdneg */
4678         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
4679         break;
4680     case 0x20: /* fdiv */
4681         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4682         break;
4683     case 0x60: /* fsdiv */
4684         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4685         break;
4686     case 0x64: /* fddiv */
4687         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4688         break;
4689     case 0x22: /* fadd */
4690         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4691         break;
4692     case 0x62: /* fsadd */
4693         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4694         break;
4695     case 0x66: /* fdadd */
4696         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4697         break;
4698     case 0x23: /* fmul */
4699         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4700         break;
4701     case 0x63: /* fsmul */
4702         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4703         break;
4704     case 0x67: /* fdmul */
4705         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4706         break;
4707     case 0x24: /* fsgldiv */
4708         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4709         break;
4710     case 0x27: /* fsglmul */
4711         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4712         break;
4713     case 0x28: /* fsub */
4714         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4715         break;
4716     case 0x68: /* fssub */
4717         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4718         break;
4719     case 0x6c: /* fdsub */
4720         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4721         break;
4722     case 0x38: /* fcmp */
4723         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
4724         return;
4725     case 0x3a: /* ftst */
4726         gen_helper_ftst(cpu_env, cpu_src);
4727         return;
4728     default:
4729         goto undef;
4730     }
4731     tcg_temp_free_ptr(cpu_src);
4732     gen_helper_ftst(cpu_env, cpu_dest);
4733     tcg_temp_free_ptr(cpu_dest);
4734     return;
4735 undef:
4736     /* FIXME: Is this right for offset addressing modes?  */
4737     s->pc -= 2;
4738     disas_undef_fpu(env, s, insn);
4739 }
4740 
4741 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
4742 {
4743     TCGv fpsr;
4744 
4745     c->g1 = 1;
4746     c->v2 = tcg_const_i32(0);
4747     c->g2 = 0;
4748     /* TODO: Raise BSUN exception.  */
4749     fpsr = tcg_temp_new();
4750     gen_load_fcr(s, fpsr, M68K_FPSR);
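    /*
     * Each predicate below is reduced to a single comparison of v1 with
     * zero.  Composite conditions shift the A or Z bit of the FPSR into the
     * N bit's position so the flags can be combined with and/or/xor rather
     * than with branches.
     */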
4751     switch (cond) {
4752     case 0:  /* False */
4753     case 16: /* Signaling False */
4754         c->v1 = c->v2;
4755         c->tcond = TCG_COND_NEVER;
4756         break;
4757     case 1:  /* EQual Z */
4758     case 17: /* Signaling EQual Z */
4759         c->v1 = tcg_temp_new();
4760         c->g1 = 0;
4761         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
4762         c->tcond = TCG_COND_NE;
4763         break;
4764     case 2:  /* Ordered Greater Than !(A || Z || N) */
4765     case 18: /* Greater Than !(A || Z || N) */
4766         c->v1 = tcg_temp_new();
4767         c->g1 = 0;
4768         tcg_gen_andi_i32(c->v1, fpsr,
4769                          FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
4770         c->tcond = TCG_COND_EQ;
4771         break;
4772     case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
4773     case 19: /* Greater than or Equal Z || !(A || N) */
4774         c->v1 = tcg_temp_new();
4775         c->g1 = 0;
4776         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
4777         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
4778         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
4779         tcg_gen_or_i32(c->v1, c->v1, fpsr);
4780         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
4781         c->tcond = TCG_COND_NE;
4782         break;
4783     case 4:  /* Ordered Less Than !(!N || A || Z) */
4784     case 20: /* Less Than !(!N || A || Z) */
4785         c->v1 = tcg_temp_new();
4786         c->g1 = 0;
4787         tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
4788         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
4789         c->tcond = TCG_COND_EQ;
4790         break;
4791     case 5:  /* Ordered Less than or Equal Z || (N && !A) */
4792     case 21: /* Less than or Equal Z || (N && !A) */
4793         c->v1 = tcg_temp_new();
4794         c->g1 = 0;
4795         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
4796         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
4797         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
4798         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
4799         c->tcond = TCG_COND_NE;
4800         break;
4801     case 6:  /* Ordered Greater or Less than !(A || Z) */
4802     case 22: /* Greater or Less than !(A || Z) */
4803         c->v1 = tcg_temp_new();
4804         c->g1 = 0;
4805         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
4806         c->tcond = TCG_COND_EQ;
4807         break;
4808     case 7:  /* Ordered !A */
4809     case 23: /* Greater, Less or Equal !A */
4810         c->v1 = tcg_temp_new();
4811         c->g1 = 0;
4812         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
4813         c->tcond = TCG_COND_EQ;
4814         break;
4815     case 8:  /* Unordered A */
4816     case 24: /* Not Greater, Less or Equal A */
4817         c->v1 = tcg_temp_new();
4818         c->g1 = 0;
4819         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
4820         c->tcond = TCG_COND_NE;
4821         break;
4822     case 9:  /* Unordered or Equal A || Z */
4823     case 25: /* Not Greater or Less than A || Z */
4824         c->v1 = tcg_temp_new();
4825         c->g1 = 0;
4826         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
4827         c->tcond = TCG_COND_NE;
4828         break;
4829     case 10: /* Unordered or Greater Than A || !(N || Z) */
4830     case 26: /* Not Less or Equal A || !(N || Z) */
4831         c->v1 = tcg_temp_new();
4832         c->g1 = 0;
4833         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
4834         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
4835         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
4836         tcg_gen_or_i32(c->v1, c->v1, fpsr);
4837         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
4838         c->tcond = TCG_COND_NE;
4839         break;
4840     case 11: /* Unordered or Greater or Equal A || Z || !N */
4841     case 27: /* Not Less Than A || Z || !N */
4842         c->v1 = tcg_temp_new();
4843         c->g1 = 0;
4844         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
4845         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
4846         c->tcond = TCG_COND_NE;
4847         break;
4848     case 12: /* Unordered or Less Than A || (N && !Z) */
4849     case 28: /* Not Greater than or Equal A || (N && !Z) */
4850         c->v1 = tcg_temp_new();
4851         c->g1 = 0;
4852         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
4853         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
4854         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
4855         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
4856         c->tcond = TCG_COND_NE;
4857         break;
4858     case 13: /* Unordered or Less or Equal A || Z || N */
4859     case 29: /* Not Greater Than A || Z || N */
4860         c->v1 = tcg_temp_new();
4861         c->g1 = 0;
4862         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
4863         c->tcond = TCG_COND_NE;
4864         break;
4865     case 14: /* Not Equal !Z */
4866     case 30: /* Signaling Not Equal !Z */
4867         c->v1 = tcg_temp_new();
4868         c->g1 = 0;
4869         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
4870         c->tcond = TCG_COND_EQ;
4871         break;
4872     case 15: /* True */
4873     case 31: /* Signaling True */
4874         c->v1 = c->v2;
4875         c->tcond = TCG_COND_ALWAYS;
4876         break;
4877     }
4878     tcg_temp_free(fpsr);
4879 }
4880 
4881 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
4882 {
4883     DisasCompare c;
4884 
4885     gen_fcc_cond(&c, s, cond);
4886     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
4887     free_cond(&c);
4888 }
4889 
4890 DISAS_INSN(fbcc)
4891 {
4892     uint32_t offset;
4893     uint32_t base;
4894     TCGLabel *l1;
4895 
4896     base = s->pc;
4897     offset = (int16_t)read_im16(env, s);
4898     if (insn & (1 << 6)) {
4899         offset = (offset << 16) | read_im16(env, s);
4900     }
4901 
4902     l1 = gen_new_label();
4903     update_cc_op(s);
4904     gen_fjmpcc(s, insn & 0x3f, l1);
4905     gen_jmp_tb(s, 0, s->pc);
4906     gen_set_label(l1);
4907     gen_jmp_tb(s, 1, base + offset);
4908 }
4909 
4910 DISAS_INSN(fscc)
4911 {
4912     DisasCompare c;
4913     int cond;
4914     TCGv tmp;
4915     uint16_t ext;
4916 
4917     ext = read_im16(env, s);
4918     cond = ext & 0x3f;
4919     gen_fcc_cond(&c, s, cond);
4920 
4921     tmp = tcg_temp_new();
4922     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
4923     free_cond(&c);
4924 
4925     tcg_gen_neg_i32(tmp, tmp);
4926     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
4927     tcg_temp_free(tmp);
4928 }
4929 
4930 DISAS_INSN(frestore)
4931 {
4932     M68kCPU *cpu = m68k_env_get_cpu(env);
4933 
4934     /* TODO: Implement frestore.  */
4935     cpu_abort(CPU(cpu), "FRESTORE not implemented");
4936 }
4937 
4938 DISAS_INSN(fsave)
4939 {
4940     M68kCPU *cpu = m68k_env_get_cpu(env);
4941 
4942     /* TODO: Implement fsave.  */
4943     cpu_abort(CPU(cpu), "FSAVE not implemented");
4944 }
4945 
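/* Extract the 16-bit MAC operand selected by "upper" according to the
   accumulator mode: fractional mode keeps it in the upper half of the
   word, signed integer mode sign-extends it, otherwise it is
   zero-extended.  */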
4946 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
4947 {
4948     TCGv tmp = tcg_temp_new();
4949     if (s->env->macsr & MACSR_FI) {
4950         if (upper)
4951             tcg_gen_andi_i32(tmp, val, 0xffff0000);
4952         else
4953             tcg_gen_shli_i32(tmp, val, 16);
4954     } else if (s->env->macsr & MACSR_SU) {
4955         if (upper)
4956             tcg_gen_sari_i32(tmp, val, 16);
4957         else
4958             tcg_gen_ext16s_i32(tmp, val);
4959     } else {
4960         if (upper)
4961             tcg_gen_shri_i32(tmp, val, 16);
4962         else
4963             tcg_gen_ext16u_i32(tmp, val);
4964     }
4965     return tmp;
4966 }
4967 
4968 static void gen_mac_clear_flags(void)
4969 {
4970     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
4971                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
4972 }
4973 
4974 DISAS_INSN(mac)
4975 {
4976     TCGv rx;
4977     TCGv ry;
4978     uint16_t ext;
4979     int acc;
4980     TCGv tmp;
4981     TCGv addr;
4982     TCGv loadval;
4983     int dual;
4984     TCGv saved_flags;
4985 
4986     if (!s->done_mac) {
4987         s->mactmp = tcg_temp_new_i64();
4988         s->done_mac = 1;
4989     }
4990 
4991     ext = read_im16(env, s);
4992 
4993     acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
4994     dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
4995     if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
4996         disas_undef(env, s, insn);
4997         return;
4998     }
4999     if (insn & 0x30) {
5000         /* MAC with load.  */
5001         tmp = gen_lea(env, s, insn, OS_LONG);
5002         addr = tcg_temp_new();
5003         tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5004         /* Load the value now to ensure correct exception behavior.
5005            Perform writeback after reading the MAC inputs.  */
5006         loadval = gen_load(s, OS_LONG, addr, 0);
5007 
5008         acc ^= 1;
5009         rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5010         ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5011     } else {
5012         loadval = addr = NULL_QREG;
5013         rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5014         ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5015     }
5016 
5017     gen_mac_clear_flags();
5018 #if 0
5019     l1 = -1;
5020     /* Disabled because conditional branches clobber temporary vars.  */
5021     if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5022         /* Skip the multiply if we know we will ignore it.  */
5023         l1 = gen_new_label();
5024         tmp = tcg_temp_new();
5025         tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5026         gen_op_jmp_nz32(tmp, l1);
5027     }
5028 #endif
5029 
5030     if ((ext & 0x0800) == 0) {
5031         /* Word.  */
5032         rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5033         ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5034     }
5035     if (s->env->macsr & MACSR_FI) {
5036         gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5037     } else {
5038         if (s->env->macsr & MACSR_SU)
5039             gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5040         else
5041             gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
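        /* ext[10:9] is assumed to be the EMAC scale factor: 01 shifts the
           product left one bit, 11 shifts it right one bit.  */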
5042         switch ((ext >> 9) & 3) {
5043         case 1:
5044             tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5045             break;
5046         case 3:
5047             tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5048             break;
5049         }
5050     }
5051 
5052     if (dual) {
5053         /* Save the overflow flag from the multiply.  */
5054         saved_flags = tcg_temp_new();
5055         tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5056     } else {
5057         saved_flags = NULL_QREG;
5058     }
5059 
5060 #if 0
5061     /* Disabled because conditional branches clobber temporary vars.  */
5062     if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5063         /* Skip the accumulate if the value is already saturated.  */
5064         l1 = gen_new_label();
5065         tmp = tcg_temp_new();
5066         gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5067         gen_op_jmp_nz32(tmp, l1);
5068     }
5069 #endif
5070 
5071     if (insn & 0x100)
5072         tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5073     else
5074         tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5075 
5076     if (s->env->macsr & MACSR_FI)
5077         gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5078     else if (s->env->macsr & MACSR_SU)
5079         gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5080     else
5081         gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5082 
5083 #if 0
5084     /* Disabled because conditional branches clobber temporary vars.  */
5085     if (l1 != -1)
5086         gen_set_label(l1);
5087 #endif
5088 
5089     if (dual) {
5090         /* Dual accumulate variant.  */
5091         acc = (ext >> 2) & 3;
5092         /* Restore the overflow flag from the multiplier.  */
5093         tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5094 #if 0
5095         /* Disabled because conditional branches clobber temporary vars.  */
5096         if ((s->env->macsr & MACSR_OMC) != 0) {
5097             /* Skip the accumulate if the value is already saturated.  */
5098             l1 = gen_new_label();
5099             tmp = tcg_temp_new();
5100             gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5101             gen_op_jmp_nz32(tmp, l1);
5102         }
5103 #endif
5104         if (ext & 2)
5105             tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5106         else
5107             tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5108         if (s->env->macsr & MACSR_FI)
5109             gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5110         else if (s->env->macsr & MACSR_SU)
5111             gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5112         else
5113             gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5114 #if 0
5115         /* Disabled because conditional branches clobber temporary vars.  */
5116         if (l1 != -1)
5117             gen_set_label(l1);
5118 #endif
5119     }
5120     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
5121 
5122     if (insn & 0x30) {
5123         TCGv rw;
5124         rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5125         tcg_gen_mov_i32(rw, loadval);
5126         /* FIXME: Should address writeback happen with the masked or
5127            unmasked value?  */
5128         switch ((insn >> 3) & 7) {
5129         case 3: /* Post-increment.  */
5130             tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5131             break;
5132         case 4: /* Pre-decrement.  */
5133             tcg_gen_mov_i32(AREG(insn, 0), addr);
5134         }
5135     }
5136 }
5137 
5138 DISAS_INSN(from_mac)
5139 {
5140     TCGv rx;
5141     TCGv_i64 acc;
5142     int accnum;
5143 
5144     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5145     accnum = (insn >> 9) & 3;
5146     acc = MACREG(accnum);
5147     if (s->env->macsr & MACSR_FI) {
5148         gen_helper_get_macf(rx, cpu_env, acc);
5149     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5150         tcg_gen_extrl_i64_i32(rx, acc);
5151     } else if (s->env->macsr & MACSR_SU) {
5152         gen_helper_get_macs(rx, acc);
5153     } else {
5154         gen_helper_get_macu(rx, acc);
5155     }
5156     if (insn & 0x40) {
5157         tcg_gen_movi_i64(acc, 0);
5158         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5159     }
5160 }
5161 
5162 DISAS_INSN(move_mac)
5163 {
5164     /* FIXME: This can be done without a helper.  */
5165     int src;
5166     TCGv dest;
5167     src = insn & 3;
5168     dest = tcg_const_i32((insn >> 9) & 3);
5169     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5170     gen_mac_clear_flags();
5171     gen_helper_mac_set_flags(cpu_env, dest);
5172 }
5173 
5174 DISAS_INSN(from_macsr)
5175 {
5176     TCGv reg;
5177 
5178     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5179     tcg_gen_mov_i32(reg, QREG_MACSR);
5180 }
5181 
5182 DISAS_INSN(from_mask)
5183 {
5184     TCGv reg;
5185     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5186     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5187 }
5188 
5189 DISAS_INSN(from_mext)
5190 {
5191     TCGv reg;
5192     TCGv acc;
5193     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5194     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5195     if (s->env->macsr & MACSR_FI)
5196         gen_helper_get_mac_extf(reg, cpu_env, acc);
5197     else
5198         gen_helper_get_mac_exti(reg, cpu_env, acc);
5199 }
5200 
5201 DISAS_INSN(macsr_to_ccr)
5202 {
5203     TCGv tmp = tcg_temp_new();
5204     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5205     gen_helper_set_sr(cpu_env, tmp);
5206     tcg_temp_free(tmp);
5207     set_cc_op(s, CC_OP_FLAGS);
5208 }
5209 
5210 DISAS_INSN(to_mac)
5211 {
5212     TCGv_i64 acc;
5213     TCGv val;
5214     int accnum;
5215     accnum = (insn >> 9) & 3;
5216     acc = MACREG(accnum);
5217     SRC_EA(env, val, OS_LONG, 0, NULL);
5218     if (s->env->macsr & MACSR_FI) {
5219         tcg_gen_ext_i32_i64(acc, val);
5220         tcg_gen_shli_i64(acc, acc, 8);
5221     } else if (s->env->macsr & MACSR_SU) {
5222         tcg_gen_ext_i32_i64(acc, val);
5223     } else {
5224         tcg_gen_extu_i32_i64(acc, val);
5225     }
5226     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5227     gen_mac_clear_flags();
5228     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5229 }
5230 
5231 DISAS_INSN(to_macsr)
5232 {
5233     TCGv val;
5234     SRC_EA(env, val, OS_LONG, 0, NULL);
5235     gen_helper_set_macsr(cpu_env, val);
5236     gen_lookup_tb(s);
5237 }
5238 
5239 DISAS_INSN(to_mask)
5240 {
5241     TCGv val;
5242     SRC_EA(env, val, OS_LONG, 0, NULL);
5243     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5244 }
5245 
5246 DISAS_INSN(to_mext)
5247 {
5248     TCGv val;
5249     TCGv acc;
5250     SRC_EA(env, val, OS_LONG, 0, NULL);
5251     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5252     if (s->env->macsr & MACSR_FI)
5253         gen_helper_set_mac_extf(cpu_env, val, acc);
5254     else if (s->env->macsr & MACSR_SU)
5255         gen_helper_set_mac_exts(cpu_env, val, acc);
5256     else
5257         gen_helper_set_mac_extu(cpu_env, val, acc);
5258 }
5259 
5260 static disas_proc opcode_table[65536];
5261 
5262 static void
5263 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5264 {
5265   int i;
5266   int from;
5267   int to;
5268 
5269   /* Sanity check.  All set bits must be included in the mask.  */
5270   if (opcode & ~mask) {
5271       fprintf(stderr,
5272               "qemu internal error: bogus opcode definition %04x/%04x\n",
5273               opcode, mask);
5274       abort();
5275   }
5276   /* This could probably be cleverer.  For now just optimize the case where
5277      the top bits are known.  */
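  /* Worked example: opcode 0x50c8 with mask 0xf0f8 stops at the first
     clear mask bit (0x0800), so i becomes 0x1000 and the loop walks
     0x5000..0x5fff, installing the handler wherever (i & mask) == opcode. */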
5278   /* Find the first zero bit in the mask.  */
5279   i = 0x8000;
5280   while ((i & mask) != 0)
5281       i >>= 1;
5282   /* Iterate over all combinations of this and lower bits.  */
5283   if (i == 0)
5284       i = 1;
5285   else
5286       i <<= 1;
5287   from = opcode & ~(i - 1);
5288   to = from + i;
5289   for (i = from; i < to; i++) {
5290       if ((i & mask) == opcode)
5291           opcode_table[i] = proc;
5292   }
5293 }
5294 
5295 /* Register m68k opcode handlers.  Order is important.
5296    Later insns override earlier ones.  */
5297 void register_m68k_insns (CPUM68KState *env)
5298 {
5299     /* Build the opcode table only once to avoid
5300        multithreading issues. */
5301     if (opcode_table[0] != NULL) {
5302         return;
5303     }
5304 
5305     /* Use BASE() for instructions available
5306      * on both CF_ISA_A and M68000.
5307      */
5308 #define BASE(name, opcode, mask) \
5309     register_opcode(disas_##name, 0x##opcode, 0x##mask)
5310 #define INSN(name, opcode, mask, feature) do { \
5311     if (m68k_feature(env, M68K_FEATURE_##feature)) \
5312         BASE(name, opcode, mask); \
5313     } while(0)
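    /* The table is seeded with the catch-all undef handler (opcode 0000,
       mask 0000 matches every pattern); more specific entries registered
       below overwrite those slots.  */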
5314     BASE(undef,     0000, 0000);
5315     INSN(arith_im,  0080, fff8, CF_ISA_A);
5316     INSN(arith_im,  0000, ff00, M68000);
5317     INSN(undef,     00c0, ffc0, M68000);
5318     INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
5319     BASE(bitop_reg, 0100, f1c0);
5320     BASE(bitop_reg, 0140, f1c0);
5321     BASE(bitop_reg, 0180, f1c0);
5322     BASE(bitop_reg, 01c0, f1c0);
5323     INSN(arith_im,  0280, fff8, CF_ISA_A);
5324     INSN(arith_im,  0200, ff00, M68000);
5325     INSN(undef,     02c0, ffc0, M68000);
5326     INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
5327     INSN(arith_im,  0480, fff8, CF_ISA_A);
5328     INSN(arith_im,  0400, ff00, M68000);
5329     INSN(undef,     04c0, ffc0, M68000);
5330     INSN(arith_im,  0600, ff00, M68000);
5331     INSN(undef,     06c0, ffc0, M68000);
5332     INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
5333     INSN(arith_im,  0680, fff8, CF_ISA_A);
5334     INSN(arith_im,  0c00, ff38, CF_ISA_A);
5335     INSN(arith_im,  0c00, ff00, M68000);
5336     BASE(bitop_im,  0800, ffc0);
5337     BASE(bitop_im,  0840, ffc0);
5338     BASE(bitop_im,  0880, ffc0);
5339     BASE(bitop_im,  08c0, ffc0);
5340     INSN(arith_im,  0a80, fff8, CF_ISA_A);
5341     INSN(arith_im,  0a00, ff00, M68000);
5342     INSN(cas,       0ac0, ffc0, CAS);
5343     INSN(cas,       0cc0, ffc0, CAS);
5344     INSN(cas,       0ec0, ffc0, CAS);
5345     INSN(cas2w,     0cfc, ffff, CAS);
5346     INSN(cas2l,     0efc, ffff, CAS);
5347     BASE(move,      1000, f000);
5348     BASE(move,      2000, f000);
5349     BASE(move,      3000, f000);
5350     INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
5351     INSN(negx,      4080, fff8, CF_ISA_A);
5352     INSN(negx,      4000, ff00, M68000);
5353     INSN(undef,     40c0, ffc0, M68000);
5354     INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5355     INSN(move_from_sr, 40c0, ffc0, M68000);
5356     BASE(lea,       41c0, f1c0);
5357     BASE(clr,       4200, ff00);
5358     BASE(undef,     42c0, ffc0);
5359     INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
5360     INSN(move_from_ccr, 42c0, ffc0, M68000);
5361     INSN(neg,       4480, fff8, CF_ISA_A);
5362     INSN(neg,       4400, ff00, M68000);
5363     INSN(undef,     44c0, ffc0, M68000);
5364     BASE(move_to_ccr, 44c0, ffc0);
5365     INSN(not,       4680, fff8, CF_ISA_A);
5366     INSN(not,       4600, ff00, M68000);
5367     INSN(undef,     46c0, ffc0, M68000);
5368     INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
5369     INSN(nbcd,      4800, ffc0, M68000);
5370     INSN(linkl,     4808, fff8, M68000);
5371     BASE(pea,       4840, ffc0);
5372     BASE(swap,      4840, fff8);
5373     INSN(bkpt,      4848, fff8, BKPT);
5374     INSN(movem,     48d0, fbf8, CF_ISA_A);
5375     INSN(movem,     48e8, fbf8, CF_ISA_A);
5376     INSN(movem,     4880, fb80, M68000);
5377     BASE(ext,       4880, fff8);
5378     BASE(ext,       48c0, fff8);
5379     BASE(ext,       49c0, fff8);
5380     BASE(tst,       4a00, ff00);
5381     INSN(tas,       4ac0, ffc0, CF_ISA_B);
5382     INSN(tas,       4ac0, ffc0, M68000);
5383     INSN(halt,      4ac8, ffff, CF_ISA_A);
5384     INSN(pulse,     4acc, ffff, CF_ISA_A);
5385     BASE(illegal,   4afc, ffff);
5386     INSN(mull,      4c00, ffc0, CF_ISA_A);
5387     INSN(mull,      4c00, ffc0, LONG_MULDIV);
5388     INSN(divl,      4c40, ffc0, CF_ISA_A);
5389     INSN(divl,      4c40, ffc0, LONG_MULDIV);
5390     INSN(sats,      4c80, fff8, CF_ISA_B);
5391     BASE(trap,      4e40, fff0);
5392     BASE(link,      4e50, fff8);
5393     BASE(unlk,      4e58, fff8);
5394     INSN(move_to_usp, 4e60, fff8, USP);
5395     INSN(move_from_usp, 4e68, fff8, USP);
5396     BASE(nop,       4e71, ffff);
5397     BASE(stop,      4e72, ffff);
5398     BASE(rte,       4e73, ffff);
5399     INSN(rtd,       4e74, ffff, RTD);
5400     BASE(rts,       4e75, ffff);
5401     INSN(movec,     4e7b, ffff, CF_ISA_A);
5402     BASE(jump,      4e80, ffc0);
5403     BASE(jump,      4ec0, ffc0);
5404     INSN(addsubq,   5000, f080, M68000);
5405     BASE(addsubq,   5080, f0c0);
5406     INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
5407     INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
5408     INSN(dbcc,      50c8, f0f8, M68000);
5409     INSN(tpf,       51f8, fff8, CF_ISA_A);
5410 
5411     /* Branch instructions.  */
5412     BASE(branch,    6000, f000);
5413     /* Disable long branch instructions, then add back the ones we want.  */
5414     BASE(undef,     60ff, f0ff); /* All long branches.  */
5415     INSN(branch,    60ff, f0ff, CF_ISA_B);
5416     INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
5417     INSN(branch,    60ff, ffff, BRAL);
5418     INSN(branch,    60ff, f0ff, BCCL);
5419 
5420     BASE(moveq,     7000, f100);
5421     INSN(mvzs,      7100, f100, CF_ISA_B);
5422     BASE(or,        8000, f000);
5423     BASE(divw,      80c0, f0c0);
5424     INSN(sbcd_reg,  8100, f1f8, M68000);
5425     INSN(sbcd_mem,  8108, f1f8, M68000);
5426     BASE(addsub,    9000, f000);
5427     INSN(undef,     90c0, f0c0, CF_ISA_A);
5428     INSN(subx_reg,  9180, f1f8, CF_ISA_A);
5429     INSN(subx_reg,  9100, f138, M68000);
5430     INSN(subx_mem,  9108, f138, M68000);
5431     INSN(suba,      91c0, f1c0, CF_ISA_A);
5432     INSN(suba,      90c0, f0c0, M68000);
5433 
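    /* ColdFire MAC/EMAC unit (line-A opcodes).  undef_mac is registered
       first so that any pattern not claimed by an EMAC insn below stays
       undefined. */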
5434     BASE(undef_mac, a000, f000);
5435     INSN(mac,       a000, f100, CF_EMAC);
5436     INSN(from_mac,  a180, f9b0, CF_EMAC);
5437     INSN(move_mac,  a110, f9fc, CF_EMAC);
5438     INSN(from_macsr,a980, f9f0, CF_EMAC);
5439     INSN(from_mask, ad80, fff0, CF_EMAC);
5440     INSN(from_mext, ab80, fbf0, CF_EMAC);
5441     INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
5442     INSN(to_mac,    a100, f9c0, CF_EMAC);
5443     INSN(to_macsr,  a900, ffc0, CF_EMAC);
5444     INSN(to_mext,   ab00, fbc0, CF_EMAC);
5445     INSN(to_mask,   ad00, ffc0, CF_EMAC);
5446 
5447     INSN(mov3q,     a140, f1c0, CF_ISA_B);
5448     INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
5449     INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
5450     INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
5451     INSN(cmp,       b080, f1c0, CF_ISA_A);
5452     INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
5453     INSN(cmp,       b000, f100, M68000);
5454     INSN(eor,       b100, f100, M68000);
5455     INSN(cmpm,      b108, f138, M68000);
5456     INSN(cmpa,      b0c0, f0c0, M68000);
5457     INSN(eor,       b180, f1c0, CF_ISA_A);
5458     BASE(and,       c000, f000);
5459     INSN(exg_dd,    c140, f1f8, M68000);
5460     INSN(exg_aa,    c148, f1f8, M68000);
5461     INSN(exg_da,    c188, f1f8, M68000);
5462     BASE(mulw,      c0c0, f0c0);
5463     INSN(abcd_reg,  c100, f1f8, M68000);
5464     INSN(abcd_mem,  c108, f1f8, M68000);
5465     BASE(addsub,    d000, f000);
5466     INSN(undef,     d0c0, f0c0, CF_ISA_A);
5467     INSN(addx_reg,  d180, f1f8, CF_ISA_A);
5468     INSN(addx_reg,  d100, f138, M68000);
5469     INSN(addx_mem,  d108, f138, M68000);
5470     INSN(adda,      d1c0, f1c0, CF_ISA_A);
5471     INSN(adda,      d0c0, f0c0, M68000);
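    /* Shifts and rotates: ColdFire only has the 32-bit register forms;
       680x0 adds the 8/16-bit forms, the rotates and the memory forms. */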
5472     INSN(shift_im,  e080, f0f0, CF_ISA_A);
5473     INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
5474     INSN(shift8_im, e000, f0f0, M68000);
5475     INSN(shift16_im, e040, f0f0, M68000);
5476     INSN(shift_im,  e080, f0f0, M68000);
5477     INSN(shift8_reg, e020, f0f0, M68000);
5478     INSN(shift16_reg, e060, f0f0, M68000);
5479     INSN(shift_reg, e0a0, f0f0, M68000);
5480     INSN(shift_mem, e0c0, fcc0, M68000);
5481     INSN(rotate_im, e090, f0f0, M68000);
5482     INSN(rotate8_im, e010, f0f0, M68000);
5483     INSN(rotate16_im, e050, f0f0, M68000);
5484     INSN(rotate_reg, e0b0, f0f0, M68000);
5485     INSN(rotate8_reg, e030, f0f0, M68000);
5486     INSN(rotate16_reg, e070, f0f0, M68000);
5487     INSN(rotate_mem, e4c0, fcc0, M68000);
5488     INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
5489     INSN(bfext_reg, e9c0, fdf8, BITFIELD);
5490     INSN(bfins_mem, efc0, ffc0, BITFIELD);
5491     INSN(bfins_reg, efc0, fff8, BITFIELD);
5492     INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
5493     INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
5494     INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
5495     INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
5496     INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
5497     INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
5498     INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
5499     INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
5500     INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
5501     INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
5502     BASE(undef_fpu, f000, f000);
5503     INSN(fpu,       f200, ffc0, CF_FPU);
5504     INSN(fbcc,      f280, ffc0, CF_FPU);
5505     INSN(frestore,  f340, ffc0, CF_FPU);
5506     INSN(fsave,     f300, ffc0, CF_FPU);
5507     INSN(fpu,       f200, ffc0, FPU);
5508     INSN(fscc,      f240, ffc0, FPU);
5509     INSN(fbcc,      f280, ff80, FPU);
5510     INSN(frestore,  f340, ffc0, FPU);
5511     INSN(fsave,     f300, ffc0, FPU);
5512     INSN(intouch,   f340, ffc0, CF_ISA_A);
5513     INSN(cpushl,    f428, ff38, CF_ISA_A);
5514     INSN(wddata,    fb00, ff00, CF_ISA_A);
5515     INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
5516 #undef INSN
5517 }
5518 
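/* Translate a single instruction: fetch the 16-bit opcode word, dispatch
   through opcode_table[] (built by the registrations above), then flush
   any address-register writebacks the insn deferred. */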
5519 /* ??? Some of this implementation is not exception safe.  We should always
5520    write back the result to memory before setting the condition codes.  */
5521 static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
5522 {
5523     uint16_t insn = read_im16(env, s);
5524     opcode_table[insn](env, s, insn);
5525     do_writebacks(s);
5526 }
5527 
5528 /* generate intermediate code for basic block 'tb'.  */
5529 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
5530 {
5531     CPUM68KState *env = cs->env_ptr;
5532     DisasContext dc1, *dc = &dc1;
5533     target_ulong pc_start;
5534     int pc_offset;
5535     int num_insns;
5536     int max_insns;
5537 
5538     /* generate intermediate code */
5539     pc_start = tb->pc;
5540 
5541     dc->tb = tb;
5542 
5543     dc->env = env;
5544     dc->is_jmp = DISAS_NEXT;
5545     dc->pc = pc_start;
5546     dc->cc_op = CC_OP_DYNAMIC;
5547     dc->cc_op_synced = 1;
5548     dc->singlestep_enabled = cs->singlestep_enabled;
5549     dc->user = (env->sr & SR_S) == 0;
5550     dc->done_mac = 0;
5551     dc->writeback_mask = 0;
5552     num_insns = 0;
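    /* Bound the number of guest insns per TB: honour an explicit CF_COUNT
       request from the cflags, otherwise allow up to TCG_MAX_INSNS. */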
5553     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5554     if (max_insns == 0) {
5555         max_insns = CF_COUNT_MASK;
5556     }
5557     if (max_insns > TCG_MAX_INSNS) {
5558         max_insns = TCG_MAX_INSNS;
5559     }
5560 
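    /* Translate insns until one ends the block (jump, exception), the TCG
       op buffer fills up, we are single-stepping, the insn budget runs out,
       or we get close enough to a page boundary that an instruction and its
       extension words might cross into the next page. */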
5561     gen_tb_start(tb);
5562     do {
5563         pc_offset = dc->pc - pc_start;
5564         gen_throws_exception = NULL;
5565         tcg_gen_insn_start(dc->pc, dc->cc_op);
5566         num_insns++;
5567 
5568         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5569             gen_exception(dc, dc->pc, EXCP_DEBUG);
5570             dc->is_jmp = DISAS_JUMP;
5571             /* The address covered by the breakpoint must be included in
5572                [tb->pc, tb->pc + tb->size) in order for it to be
5573                properly cleared -- thus we increment the PC here so that
5574                the logic setting tb->size below does the right thing.  */
5575             dc->pc += 2;
5576             break;
5577         }
5578 
5579         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
5580             gen_io_start();
5581         }
5582 
5583         dc->insn_pc = dc->pc;
5584         disas_m68k_insn(env, dc);
5585     } while (!dc->is_jmp && !tcg_op_buf_full() &&
5586              !cs->singlestep_enabled &&
5587              !singlestep &&
5588              (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
5589              num_insns < max_insns);
5590 
5591     if (tb_cflags(tb) & CF_LAST_IO) {
5592         gen_io_end();
    }
5593     if (unlikely(cs->singlestep_enabled)) {
5594         /* Make sure the pc is updated, and raise a debug exception.  */
5595         if (!dc->is_jmp) {
5596             update_cc_op(dc);
5597             tcg_gen_movi_i32(QREG_PC, dc->pc);
5598         }
5599         gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
5600     } else {
5601         switch (dc->is_jmp) {
5602         case DISAS_NEXT:
5603             update_cc_op(dc);
5604             gen_jmp_tb(dc, 0, dc->pc);
5605             break;
5606         default:
5607         case DISAS_JUMP:
5608         case DISAS_UPDATE:
5609             update_cc_op(dc);
5610             /* indicate that the hash table must be used to find the next TB */
5611             tcg_gen_exit_tb(0);
5612             break;
5613         case DISAS_TB_JUMP:
5614             /* nothing more to generate */
5615             break;
5616         }
5617     }
5618     gen_tb_end(tb, num_insns);
5619 
5620 #ifdef DEBUG_DISAS
5621     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5622         && qemu_log_in_addr_range(pc_start)) {
5623         qemu_log_lock();
5624         qemu_log("----------------\n");
5625         qemu_log("IN: %s\n", lookup_symbol(pc_start));
5626         log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
5627         qemu_log("\n");
5628         qemu_log_unlock();
5629     }
5630 #endif
5631     tb->size = dc->pc - pc_start;
5632     tb->icount = num_insns;
5633 }
5634 
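/* Convert an 80-bit extended-precision register image to a host double,
   purely for the register dump below; the conversion goes through
   floatx80_to_float64() using the env's fp_status. */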
5635 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
5636 {
5637     floatx80 a = { .high = high, .low = low };
5638     union {
5639         float64 f64;
5640         double d;
5641     } u;
5642 
5643     u.f64 = floatx80_to_float64(a, &env->fp_status);
5644     return u.d;
5645 }
5646 
5647 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
5648                          int flags)
5649 {
5650     M68kCPU *cpu = M68K_CPU(cs);
5651     CPUM68KState *env = &cpu->env;
5652     int i;
5653     uint16_t sr;
5654     for (i = 0; i < 8; i++) {
5655         cpu_fprintf(f, "D%d = %08x   A%d = %08x   "
5656                     "F%d = %04x %016"PRIx64"  (%12g)\n",
5657                     i, env->dregs[i], i, env->aregs[i],
5658                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
5659                     floatx80_to_double(env, env->fregs[i].l.upper,
5660                                        env->fregs[i].l.lower));
5661     }
5662     cpu_fprintf(f, "PC = %08x   ", env->pc);
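    /* The condition codes are evaluated lazily and kept outside env->sr;
       fold the current CCR back in so the full status register is shown. */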
5663     sr = env->sr | cpu_m68k_get_ccr(env);
5664     cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
5665                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
5666                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
5667     cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
5668                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
5669                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
5670                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
5671                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
5672     cpu_fprintf(f, "\n                                "
5673                    "FPCR =     %04x ", env->fpcr);
5674     switch (env->fpcr & FPCR_PREC_MASK) {
5675     case FPCR_PREC_X:
5676         cpu_fprintf(f, "X ");
5677         break;
5678     case FPCR_PREC_S:
5679         cpu_fprintf(f, "S ");
5680         break;
5681     case FPCR_PREC_D:
5682         cpu_fprintf(f, "D ");
5683         break;
5684     }
5685     switch (env->fpcr & FPCR_RND_MASK) {
5686     case FPCR_RND_N:
5687         cpu_fprintf(f, "RN ");
5688         break;
5689     case FPCR_RND_Z:
5690         cpu_fprintf(f, "RZ ");
5691         break;
5692     case FPCR_RND_M:
5693         cpu_fprintf(f, "RM ");
5694         break;
5695     case FPCR_RND_P:
5696         cpu_fprintf(f, "RP ");
5697         break;
5698     }
5699 }
5700 
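/* Recover the state recorded by tcg_gen_insn_start() in the translation
   loop above: data[0] is the PC, data[1] the cc_op at that insn.
   CC_OP_DYNAMIC means cc_op is already tracked in the env at run time and
   must not be overwritten. */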
5701 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
5702                           target_ulong *data)
5703 {
5704     int cc_op = data[1];
5705     env->pc = data[0];
5706     if (cc_op != CC_OP_DYNAMIC) {
5707         env->cc_op = cc_op;
5708     }
5709 }
5710