xref: /openbmc/qemu/target/m68k/translate.c (revision 7e450a8f)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "qemu/log.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "trace-tcg.h"
34 #include "exec/log.h"
35 
36 //#define DEBUG_DISPATCH 1
37 
38 #define DEFO32(name, offset) static TCGv QREG_##name;
39 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
40 #include "qregs.def"
41 #undef DEFO32
42 #undef DEFO64
43 
44 static TCGv_i32 cpu_halted;
45 static TCGv_i32 cpu_exception_index;
46 
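/* 8 x "Dn" and 8 x "An" (3 bytes each including NUL) plus 4 x "ACCn"
 * (5 bytes each including NUL); the buffer is filled in by m68k_tcg_init().
 */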
47 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
48 static TCGv cpu_dregs[8];
49 static TCGv cpu_aregs[8];
50 static TCGv_i64 cpu_macc[4];
51 
52 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
53 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
54 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
55 #define MACREG(acc)     cpu_macc[acc]
56 #define QREG_SP         get_areg(s, 7)
57 
58 static TCGv NULL_QREG;
59 #define IS_NULL_QREG(t) (t == NULL_QREG)
60 /* Used to distinguish stores from bad addressing modes.  */
61 static TCGv store_dummy;
62 
63 #include "exec/gen-icount.h"
64 
65 void m68k_tcg_init(void)
66 {
67     char *p;
68     int i;
69 
70 #define DEFO32(name, offset) \
71     QREG_##name = tcg_global_mem_new_i32(cpu_env, \
72         offsetof(CPUM68KState, offset), #name);
73 #define DEFO64(name, offset) \
74     QREG_##name = tcg_global_mem_new_i64(cpu_env, \
75         offsetof(CPUM68KState, offset), #name);
76 #include "qregs.def"
77 #undef DEFO32
78 #undef DEFO64
79 
80     cpu_halted = tcg_global_mem_new_i32(cpu_env,
81                                         -offsetof(M68kCPU, env) +
82                                         offsetof(CPUState, halted), "HALTED");
83     cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
84                                                  -offsetof(M68kCPU, env) +
85                                                  offsetof(CPUState, exception_index),
86                                                  "EXCEPTION");
87 
88     p = cpu_reg_names;
89     for (i = 0; i < 8; i++) {
90         sprintf(p, "D%d", i);
91         cpu_dregs[i] = tcg_global_mem_new(cpu_env,
92                                           offsetof(CPUM68KState, dregs[i]), p);
93         p += 3;
94         sprintf(p, "A%d", i);
95         cpu_aregs[i] = tcg_global_mem_new(cpu_env,
96                                           offsetof(CPUM68KState, aregs[i]), p);
97         p += 3;
98     }
99     for (i = 0; i < 4; i++) {
100         sprintf(p, "ACC%d", i);
101         cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
102                                          offsetof(CPUM68KState, macc[i]), p);
103         p += 5;
104     }
105 
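    /* The -4/-8 offsets below are bogus on purpose: these globals appear to
     * serve only as sentinel values (IS_NULL_QREG comparisons and dummy
     * store results) and are never loaded or stored by generated code.
     */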
106     NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
107     store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
108 }
109 
110 /* internal defines */
111 typedef struct DisasContext {
112     CPUM68KState *env;
113     target_ulong insn_pc; /* Start of the current instruction.  */
114     target_ulong pc;
115     int is_jmp;
116     CCOp cc_op; /* Current CC operation */
117     int cc_op_synced;
118     int user;
119     struct TranslationBlock *tb;
120     int singlestep_enabled;
121     TCGv_i64 mactmp;
122     int done_mac;
123     int writeback_mask;
124     TCGv writeback[8];
125 } DisasContext;
126 
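/* Updates to address registers from (An)+ and -(An) addressing modes are
 * deferred: delay_set_areg() records the new value in writeback[], and
 * do_writebacks() commits it once the instruction has been translated,
 * while get_areg() returns any pending value in the meantime.
 */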
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129     if (s->writeback_mask & (1 << regno)) {
130         return s->writeback[regno];
131     } else {
132         return cpu_aregs[regno];
133     }
134 }
135 
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137                            TCGv val, bool give_temp)
138 {
139     if (s->writeback_mask & (1 << regno)) {
140         if (give_temp) {
141             tcg_temp_free(s->writeback[regno]);
142             s->writeback[regno] = val;
143         } else {
144             tcg_gen_mov_i32(s->writeback[regno], val);
145         }
146     } else {
147         s->writeback_mask |= 1 << regno;
148         if (give_temp) {
149             s->writeback[regno] = val;
150         } else {
151             TCGv tmp = tcg_temp_new();
152             s->writeback[regno] = tmp;
153             tcg_gen_mov_i32(tmp, val);
154         }
155     }
156 }
157 
158 static void do_writebacks(DisasContext *s)
159 {
160     unsigned mask = s->writeback_mask;
161     if (mask) {
162         s->writeback_mask = 0;
163         do {
164             unsigned regno = ctz32(mask);
165             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
166             tcg_temp_free(s->writeback[regno]);
167             mask &= mask - 1;
168         } while (mask);
169     }
170 }
171 
172 /* is_jmp field values */
173 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
174 #define DISAS_UPDATE    DISAS_TARGET_1 /* cpu state was modified dynamically */
175 #define DISAS_TB_JUMP   DISAS_TARGET_2 /* only pc was modified statically */
176 #define DISAS_JUMP_NEXT DISAS_TARGET_3
177 
178 #if defined(CONFIG_USER_ONLY)
179 #define IS_USER(s) 1
180 #else
181 #define IS_USER(s) s->user
182 #endif
183 
184 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
185 
186 #ifdef DEBUG_DISPATCH
187 #define DISAS_INSN(name)                                                \
188     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
189                                   uint16_t insn);                       \
190     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
191                              uint16_t insn)                             \
192     {                                                                   \
193         qemu_log("Dispatch " #name "\n");                               \
194         real_disas_##name(env, s, insn);                                \
195     }                                                                   \
196     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
197                                   uint16_t insn)
198 #else
199 #define DISAS_INSN(name)                                                \
200     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
201                              uint16_t insn)
202 #endif
203 
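/* For each CC op, the QREG_CC_* registers named here hold live data;
 * set_cc_op() discards the rest when switching ops (X and N stay live
 * for every op).
 */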
204 static const uint8_t cc_op_live[CC_OP_NB] = {
205     [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
206     [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
207     [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
208     [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
209     [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
210     [CC_OP_LOGIC] = CCF_X | CCF_N
211 };
212 
213 static void set_cc_op(DisasContext *s, CCOp op)
214 {
215     CCOp old_op = s->cc_op;
216     int dead;
217 
218     if (old_op == op) {
219         return;
220     }
221     s->cc_op = op;
222     s->cc_op_synced = 0;
223 
224     /* Discard CC computation that will no longer be used.
225        Note that X and N are never dead.  */
226     dead = cc_op_live[old_op] & ~cc_op_live[op];
227     if (dead & CCF_C) {
228         tcg_gen_discard_i32(QREG_CC_C);
229     }
230     if (dead & CCF_Z) {
231         tcg_gen_discard_i32(QREG_CC_Z);
232     }
233     if (dead & CCF_V) {
234         tcg_gen_discard_i32(QREG_CC_V);
235     }
236 }
237 
238 /* Update the CPU env CC_OP state.  */
239 static void update_cc_op(DisasContext *s)
240 {
241     if (!s->cc_op_synced) {
242         s->cc_op_synced = 1;
243         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
244     }
245 }
246 
247 /* Generate a jump to an immediate address.  */
248 static void gen_jmp_im(DisasContext *s, uint32_t dest)
249 {
250     update_cc_op(s);
251     tcg_gen_movi_i32(QREG_PC, dest);
252     s->is_jmp = DISAS_JUMP;
253 }
254 
255 /* Generate a jump to the address in qreg DEST.  */
256 static void gen_jmp(DisasContext *s, TCGv dest)
257 {
258     update_cc_op(s);
259     tcg_gen_mov_i32(QREG_PC, dest);
260     s->is_jmp = DISAS_JUMP;
261 }
262 
263 static void gen_raise_exception(int nr)
264 {
265     TCGv_i32 tmp = tcg_const_i32(nr);
266 
267     gen_helper_raise_exception(cpu_env, tmp);
268     tcg_temp_free_i32(tmp);
269 }
270 
271 static void gen_exception(DisasContext *s, uint32_t where, int nr)
272 {
273     gen_jmp_im(s, where);
274     gen_raise_exception(nr);
275 }
276 
277 static inline void gen_addr_fault(DisasContext *s)
278 {
279     gen_exception(s, s->insn_pc, EXCP_ADDRESS);
280 }
281 
282 /* Generate a load from the specified address.  Narrow values are
283    sign or zero extended to full register width, as selected by SIGN.  */
284 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr, int sign)
285 {
286     TCGv tmp;
287     int index = IS_USER(s);
288     tmp = tcg_temp_new_i32();
289     switch(opsize) {
290     case OS_BYTE:
291         if (sign)
292             tcg_gen_qemu_ld8s(tmp, addr, index);
293         else
294             tcg_gen_qemu_ld8u(tmp, addr, index);
295         break;
296     case OS_WORD:
297         if (sign)
298             tcg_gen_qemu_ld16s(tmp, addr, index);
299         else
300             tcg_gen_qemu_ld16u(tmp, addr, index);
301         break;
302     case OS_LONG:
303         tcg_gen_qemu_ld32u(tmp, addr, index);
304         break;
305     default:
306         g_assert_not_reached();
307     }
308     return tmp;
309 }
310 
311 /* Generate a store.  */
312 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
313 {
314     int index = IS_USER(s);
315     switch(opsize) {
316     case OS_BYTE:
317         tcg_gen_qemu_st8(val, addr, index);
318         break;
319     case OS_WORD:
320         tcg_gen_qemu_st16(val, addr, index);
321         break;
322     case OS_LONG:
323         tcg_gen_qemu_st32(val, addr, index);
324         break;
325     default:
326         g_assert_not_reached();
327     }
328 }
329 
330 typedef enum {
331     EA_STORE,
332     EA_LOADU,
333     EA_LOADS
334 } ea_what;
335 
336 /* Generate an unsigned load for EA_LOADU, a signed load for EA_LOADS,
337    otherwise (EA_STORE) generate a store.  */
338 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
339                      ea_what what)
340 {
341     if (what == EA_STORE) {
342         gen_store(s, opsize, addr, val);
343         return store_dummy;
344     } else {
345         return gen_load(s, opsize, addr, what == EA_LOADS);
346     }
347 }
348 
349 /* Read a 16-bit immediate constant */
350 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
351 {
352     uint16_t im;
353     im = cpu_lduw_code(env, s->pc);
354     s->pc += 2;
355     return im;
356 }
357 
358 /* Read an 8-bit immediate constant */
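/* (A byte immediate occupies the low half of a 16-bit extension word, so a
 * full word is fetched and then truncated.) */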
359 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
360 {
361     return read_im16(env, s);
362 }
363 
364 /* Read a 32-bit immediate constant.  */
365 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
366 {
367     uint32_t im;
368     im = read_im16(env, s) << 16;
369     im |= 0xffff & read_im16(env, s);
370     return im;
371 }
372 
373 /* Read a 64-bit immediate constant.  */
374 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
375 {
376     uint64_t im;
377     im = (uint64_t)read_im32(env, s) << 32;
378     im |= (uint64_t)read_im32(env, s);
379     return im;
380 }
381 
382 /* Calculate an address index.  */
383 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
384 {
385     TCGv add;
386     int scale;
387 
388     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
389     if ((ext & 0x800) == 0) {
390         tcg_gen_ext16s_i32(tmp, add);
391         add = tmp;
392     }
393     scale = (ext >> 9) & 3;
394     if (scale != 0) {
395         tcg_gen_shli_i32(tmp, add, scale);
396         add = tmp;
397     }
398     return add;
399 }
400 
401 /* Handle a base + index + displacement effective address.
402    A NULL_QREG base means pc-relative.  */
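/* Brief extension word layout (bit 8 clear):
 *   bit 15     index register type (0 = Dn, 1 = An)
 *   bits 14-12 index register number
 *   bit 11     index size (0 = sign-extended word, 1 = long)
 *   bits 10-9  scale factor
 *   bits 7-0   signed 8-bit displacement
 * With bit 8 set, the full format adds base/outer displacements and
 * optional memory indirection, as decoded below.
 */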
403 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
404 {
405     uint32_t offset;
406     uint16_t ext;
407     TCGv add;
408     TCGv tmp;
409     uint32_t bd, od;
410 
411     offset = s->pc;
412     ext = read_im16(env, s);
413 
414     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
415         return NULL_QREG;
416 
417     if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
418         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
419         ext &= ~(3 << 9);
420     }
421 
422     if (ext & 0x100) {
423         /* full extension word format */
424         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
425             return NULL_QREG;
426 
427         if ((ext & 0x30) > 0x10) {
428             /* base displacement */
429             if ((ext & 0x30) == 0x20) {
430                 bd = (int16_t)read_im16(env, s);
431             } else {
432                 bd = read_im32(env, s);
433             }
434         } else {
435             bd = 0;
436         }
437         tmp = tcg_temp_new();
438         if ((ext & 0x44) == 0) {
439             /* pre-index */
440             add = gen_addr_index(s, ext, tmp);
441         } else {
442             add = NULL_QREG;
443         }
444         if ((ext & 0x80) == 0) {
445             /* base not suppressed */
446             if (IS_NULL_QREG(base)) {
447                 base = tcg_const_i32(offset + bd);
448                 bd = 0;
449             }
450             if (!IS_NULL_QREG(add)) {
451                 tcg_gen_add_i32(tmp, add, base);
452                 add = tmp;
453             } else {
454                 add = base;
455             }
456         }
457         if (!IS_NULL_QREG(add)) {
458             if (bd != 0) {
459                 tcg_gen_addi_i32(tmp, add, bd);
460                 add = tmp;
461             }
462         } else {
463             add = tcg_const_i32(bd);
464         }
465         if ((ext & 3) != 0) {
466             /* memory indirect */
467             base = gen_load(s, OS_LONG, add, 0);
468             if ((ext & 0x44) == 4) {
469                 add = gen_addr_index(s, ext, tmp);
470                 tcg_gen_add_i32(tmp, add, base);
471                 add = tmp;
472             } else {
473                 add = base;
474             }
475             if ((ext & 3) > 1) {
476                 /* outer displacement */
477                 if ((ext & 3) == 2) {
478                     od = (int16_t)read_im16(env, s);
479                 } else {
480                     od = read_im32(env, s);
481                 }
482             } else {
483                 od = 0;
484             }
485             if (od != 0) {
486                 tcg_gen_addi_i32(tmp, add, od);
487                 add = tmp;
488             }
489         }
490     } else {
491         /* brief extension word format */
492         tmp = tcg_temp_new();
493         add = gen_addr_index(s, ext, tmp);
494         if (!IS_NULL_QREG(base)) {
495             tcg_gen_add_i32(tmp, add, base);
496             if ((int8_t)ext)
497                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
498         } else {
499             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
500         }
501         add = tmp;
502     }
503     return add;
504 }
505 
506 /* Sign or zero extend a value.  */
507 
508 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
509 {
510     switch (opsize) {
511     case OS_BYTE:
512         if (sign) {
513             tcg_gen_ext8s_i32(res, val);
514         } else {
515             tcg_gen_ext8u_i32(res, val);
516         }
517         break;
518     case OS_WORD:
519         if (sign) {
520             tcg_gen_ext16s_i32(res, val);
521         } else {
522             tcg_gen_ext16u_i32(res, val);
523         }
524         break;
525     case OS_LONG:
526         tcg_gen_mov_i32(res, val);
527         break;
528     default:
529         g_assert_not_reached();
530     }
531 }
532 
533 /* Evaluate all the CC flags.  */
534 
535 static void gen_flush_flags(DisasContext *s)
536 {
537     TCGv t0, t1;
538 
539     switch (s->cc_op) {
540     case CC_OP_FLAGS:
541         return;
542 
543     case CC_OP_ADDB:
544     case CC_OP_ADDW:
545     case CC_OP_ADDL:
546         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
547         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
548         /* Compute signed overflow for addition.  */
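        /* Here QREG_CC_N holds the size-extended result and QREG_CC_V the
         * second operand (see gen_update_cc_add); the first operand is
         * recovered as N - V, and V ends up set when both operands have the
         * same sign but the result's sign differs.  The SUB and CMP cases
         * below use the same operand convention.
         */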
549         t0 = tcg_temp_new();
550         t1 = tcg_temp_new();
551         tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
552         gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
553         tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
554         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
555         tcg_temp_free(t0);
556         tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
557         tcg_temp_free(t1);
558         break;
559 
560     case CC_OP_SUBB:
561     case CC_OP_SUBW:
562     case CC_OP_SUBL:
563         tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
564         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
565         /* Compute signed overflow for subtraction.  */
566         t0 = tcg_temp_new();
567         t1 = tcg_temp_new();
568         tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
569         gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
570         tcg_gen_xor_i32(t1, QREG_CC_N, t0);
571         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
572         tcg_temp_free(t0);
573         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
574         tcg_temp_free(t1);
575         break;
576 
577     case CC_OP_CMPB:
578     case CC_OP_CMPW:
579     case CC_OP_CMPL:
580         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
581         tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
582         gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
583         /* Compute signed overflow for subtraction.  */
584         t0 = tcg_temp_new();
585         tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
586         tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
587         tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
588         tcg_temp_free(t0);
589         tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
590         break;
591 
592     case CC_OP_LOGIC:
593         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
594         tcg_gen_movi_i32(QREG_CC_C, 0);
595         tcg_gen_movi_i32(QREG_CC_V, 0);
596         break;
597 
598     case CC_OP_DYNAMIC:
599         gen_helper_flush_flags(cpu_env, QREG_CC_OP);
600         s->cc_op_synced = 1;
601         break;
602 
603     default:
604         t0 = tcg_const_i32(s->cc_op);
605         gen_helper_flush_flags(cpu_env, t0);
606         tcg_temp_free(t0);
607         s->cc_op_synced = 1;
608         break;
609     }
610 
611     /* Note that flush_flags also assigns to env->cc_op.  */
612     s->cc_op = CC_OP_FLAGS;
613 }
614 
615 static inline TCGv gen_extend(TCGv val, int opsize, int sign)
616 {
617     TCGv tmp;
618 
619     if (opsize == OS_LONG) {
620         tmp = val;
621     } else {
622         tmp = tcg_temp_new();
623         gen_ext(tmp, val, opsize, sign);
624     }
625 
626     return tmp;
627 }
628 
629 static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
630 {
631     gen_ext(QREG_CC_N, val, opsize, 1);
632     set_cc_op(s, CC_OP_LOGIC);
633 }
634 
635 static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
636 {
637     tcg_gen_mov_i32(QREG_CC_N, dest);
638     tcg_gen_mov_i32(QREG_CC_V, src);
639     set_cc_op(s, CC_OP_CMPB + opsize);
640 }
641 
642 static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
643 {
644     gen_ext(QREG_CC_N, dest, opsize, 1);
645     tcg_gen_mov_i32(QREG_CC_V, src);
646 }
647 
648 static inline int opsize_bytes(int opsize)
649 {
650     switch (opsize) {
651     case OS_BYTE: return 1;
652     case OS_WORD: return 2;
653     case OS_LONG: return 4;
654     case OS_SINGLE: return 4;
655     case OS_DOUBLE: return 8;
656     case OS_EXTENDED: return 12;
657     case OS_PACKED: return 12;
658     default:
659         g_assert_not_reached();
660     }
661 }
662 
663 static inline int insn_opsize(int insn)
664 {
665     switch ((insn >> 6) & 3) {
666     case 0: return OS_BYTE;
667     case 1: return OS_WORD;
668     case 2: return OS_LONG;
669     default:
670         g_assert_not_reached();
671     }
672 }
673 
674 static inline int ext_opsize(int ext, int pos)
675 {
676     switch ((ext >> pos) & 7) {
677     case 0: return OS_LONG;
678     case 1: return OS_SINGLE;
679     case 2: return OS_EXTENDED;
680     case 3: return OS_PACKED;
681     case 4: return OS_WORD;
682     case 5: return OS_DOUBLE;
683     case 6: return OS_BYTE;
684     default:
685         g_assert_not_reached();
686     }
687 }
688 
689 /* Assign value to a register.  If the width is less than the register width
690    only the low part of the register is set.  */
691 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
692 {
693     TCGv tmp;
694     switch (opsize) {
695     case OS_BYTE:
696         tcg_gen_andi_i32(reg, reg, 0xffffff00);
697         tmp = tcg_temp_new();
698         tcg_gen_ext8u_i32(tmp, val);
699         tcg_gen_or_i32(reg, reg, tmp);
700         tcg_temp_free(tmp);
701         break;
702     case OS_WORD:
703         tcg_gen_andi_i32(reg, reg, 0xffff0000);
704         tmp = tcg_temp_new();
705         tcg_gen_ext16u_i32(tmp, val);
706         tcg_gen_or_i32(reg, reg, tmp);
707         tcg_temp_free(tmp);
708         break;
709     case OS_LONG:
710     case OS_SINGLE:
711         tcg_gen_mov_i32(reg, val);
712         break;
713     default:
714         g_assert_not_reached();
715     }
716 }
717 
718 /* Generate code for an "effective address".  Does not adjust the base
719    register for autoincrement addressing modes.  */
720 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
721                          int mode, int reg0, int opsize)
722 {
723     TCGv reg;
724     TCGv tmp;
725     uint16_t ext;
726     uint32_t offset;
727 
728     switch (mode) {
729     case 0: /* Data register direct.  */
730     case 1: /* Address register direct.  */
731         return NULL_QREG;
732     case 3: /* Indirect postincrement.  */
733         if (opsize == OS_UNSIZED) {
734             return NULL_QREG;
735         }
736         /* fallthru */
737     case 2: /* Indirect register */
738         return get_areg(s, reg0);
739     case 4: /* Indirect predecrement.  */
740         if (opsize == OS_UNSIZED) {
741             return NULL_QREG;
742         }
743         reg = get_areg(s, reg0);
744         tmp = tcg_temp_new();
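        /* A byte access through -(A7) keeps the stack pointer word-aligned
         * on 680x0, hence the decrement by 2 rather than 1 below.
         */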
745         if (reg0 == 7 && opsize == OS_BYTE &&
746             m68k_feature(s->env, M68K_FEATURE_M68000)) {
747             tcg_gen_subi_i32(tmp, reg, 2);
748         } else {
749             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
750         }
751         return tmp;
752     case 5: /* Indirect displacement.  */
753         reg = get_areg(s, reg0);
754         tmp = tcg_temp_new();
755         ext = read_im16(env, s);
756         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
757         return tmp;
758     case 6: /* Indirect index + displacement.  */
759         reg = get_areg(s, reg0);
760         return gen_lea_indexed(env, s, reg);
761     case 7: /* Other */
762         switch (reg0) {
763         case 0: /* Absolute short.  */
764             offset = (int16_t)read_im16(env, s);
765             return tcg_const_i32(offset);
766         case 1: /* Absolute long.  */
767             offset = read_im32(env, s);
768             return tcg_const_i32(offset);
769         case 2: /* pc displacement  */
770             offset = s->pc;
771             offset += (int16_t)read_im16(env, s);
772             return tcg_const_i32(offset);
773         case 3: /* pc index+displacement.  */
774             return gen_lea_indexed(env, s, NULL_QREG);
775         case 4: /* Immediate.  */
776         default:
777             return NULL_QREG;
778         }
779     }
780     /* Should never happen.  */
781     return NULL_QREG;
782 }
783 
784 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
785                     int opsize)
786 {
787     int mode = extract32(insn, 3, 3);
788     int reg0 = REG(insn, 0);
789     return gen_lea_mode(env, s, mode, reg0, opsize);
790 }
791 
792 /* Generate code to load/store a value from/into an EA.  If WHAT is
793    EA_STORE this is a write, otherwise it is a read (EA_LOADS sign extends,
794    EA_LOADU zero extends).  ADDRP is non-null for readwrite operands.  */
795 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
796                         int opsize, TCGv val, TCGv *addrp, ea_what what)
797 {
798     TCGv reg, tmp, result;
799     int32_t offset;
800 
801     switch (mode) {
802     case 0: /* Data register direct.  */
803         reg = cpu_dregs[reg0];
804         if (what == EA_STORE) {
805             gen_partset_reg(opsize, reg, val);
806             return store_dummy;
807         } else {
808             return gen_extend(reg, opsize, what == EA_LOADS);
809         }
810     case 1: /* Address register direct.  */
811         reg = get_areg(s, reg0);
812         if (what == EA_STORE) {
813             tcg_gen_mov_i32(reg, val);
814             return store_dummy;
815         } else {
816             return gen_extend(reg, opsize, what == EA_LOADS);
817         }
818     case 2: /* Indirect register */
819         reg = get_areg(s, reg0);
820         return gen_ldst(s, opsize, reg, val, what);
821     case 3: /* Indirect postincrement.  */
822         reg = get_areg(s, reg0);
823         result = gen_ldst(s, opsize, reg, val, what);
824         if (what == EA_STORE || !addrp) {
825             TCGv tmp = tcg_temp_new();
826             if (reg0 == 7 && opsize == OS_BYTE &&
827                 m68k_feature(s->env, M68K_FEATURE_M68000)) {
828                 tcg_gen_addi_i32(tmp, reg, 2);
829             } else {
830                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
831             }
832             delay_set_areg(s, reg0, tmp, true);
833         }
834         return result;
835     case 4: /* Indirect predecrement.  */
836         if (addrp && what == EA_STORE) {
837             tmp = *addrp;
838         } else {
839             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
840             if (IS_NULL_QREG(tmp)) {
841                 return tmp;
842             }
843             if (addrp) {
844                 *addrp = tmp;
845             }
846         }
847         result = gen_ldst(s, opsize, tmp, val, what);
848         if (what == EA_STORE || !addrp) {
849             delay_set_areg(s, reg0, tmp, false);
850         }
851         return result;
852     case 5: /* Indirect displacement.  */
853     case 6: /* Indirect index + displacement.  */
854     do_indirect:
855         if (addrp && what == EA_STORE) {
856             tmp = *addrp;
857         } else {
858             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
859             if (IS_NULL_QREG(tmp)) {
860                 return tmp;
861             }
862             if (addrp) {
863                 *addrp = tmp;
864             }
865         }
866         return gen_ldst(s, opsize, tmp, val, what);
867     case 7: /* Other */
868         switch (reg0) {
869         case 0: /* Absolute short.  */
870         case 1: /* Absolute long.  */
871         case 2: /* pc displacement  */
872         case 3: /* pc index+displacement.  */
873             goto do_indirect;
874         case 4: /* Immediate.  */
875             /* Sign extend values for consistency.  */
876             switch (opsize) {
877             case OS_BYTE:
878                 if (what == EA_LOADS) {
879                     offset = (int8_t)read_im8(env, s);
880                 } else {
881                     offset = read_im8(env, s);
882                 }
883                 break;
884             case OS_WORD:
885                 if (what == EA_LOADS) {
886                     offset = (int16_t)read_im16(env, s);
887                 } else {
888                     offset = read_im16(env, s);
889                 }
890                 break;
891             case OS_LONG:
892                 offset = read_im32(env, s);
893                 break;
894             default:
895                 g_assert_not_reached();
896             }
897             return tcg_const_i32(offset);
898         default:
899             return NULL_QREG;
900         }
901     }
902     /* Should never happen.  */
903     return NULL_QREG;
904 }
905 
906 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
907                    int opsize, TCGv val, TCGv *addrp, ea_what what)
908 {
909     int mode = extract32(insn, 3, 3);
910     int reg0 = REG(insn, 0);
911     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
912 }
913 
914 static TCGv_ptr gen_fp_ptr(int freg)
915 {
916     TCGv_ptr fp = tcg_temp_new_ptr();
917     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
918     return fp;
919 }
920 
921 static TCGv_ptr gen_fp_result_ptr(void)
922 {
923     TCGv_ptr fp = tcg_temp_new_ptr();
924     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
925     return fp;
926 }
927 
928 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
929 {
930     TCGv t32;
931     TCGv_i64 t64;
932 
933     t32 = tcg_temp_new();
934     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
935     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
936     tcg_temp_free(t32);
937 
938     t64 = tcg_temp_new_i64();
939     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
940     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
941     tcg_temp_free_i64(t64);
942 }
943 
944 static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
945 {
946     TCGv tmp;
947     TCGv_i64 t64;
948     int index = IS_USER(s);
949 
950     t64 = tcg_temp_new_i64();
951     tmp = tcg_temp_new();
952     switch (opsize) {
953     case OS_BYTE:
954         tcg_gen_qemu_ld8s(tmp, addr, index);
955         gen_helper_exts32(cpu_env, fp, tmp);
956         break;
957     case OS_WORD:
958         tcg_gen_qemu_ld16s(tmp, addr, index);
959         gen_helper_exts32(cpu_env, fp, tmp);
960         break;
961     case OS_LONG:
962         tcg_gen_qemu_ld32u(tmp, addr, index);
963         gen_helper_exts32(cpu_env, fp, tmp);
964         break;
965     case OS_SINGLE:
966         tcg_gen_qemu_ld32u(tmp, addr, index);
967         gen_helper_extf32(cpu_env, fp, tmp);
968         break;
969     case OS_DOUBLE:
970         tcg_gen_qemu_ld64(t64, addr, index);
971         gen_helper_extf64(cpu_env, fp, t64);
972         tcg_temp_free_i64(t64);
973         break;
974     case OS_EXTENDED:
975         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
976             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
977             break;
978         }
979         tcg_gen_qemu_ld32u(tmp, addr, index);
980         tcg_gen_shri_i32(tmp, tmp, 16);
981         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
982         tcg_gen_addi_i32(tmp, addr, 4);
983         tcg_gen_qemu_ld64(t64, tmp, index);
984         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
985         break;
986     case OS_PACKED:
987         /* unimplemented data type on 68040/ColdFire
988          * FIXME if needed for another FPU
989          */
990         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
991         break;
992     default:
993         g_assert_not_reached();
994     }
995     tcg_temp_free(tmp);
996     tcg_temp_free_i64(t64);
997 }
998 
999 static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
1000 {
1001     TCGv tmp;
1002     TCGv_i64 t64;
1003     int index = IS_USER(s);
1004 
1005     t64 = tcg_temp_new_i64();
1006     tmp = tcg_temp_new();
1007     switch (opsize) {
1008     case OS_BYTE:
1009         gen_helper_reds32(tmp, cpu_env, fp);
1010         tcg_gen_qemu_st8(tmp, addr, index);
1011         break;
1012     case OS_WORD:
1013         gen_helper_reds32(tmp, cpu_env, fp);
1014         tcg_gen_qemu_st16(tmp, addr, index);
1015         break;
1016     case OS_LONG:
1017         gen_helper_reds32(tmp, cpu_env, fp);
1018         tcg_gen_qemu_st32(tmp, addr, index);
1019         break;
1020     case OS_SINGLE:
1021         gen_helper_redf32(tmp, cpu_env, fp);
1022         tcg_gen_qemu_st32(tmp, addr, index);
1023         break;
1024     case OS_DOUBLE:
1025         gen_helper_redf64(t64, cpu_env, fp);
1026         tcg_gen_qemu_st64(t64, addr, index);
1027         break;
1028     case OS_EXTENDED:
1029         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1030             gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1031             break;
1032         }
1033         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1034         tcg_gen_shli_i32(tmp, tmp, 16);
1035         tcg_gen_qemu_st32(tmp, addr, index);
1036         tcg_gen_addi_i32(tmp, addr, 4);
1037         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1038         tcg_gen_qemu_st64(t64, tmp, index);
1039         break;
1040     case OS_PACKED:
1041         /* unimplemented data type on 68040/ColdFire
1042          * FIXME if needed for another FPU
1043          */
1044         gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1045         break;
1046     default:
1047         g_assert_not_reached();
1048     }
1049     tcg_temp_free(tmp);
1050     tcg_temp_free_i64(t64);
1051 }
1052 
1053 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1054                         TCGv_ptr fp, ea_what what)
1055 {
1056     if (what == EA_STORE) {
1057         gen_store_fp(s, opsize, addr, fp);
1058     } else {
1059         gen_load_fp(s, opsize, addr, fp);
1060     }
1061 }
1062 
1063 static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1064                           int reg0, int opsize, TCGv_ptr fp, ea_what what)
1065 {
1066     TCGv reg, addr, tmp;
1067     TCGv_i64 t64;
1068 
1069     switch (mode) {
1070     case 0: /* Data register direct.  */
1071         reg = cpu_dregs[reg0];
1072         if (what == EA_STORE) {
1073             switch (opsize) {
1074             case OS_BYTE:
1075             case OS_WORD:
1076             case OS_LONG:
1077                 gen_helper_reds32(reg, cpu_env, fp);
1078                 break;
1079             case OS_SINGLE:
1080                 gen_helper_redf32(reg, cpu_env, fp);
1081                 break;
1082             default:
1083                 g_assert_not_reached();
1084             }
1085         } else {
1086             tmp = tcg_temp_new();
1087             switch (opsize) {
1088             case OS_BYTE:
1089                 tcg_gen_ext8s_i32(tmp, reg);
1090                 gen_helper_exts32(cpu_env, fp, tmp);
1091                 break;
1092             case OS_WORD:
1093                 tcg_gen_ext16s_i32(tmp, reg);
1094                 gen_helper_exts32(cpu_env, fp, tmp);
1095                 break;
1096             case OS_LONG:
1097                 gen_helper_exts32(cpu_env, fp, reg);
1098                 break;
1099             case OS_SINGLE:
1100                 gen_helper_extf32(cpu_env, fp, reg);
1101                 break;
1102             default:
1103                 g_assert_not_reached();
1104             }
1105             tcg_temp_free(tmp);
1106         }
1107         return 0;
1108     case 1: /* Address register direct.  */
1109         return -1;
1110     case 2: /* Indirect register */
1111         addr = get_areg(s, reg0);
1112         gen_ldst_fp(s, opsize, addr, fp, what);
1113         return 0;
1114     case 3: /* Indirect postincrement.  */
1115         addr = cpu_aregs[reg0];
1116         gen_ldst_fp(s, opsize, addr, fp, what);
1117         tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1118         return 0;
1119     case 4: /* Indirect predecrement.  */
1120         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1121         if (IS_NULL_QREG(addr)) {
1122             return -1;
1123         }
1124         gen_ldst_fp(s, opsize, addr, fp, what);
1125         tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1126         return 0;
1127     case 5: /* Indirect displacement.  */
1128     case 6: /* Indirect index + displacement.  */
1129     do_indirect:
1130         addr = gen_lea_mode(env, s, mode, reg0, opsize);
1131         if (IS_NULL_QREG(addr)) {
1132             return -1;
1133         }
1134         gen_ldst_fp(s, opsize, addr, fp, what);
1135         return 0;
1136     case 7: /* Other */
1137         switch (reg0) {
1138         case 0: /* Absolute short.  */
1139         case 1: /* Absolute long.  */
1140         case 2: /* pc displacement  */
1141         case 3: /* pc index+displacement.  */
1142             goto do_indirect;
1143         case 4: /* Immediate.  */
1144             if (what == EA_STORE) {
1145                 return -1;
1146             }
1147             switch (opsize) {
1148             case OS_BYTE:
1149                 tmp = tcg_const_i32((int8_t)read_im8(env, s));
1150                 gen_helper_exts32(cpu_env, fp, tmp);
1151                 tcg_temp_free(tmp);
1152                 break;
1153             case OS_WORD:
1154                 tmp = tcg_const_i32((int16_t)read_im16(env, s));
1155                 gen_helper_exts32(cpu_env, fp, tmp);
1156                 tcg_temp_free(tmp);
1157                 break;
1158             case OS_LONG:
1159                 tmp = tcg_const_i32(read_im32(env, s));
1160                 gen_helper_exts32(cpu_env, fp, tmp);
1161                 tcg_temp_free(tmp);
1162                 break;
1163             case OS_SINGLE:
1164                 tmp = tcg_const_i32(read_im32(env, s));
1165                 gen_helper_extf32(cpu_env, fp, tmp);
1166                 tcg_temp_free(tmp);
1167                 break;
1168             case OS_DOUBLE:
1169                 t64 = tcg_const_i64(read_im64(env, s));
1170                 gen_helper_extf64(cpu_env, fp, t64);
1171                 tcg_temp_free_i64(t64);
1172                 break;
1173             case OS_EXTENDED:
1174                 if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1175                     gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1176                     break;
1177                 }
1178                 tmp = tcg_const_i32(read_im32(env, s) >> 16);
1179                 tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1180                 tcg_temp_free(tmp);
1181                 t64 = tcg_const_i64(read_im64(env, s));
1182                 tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1183                 tcg_temp_free_i64(t64);
1184                 break;
1185             case OS_PACKED:
1186                 /* unimplemented data type on 68040/ColdFire
1187                  * FIXME if needed for another FPU
1188                  */
1189                 gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
1190                 break;
1191             default:
1192                 g_assert_not_reached();
1193             }
1194             return 0;
1195         default:
1196             return -1;
1197         }
1198     }
1199     return -1;
1200 }
1201 
1202 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1203                        int opsize, TCGv_ptr fp, ea_what what)
1204 {
1205     int mode = extract32(insn, 3, 3);
1206     int reg0 = REG(insn, 0);
1207     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what);
1208 }
1209 
1210 typedef struct {
1211     TCGCond tcond;
1212     bool g1;
1213     bool g2;
1214     TCGv v1;
1215     TCGv v2;
1216 } DisasCompare;
1217 
1218 static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1219 {
1220     TCGv tmp, tmp2;
1221     TCGCond tcond;
1222     CCOp op = s->cc_op;
1223 
1224     /* The CC_OP_CMP form can handle most normal comparisons directly.  */
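    /* For CC_OP_CMP*, QREG_CC_N holds the first operand and QREG_CC_V the
     * second (see gen_update_cc_cmp), so these conditions map onto a single
     * TCG comparison of N against V.
     */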
1225     if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1226         c->g1 = c->g2 = 1;
1227         c->v1 = QREG_CC_N;
1228         c->v2 = QREG_CC_V;
1229         switch (cond) {
1230         case 2: /* HI */
1231         case 3: /* LS */
1232             tcond = TCG_COND_LEU;
1233             goto done;
1234         case 4: /* CC */
1235         case 5: /* CS */
1236             tcond = TCG_COND_LTU;
1237             goto done;
1238         case 6: /* NE */
1239         case 7: /* EQ */
1240             tcond = TCG_COND_EQ;
1241             goto done;
1242         case 10: /* PL */
1243         case 11: /* MI */
1244             c->g1 = c->g2 = 0;
1245             c->v2 = tcg_const_i32(0);
1246             c->v1 = tmp = tcg_temp_new();
1247             tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1248             gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1249             /* fallthru */
1250         case 12: /* GE */
1251         case 13: /* LT */
1252             tcond = TCG_COND_LT;
1253             goto done;
1254         case 14: /* GT */
1255         case 15: /* LE */
1256             tcond = TCG_COND_LE;
1257             goto done;
1258         }
1259     }
1260 
1261     c->g1 = 1;
1262     c->g2 = 0;
1263     c->v2 = tcg_const_i32(0);
1264 
1265     switch (cond) {
1266     case 0: /* T */
1267     case 1: /* F */
1268         c->v1 = c->v2;
1269         tcond = TCG_COND_NEVER;
1270         goto done;
1271     case 14: /* GT (!(Z || (N ^ V))) */
1272     case 15: /* LE (Z || (N ^ V)) */
1273         /* Logic operations clear V, which simplifies LE to (Z || N),
1274            and since Z and N are co-located, this becomes a normal
1275            comparison vs N.  */
1276         if (op == CC_OP_LOGIC) {
1277             c->v1 = QREG_CC_N;
1278             tcond = TCG_COND_LE;
1279             goto done;
1280         }
1281         break;
1282     case 12: /* GE (!(N ^ V)) */
1283     case 13: /* LT (N ^ V) */
1284         /* Logic operations clear V, which simplifies this to N.  */
1285         if (op != CC_OP_LOGIC) {
1286             break;
1287         }
1288         /* fallthru */
1289     case 10: /* PL (!N) */
1290     case 11: /* MI (N) */
1291         /* Several cases represent N normally.  */
1292         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1293             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1294             op == CC_OP_LOGIC) {
1295             c->v1 = QREG_CC_N;
1296             tcond = TCG_COND_LT;
1297             goto done;
1298         }
1299         break;
1300     case 6: /* NE (!Z) */
1301     case 7: /* EQ (Z) */
1302         /* Some cases fold Z into N.  */
1303         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1304             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1305             op == CC_OP_LOGIC) {
1306             tcond = TCG_COND_EQ;
1307             c->v1 = QREG_CC_N;
1308             goto done;
1309         }
1310         break;
1311     case 4: /* CC (!C) */
1312     case 5: /* CS (C) */
1313         /* Some cases fold C into X.  */
1314         if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1315             op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1316             tcond = TCG_COND_NE;
1317             c->v1 = QREG_CC_X;
1318             goto done;
1319         }
1320         /* fallthru */
1321     case 8: /* VC (!V) */
1322     case 9: /* VS (V) */
1323         /* Logic operations clear V and C.  */
1324         if (op == CC_OP_LOGIC) {
1325             tcond = TCG_COND_NEVER;
1326             c->v1 = c->v2;
1327             goto done;
1328         }
1329         break;
1330     }
1331 
1332     /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1333     gen_flush_flags(s);
1334 
1335     switch (cond) {
1336     case 0: /* T */
1337     case 1: /* F */
1338     default:
1339         /* Invalid, or handled above.  */
1340         abort();
1341     case 2: /* HI (!C && !Z) -> !(C || Z) */
1342     case 3: /* LS (C || Z) */
1343         c->v1 = tmp = tcg_temp_new();
1344         c->g1 = 0;
1345         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1346         tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1347         tcond = TCG_COND_NE;
1348         break;
1349     case 4: /* CC (!C) */
1350     case 5: /* CS (C) */
1351         c->v1 = QREG_CC_C;
1352         tcond = TCG_COND_NE;
1353         break;
1354     case 6: /* NE (!Z) */
1355     case 7: /* EQ (Z) */
1356         c->v1 = QREG_CC_Z;
1357         tcond = TCG_COND_EQ;
1358         break;
1359     case 8: /* VC (!V) */
1360     case 9: /* VS (V) */
1361         c->v1 = QREG_CC_V;
1362         tcond = TCG_COND_LT;
1363         break;
1364     case 10: /* PL (!N) */
1365     case 11: /* MI (N) */
1366         c->v1 = QREG_CC_N;
1367         tcond = TCG_COND_LT;
1368         break;
1369     case 12: /* GE (!(N ^ V)) */
1370     case 13: /* LT (N ^ V) */
1371         c->v1 = tmp = tcg_temp_new();
1372         c->g1 = 0;
1373         tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1374         tcond = TCG_COND_LT;
1375         break;
1376     case 14: /* GT (!(Z || (N ^ V))) */
1377     case 15: /* LE (Z || (N ^ V)) */
1378         c->v1 = tmp = tcg_temp_new();
1379         c->g1 = 0;
1380         tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1381         tcg_gen_neg_i32(tmp, tmp);
1382         tmp2 = tcg_temp_new();
1383         tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1384         tcg_gen_or_i32(tmp, tmp, tmp2);
1385         tcg_temp_free(tmp2);
1386         tcond = TCG_COND_LT;
1387         break;
1388     }
1389 
1390  done:
1391     if ((cond & 1) == 0) {
1392         tcond = tcg_invert_cond(tcond);
1393     }
1394     c->tcond = tcond;
1395 }
1396 
1397 static void free_cond(DisasCompare *c)
1398 {
1399     if (!c->g1) {
1400         tcg_temp_free(c->v1);
1401     }
1402     if (!c->g2) {
1403         tcg_temp_free(c->v2);
1404     }
1405 }
1406 
1407 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1408 {
1409   DisasCompare c;
1410 
1411   gen_cc_cond(&c, s, cond);
1412   update_cc_op(s);
1413   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1414   free_cond(&c);
1415 }
1416 
1417 /* Force a TB lookup after an instruction that changes the CPU state.  */
1418 static void gen_lookup_tb(DisasContext *s)
1419 {
1420     update_cc_op(s);
1421     tcg_gen_movi_i32(QREG_PC, s->pc);
1422     s->is_jmp = DISAS_UPDATE;
1423 }
1424 
1425 #define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1426         result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1427                         op_sign ? EA_LOADS : EA_LOADU);                 \
1428         if (IS_NULL_QREG(result)) {                                     \
1429             gen_addr_fault(s);                                          \
1430             return;                                                     \
1431         }                                                               \
1432     } while (0)
1433 
1434 #define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1435         TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1436         if (IS_NULL_QREG(ea_result)) {                                  \
1437             gen_addr_fault(s);                                          \
1438             return;                                                     \
1439         }                                                               \
1440     } while (0)
1441 
1442 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1443 {
1444 #ifndef CONFIG_USER_ONLY
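    /* Direct block chaining is only used when the destination stays on the
     * same guest page as this TB; otherwise gen_jmp_tb() falls back to an
     * indirect jump through QREG_PC.
     */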
1445     return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1446            (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1447 #else
1448     return true;
1449 #endif
1450 }
1451 
1452 /* Generate a jump to an immediate address.  */
1453 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1454 {
1455     if (unlikely(s->singlestep_enabled)) {
1456         gen_exception(s, dest, EXCP_DEBUG);
1457     } else if (use_goto_tb(s, dest)) {
1458         tcg_gen_goto_tb(n);
1459         tcg_gen_movi_i32(QREG_PC, dest);
1460         tcg_gen_exit_tb((uintptr_t)s->tb + n);
1461     } else {
1462         gen_jmp_im(s, dest);
1463         tcg_gen_exit_tb(0);
1464     }
1465     s->is_jmp = DISAS_TB_JUMP;
1466 }
1467 
1468 DISAS_INSN(scc)
1469 {
1470     DisasCompare c;
1471     int cond;
1472     TCGv tmp;
1473 
1474     cond = (insn >> 8) & 0xf;
1475     gen_cc_cond(&c, s, cond);
1476 
1477     tmp = tcg_temp_new();
1478     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1479     free_cond(&c);
1480 
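    /* Scc stores 0x00 or 0xff, so turn the 0/1 setcond result into 0/-1
     * before writing the low byte.
     */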
1481     tcg_gen_neg_i32(tmp, tmp);
1482     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1483     tcg_temp_free(tmp);
1484 }
1485 
1486 DISAS_INSN(dbcc)
1487 {
1488     TCGLabel *l1;
1489     TCGv reg;
1490     TCGv tmp;
1491     int16_t offset;
1492     uint32_t base;
1493 
1494     reg = DREG(insn, 0);
1495     base = s->pc;
1496     offset = (int16_t)read_im16(env, s);
1497     l1 = gen_new_label();
1498     gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1499 
1500     tmp = tcg_temp_new();
1501     tcg_gen_ext16s_i32(tmp, reg);
1502     tcg_gen_addi_i32(tmp, tmp, -1);
1503     gen_partset_reg(OS_WORD, reg, tmp);
1504     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1505     gen_jmp_tb(s, 1, base + offset);
1506     gen_set_label(l1);
1507     gen_jmp_tb(s, 0, s->pc);
1508 }
1509 
1510 DISAS_INSN(undef_mac)
1511 {
1512     gen_exception(s, s->insn_pc, EXCP_LINEA);
1513 }
1514 
1515 DISAS_INSN(undef_fpu)
1516 {
1517     gen_exception(s, s->insn_pc, EXCP_LINEF);
1518 }
1519 
1520 DISAS_INSN(undef)
1521 {
1522     /* ??? This covers both instructions that are as yet unimplemented
1523        for the 680x0 series, and those that are implemented but
1524        actually illegal for CPU32 or pre-68020.  */
1525     qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1526                   insn, s->insn_pc);
1527     gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
1528 }
1529 
1530 DISAS_INSN(mulw)
1531 {
1532     TCGv reg;
1533     TCGv tmp;
1534     TCGv src;
1535     int sign;
1536 
1537     sign = (insn & 0x100) != 0;
1538     reg = DREG(insn, 9);
1539     tmp = tcg_temp_new();
1540     if (sign)
1541         tcg_gen_ext16s_i32(tmp, reg);
1542     else
1543         tcg_gen_ext16u_i32(tmp, reg);
1544     SRC_EA(env, src, OS_WORD, sign, NULL);
1545     tcg_gen_mul_i32(tmp, tmp, src);
1546     tcg_gen_mov_i32(reg, tmp);
1547     gen_logic_cc(s, tmp, OS_LONG);
1548     tcg_temp_free(tmp);
1549 }
1550 
1551 DISAS_INSN(divw)
1552 {
1553     int sign;
1554     TCGv src;
1555     TCGv destr;
1556 
1557     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1558 
1559     sign = (insn & 0x100) != 0;
1560 
1561     /* dest.l / src.w */
1562 
1563     SRC_EA(env, src, OS_WORD, sign, NULL);
1564     destr = tcg_const_i32(REG(insn, 9));
1565     if (sign) {
1566         gen_helper_divsw(cpu_env, destr, src);
1567     } else {
1568         gen_helper_divuw(cpu_env, destr, src);
1569     }
1570     tcg_temp_free(destr);
1571 
1572     set_cc_op(s, CC_OP_FLAGS);
1573 }
1574 
1575 DISAS_INSN(divl)
1576 {
1577     TCGv num, reg, den;
1578     int sign;
1579     uint16_t ext;
1580 
1581     ext = read_im16(env, s);
1582 
1583     sign = (ext & 0x0800) != 0;
1584 
1585     if (ext & 0x400) {
1586         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1587             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1588             return;
1589         }
1590 
1591         /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1592 
1593         SRC_EA(env, den, OS_LONG, 0, NULL);
1594         num = tcg_const_i32(REG(ext, 12));
1595         reg = tcg_const_i32(REG(ext, 0));
1596         if (sign) {
1597             gen_helper_divsll(cpu_env, num, reg, den);
1598         } else {
1599             gen_helper_divull(cpu_env, num, reg, den);
1600         }
1601         tcg_temp_free(reg);
1602         tcg_temp_free(num);
1603         set_cc_op(s, CC_OP_FLAGS);
1604         return;
1605     }
1606 
1607     /* divX.l <EA>, Dq        32/32 -> 32q     */
1608     /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1609 
1610     SRC_EA(env, den, OS_LONG, 0, NULL);
1611     num = tcg_const_i32(REG(ext, 12));
1612     reg = tcg_const_i32(REG(ext, 0));
1613     if (sign) {
1614         gen_helper_divsl(cpu_env, num, reg, den);
1615     } else {
1616         gen_helper_divul(cpu_env, num, reg, den);
1617     }
1618     tcg_temp_free(reg);
1619     tcg_temp_free(num);
1620 
1621     set_cc_op(s, CC_OP_FLAGS);
1622 }
1623 
1624 static void bcd_add(TCGv dest, TCGv src)
1625 {
1626     TCGv t0, t1;
1627 
1628     /*  dest10 = dest10 + src10 + X
1629      *
1630      *        t1 = src
1631      *        t2 = t1 + 0x066
1632      *        t3 = t2 + dest + X
1633      *        t4 = t2 ^ dest
1634      *        t5 = t3 ^ t4
1635      *        t6 = ~t5 & 0x110
1636      *        t7 = (t6 >> 2) | (t6 >> 3)
1637      *        return t3 - t7
1638      */
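    /* Illustrative trace (added example, not from the original notes):
     * dest = 0x19, src = 0x28, X = 0:
     *   t2 = 0x28 + 0x066 = 0x08e,  t3 = 0x08e + 0x19 = 0x0a7
     *   t4 = 0x08e ^ 0x19 = 0x097,  t5 = 0x0a7 ^ 0x097 = 0x030
     *   t6 = ~t5 & 0x110 = 0x100,   t7 = (t6 >> 2) | (t6 >> 3) = 0x060
     *   t3 - t7 = 0x0a7 - 0x060 = 0x047, i.e. BCD 19 + 28 = 47
     */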
1639 
1640     /* t1 = (src + 0x066) + dest + X
1641      *    = result with a possible excess 0x6 in each digit
1642      */
1643 
1644     t0 = tcg_const_i32(0x066);
1645     tcg_gen_add_i32(t0, t0, src);
1646 
1647     t1 = tcg_temp_new();
1648     tcg_gen_add_i32(t1, t0, dest);
1649     tcg_gen_add_i32(t1, t1, QREG_CC_X);
1650 
1651     /* we will remove the excess 0x6 where there is no carry */
1652 
1653     /* t0 = (src + 0x0066) ^ dest
1654      *    = t1 without carries
1655      */
1656 
1657     tcg_gen_xor_i32(t0, t0, dest);
1658 
1659     /* extract the carries
1660      * t0 = t0 ^ t1
1661      *    = only the carries
1662      */
1663 
1664     tcg_gen_xor_i32(t0, t0, t1);
1665 
1666     /* generate 0x2 for each digit that did not produce a carry,
1667      * then multiply by 3 to obtain the 0x6 to subtract
1668      */
1669 
1670     tcg_gen_shri_i32(t0, t0, 3);
1671     tcg_gen_not_i32(t0, t0);
1672     tcg_gen_andi_i32(t0, t0, 0x22);
1673     tcg_gen_add_i32(dest, t0, t0);
1674     tcg_gen_add_i32(dest, dest, t0);
1675     tcg_temp_free(t0);
1676 
1677     /* remove the excess 0x6
1678      * from digits that have not generated a carry
1679      */
1680 
1681     tcg_gen_sub_i32(dest, t1, dest);
1682     tcg_temp_free(t1);
1683 }
1684 
1685 static void bcd_sub(TCGv dest, TCGv src)
1686 {
1687     TCGv t0, t1, t2;
1688 
1689     /*  dest10 = dest10 - src10 - X
1690      *         = bcd_add(dest + 1 - X, 0x199 - src)
1691      */
1692 
1693     /* t0 = 0x066 + (0x199 - src) */
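    /*    (0x066 + 0x199 = 0x1ff, hence the single subtraction below) */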
1694 
1695     t0 = tcg_temp_new();
1696     tcg_gen_subfi_i32(t0, 0x1ff, src);
1697 
1698     /* t1 = t0 + dest + 1 - X */
1699 
1700     t1 = tcg_temp_new();
1701     tcg_gen_add_i32(t1, t0, dest);
1702     tcg_gen_addi_i32(t1, t1, 1);
1703     tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1704 
1705     /* t2 = t0 ^ dest */
1706 
1707     t2 = tcg_temp_new();
1708     tcg_gen_xor_i32(t2, t0, dest);
1709 
1710     /* t0 = t1 ^ t2 */
1711 
1712     tcg_gen_xor_i32(t0, t1, t2);
1713 
1714     /* t2 = ~t0 & 0x110
1715      * t0 = (t2 >> 2) | (t2 >> 3)
1716      *
1717      * to fit on 8bit operands, changed in:
1718      *
1719      * t2 = ~(t0 >> 3) & 0x22
1720      * t0 = t2 + t2
1721      * t0 = t0 + t2
1722      */
1723 
1724     tcg_gen_shri_i32(t2, t0, 3);
1725     tcg_gen_not_i32(t2, t2);
1726     tcg_gen_andi_i32(t2, t2, 0x22);
1727     tcg_gen_add_i32(t0, t2, t2);
1728     tcg_gen_add_i32(t0, t0, t2);
1729     tcg_temp_free(t2);
1730 
1731     /* return t1 - t0 */
1732 
1733     tcg_gen_sub_i32(dest, t1, t0);
1734     tcg_temp_free(t0);
1735     tcg_temp_free(t1);
1736 }
1737 
1738 static void bcd_flags(TCGv val)
1739 {
1740     tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1741     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1742 
1743     tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1744 
1745     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1746 }
1747 
1748 DISAS_INSN(abcd_reg)
1749 {
1750     TCGv src;
1751     TCGv dest;
1752 
1753     gen_flush_flags(s); /* !Z is sticky */
1754 
1755     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1756     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1757     bcd_add(dest, src);
1758     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1759 
1760     bcd_flags(dest);
1761 }
1762 
1763 DISAS_INSN(abcd_mem)
1764 {
1765     TCGv src, dest, addr;
1766 
1767     gen_flush_flags(s); /* !Z is sticky */
1768 
1769     /* Indirect pre-decrement load (mode 4) */
1770 
1771     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1772                       NULL_QREG, NULL, EA_LOADU);
1773     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1774                        NULL_QREG, &addr, EA_LOADU);
1775 
1776     bcd_add(dest, src);
1777 
1778     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1779 
1780     bcd_flags(dest);
1781 }
1782 
1783 DISAS_INSN(sbcd_reg)
1784 {
1785     TCGv src, dest;
1786 
1787     gen_flush_flags(s); /* !Z is sticky */
1788 
1789     src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1790     dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1791 
1792     bcd_sub(dest, src);
1793 
1794     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1795 
1796     bcd_flags(dest);
1797 }
1798 
1799 DISAS_INSN(sbcd_mem)
1800 {
1801     TCGv src, dest, addr;
1802 
1803     gen_flush_flags(s); /* !Z is sticky */
1804 
1805     /* Indirect pre-decrement load (mode 4) */
1806 
1807     src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1808                       NULL_QREG, NULL, EA_LOADU);
1809     dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1810                        NULL_QREG, &addr, EA_LOADU);
1811 
1812     bcd_sub(dest, src);
1813 
1814     gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1815 
1816     bcd_flags(dest);
1817 }
1818 
1819 DISAS_INSN(nbcd)
1820 {
1821     TCGv src, dest;
1822     TCGv addr;
1823 
1824     gen_flush_flags(s); /* !Z is sticky */
1825 
1826     SRC_EA(env, src, OS_BYTE, 0, &addr);
1827 
1828     dest = tcg_const_i32(0);
1829     bcd_sub(dest, src);
1830 
1831     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1832 
1833     bcd_flags(dest);
1834 
1835     tcg_temp_free(dest);
1836 }
1837 
1838 DISAS_INSN(addsub)
1839 {
1840     TCGv reg;
1841     TCGv dest;
1842     TCGv src;
1843     TCGv tmp;
1844     TCGv addr;
1845     int add;
1846     int opsize;
1847 
1848     add = (insn & 0x4000) != 0;
1849     opsize = insn_opsize(insn);
1850     reg = gen_extend(DREG(insn, 9), opsize, 1);
1851     dest = tcg_temp_new();
1852     if (insn & 0x100) {
1853         SRC_EA(env, tmp, opsize, 1, &addr);
1854         src = reg;
1855     } else {
1856         tmp = reg;
1857         SRC_EA(env, src, opsize, 1, NULL);
1858     }
1859     if (add) {
1860         tcg_gen_add_i32(dest, tmp, src);
1861         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1862         set_cc_op(s, CC_OP_ADDB + opsize);
1863     } else {
1864         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1865         tcg_gen_sub_i32(dest, tmp, src);
1866         set_cc_op(s, CC_OP_SUBB + opsize);
1867     }
1868     gen_update_cc_add(dest, src, opsize);
1869     if (insn & 0x100) {
1870         DEST_EA(env, insn, opsize, dest, &addr);
1871     } else {
1872         gen_partset_reg(opsize, DREG(insn, 9), dest);
1873     }
1874     tcg_temp_free(dest);
1875 }
1876 
1877 /* Reverse the order of the bits in REG.  */
1878 DISAS_INSN(bitrev)
1879 {
1880     TCGv reg;
1881     reg = DREG(insn, 0);
1882     gen_helper_bitrev(reg, reg);
1883 }
1884 
1885 DISAS_INSN(bitop_reg)
1886 {
1887     int opsize;
1888     int op;
1889     TCGv src1;
1890     TCGv src2;
1891     TCGv tmp;
1892     TCGv addr;
1893     TCGv dest;
1894 
1895     if ((insn & 0x38) != 0)
1896         opsize = OS_BYTE;
1897     else
1898         opsize = OS_LONG;
1899     op = (insn >> 6) & 3;
1900     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1901 
1902     gen_flush_flags(s);
1903     src2 = tcg_temp_new();
1904     if (opsize == OS_BYTE)
1905         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1906     else
1907         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1908 
1909     tmp = tcg_const_i32(1);
1910     tcg_gen_shl_i32(tmp, tmp, src2);
1911     tcg_temp_free(src2);
1912 
1913     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1914 
1915     dest = tcg_temp_new();
1916     switch (op) {
1917     case 1: /* bchg */
1918         tcg_gen_xor_i32(dest, src1, tmp);
1919         break;
1920     case 2: /* bclr */
1921         tcg_gen_andc_i32(dest, src1, tmp);
1922         break;
1923     case 3: /* bset */
1924         tcg_gen_or_i32(dest, src1, tmp);
1925         break;
1926     default: /* btst */
1927         break;
1928     }
1929     tcg_temp_free(tmp);
1930     if (op) {
1931         DEST_EA(env, insn, opsize, dest, &addr);
1932     }
1933     tcg_temp_free(dest);
1934 }
1935 
1936 DISAS_INSN(sats)
1937 {
1938     TCGv reg;
1939     reg = DREG(insn, 0);
1940     gen_flush_flags(s);
1941     gen_helper_sats(reg, reg, QREG_CC_V);
1942     gen_logic_cc(s, reg, OS_LONG);
1943 }
1944 
1945 static void gen_push(DisasContext *s, TCGv val)
1946 {
1947     TCGv tmp;
1948 
1949     tmp = tcg_temp_new();
1950     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1951     gen_store(s, OS_LONG, tmp, val);
1952     tcg_gen_mov_i32(QREG_SP, tmp);
1953     tcg_temp_free(tmp);
1954 }
1955 
1956 static TCGv mreg(int reg)
1957 {
1958     if (reg < 8) {
1959         /* Dx */
1960         return cpu_dregs[reg];
1961     }
1962     /* Ax */
1963     return cpu_aregs[reg & 7];
1964 }
1965 
1966 DISAS_INSN(movem)
1967 {
1968     TCGv addr, incr, tmp, r[16];
1969     int is_load = (insn & 0x0400) != 0;
1970     int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
1971     uint16_t mask = read_im16(env, s);
1972     int mode = extract32(insn, 3, 3);
1973     int reg0 = REG(insn, 0);
1974     int i;
1975 
1976     tmp = cpu_aregs[reg0];
1977 
1978     switch (mode) {
1979     case 0: /* data register direct */
1980     case 1: /* addr register direct */
1981     do_addr_fault:
1982         gen_addr_fault(s);
1983         return;
1984 
1985     case 2: /* indirect */
1986         break;
1987 
1988     case 3: /* indirect post-increment */
1989         if (!is_load) {
1990             /* post-increment is not allowed */
1991             goto do_addr_fault;
1992         }
1993         break;
1994 
1995     case 4: /* indirect pre-decrement */
1996         if (is_load) {
1997             /* pre-decrement is not allowed */
1998             goto do_addr_fault;
1999         }
2000         /* We want a bare copy of the address reg, without the pre-decrement
2001            adjustment that gen_lea would apply.  */
2002         break;
2003 
2004     default:
2005         tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2006         if (IS_NULL_QREG(tmp)) {
2007             goto do_addr_fault;
2008         }
2009         break;
2010     }
2011 
2012     addr = tcg_temp_new();
2013     tcg_gen_mov_i32(addr, tmp);
2014     incr = tcg_const_i32(opsize_bytes(opsize));
2015 
2016     if (is_load) {
2017         /* memory to register */
2018         for (i = 0; i < 16; i++) {
2019             if (mask & (1 << i)) {
2020                 r[i] = gen_load(s, opsize, addr, 1);
2021                 tcg_gen_add_i32(addr, addr, incr);
2022             }
2023         }
2024         for (i = 0; i < 16; i++) {
2025             if (mask & (1 << i)) {
2026                 tcg_gen_mov_i32(mreg(i), r[i]);
2027                 tcg_temp_free(r[i]);
2028             }
2029         }
2030         if (mode == 3) {
2031             /* post-increment: movem (An)+,X */
2032             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2033         }
2034     } else {
2035         /* register to memory */
2036         if (mode == 4) {
2037             /* pre-decrement: movem X,-(An) */
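                 /* The register mask is reversed for the pre-decrement form:
                  * bit 15 selects D0 and bit 0 selects A7, hence the
                  * (mask << i) & 0x8000 test below.
                  */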
2038             for (i = 15; i >= 0; i--) {
2039                 if ((mask << i) & 0x8000) {
2040                     tcg_gen_sub_i32(addr, addr, incr);
2041                     if (reg0 + 8 == i &&
2042                         m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2043                         /* M68020+: if the addressing register is the
2044                          * register moved to memory, the value written
2045                          * is the initial value decremented by the size of
2046                          * the operation, regardless of how many actual
2047                          * stores have been performed until this point.
2048                          * M68000/M68010: the value is the initial value.
2049                          */
2050                         tmp = tcg_temp_new();
2051                         tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2052                         gen_store(s, opsize, addr, tmp);
2053                         tcg_temp_free(tmp);
2054                     } else {
2055                         gen_store(s, opsize, addr, mreg(i));
2056                     }
2057                 }
2058             }
2059             tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2060         } else {
2061             for (i = 0; i < 16; i++) {
2062                 if (mask & (1 << i)) {
2063                     gen_store(s, opsize, addr, mreg(i));
2064                     tcg_gen_add_i32(addr, addr, incr);
2065                 }
2066             }
2067         }
2068     }
2069 
2070     tcg_temp_free(incr);
2071     tcg_temp_free(addr);
2072 }
2073 
2074 DISAS_INSN(bitop_im)
2075 {
2076     int opsize;
2077     int op;
2078     TCGv src1;
2079     uint32_t mask;
2080     int bitnum;
2081     TCGv tmp;
2082     TCGv addr;
2083 
2084     if ((insn & 0x38) != 0)
2085         opsize = OS_BYTE;
2086     else
2087         opsize = OS_LONG;
2088     op = (insn >> 6) & 3;
2089 
2090     bitnum = read_im16(env, s);
2091     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2092         if (bitnum & 0xfe00) {
2093             disas_undef(env, s, insn);
2094             return;
2095         }
2096     } else {
2097         if (bitnum & 0xff00) {
2098             disas_undef(env, s, insn);
2099             return;
2100         }
2101     }
2102 
2103     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2104 
2105     gen_flush_flags(s);
2106     if (opsize == OS_BYTE)
2107         bitnum &= 7;
2108     else
2109         bitnum &= 31;
2110     mask = 1 << bitnum;
2111 
2112     tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2113 
2114     if (op) {
2115         tmp = tcg_temp_new();
2116         switch (op) {
2117         case 1: /* bchg */
2118             tcg_gen_xori_i32(tmp, src1, mask);
2119             break;
2120         case 2: /* bclr */
2121             tcg_gen_andi_i32(tmp, src1, ~mask);
2122             break;
2123         case 3: /* bset */
2124             tcg_gen_ori_i32(tmp, src1, mask);
2125             break;
2126         default: /* btst */
2127             break;
2128         }
2129         DEST_EA(env, insn, opsize, tmp, &addr);
2130         tcg_temp_free(tmp);
2131     }
2132 }
2133 
2134 static TCGv gen_get_ccr(DisasContext *s)
2135 {
2136     TCGv dest;
2137 
2138     update_cc_op(s);
2139     dest = tcg_temp_new();
2140     gen_helper_get_ccr(dest, cpu_env);
2141     return dest;
2142 }
2143 
2144 static TCGv gen_get_sr(DisasContext *s)
2145 {
2146     TCGv ccr;
2147     TCGv sr;
2148 
2149     ccr = gen_get_ccr(s);
2150     sr = tcg_temp_new();
2151     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2152     tcg_gen_or_i32(sr, sr, ccr);
2153     return sr;
2154 }
2155 
2156 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2157 {
2158     if (ccr_only) {
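             /* In the CC_OP_FLAGS representation used here, C and X are
              * stored as 0/1, N and V are significant only in their sign bit
              * (hence the -1/0 values), and Z is stored inverted:
              * QREG_CC_Z == 0 means Z is set.
              */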
2159         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2160         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2161         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2162         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2163         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2164     } else {
2165         TCGv sr = tcg_const_i32(val);
2166         gen_helper_set_sr(cpu_env, sr);
2167         tcg_temp_free(sr);
2168     }
2169     set_cc_op(s, CC_OP_FLAGS);
2170 }
2171 
2172 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2173 {
2174     if (ccr_only) {
2175         gen_helper_set_ccr(cpu_env, val);
2176     } else {
2177         gen_helper_set_sr(cpu_env, val);
2178     }
2179     set_cc_op(s, CC_OP_FLAGS);
2180 }
2181 
2182 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2183                            bool ccr_only)
2184 {
2185     if ((insn & 0x3f) == 0x3c) {
2186         uint16_t val;
2187         val = read_im16(env, s);
2188         gen_set_sr_im(s, val, ccr_only);
2189     } else {
2190         TCGv src;
2191         SRC_EA(env, src, OS_WORD, 0, NULL);
2192         gen_set_sr(s, src, ccr_only);
2193     }
2194 }
2195 
2196 DISAS_INSN(arith_im)
2197 {
2198     int op;
2199     TCGv im;
2200     TCGv src1;
2201     TCGv dest;
2202     TCGv addr;
2203     int opsize;
2204     bool with_SR = ((insn & 0x3f) == 0x3c);
2205 
2206     op = (insn >> 9) & 7;
2207     opsize = insn_opsize(insn);
2208     switch (opsize) {
2209     case OS_BYTE:
2210         im = tcg_const_i32((int8_t)read_im8(env, s));
2211         break;
2212     case OS_WORD:
2213         im = tcg_const_i32((int16_t)read_im16(env, s));
2214         break;
2215     case OS_LONG:
2216         im = tcg_const_i32(read_im32(env, s));
2217         break;
2218     default:
2219        abort();
2220     }
2221 
2222     if (with_SR) {
2223         /* SR/CCR can only be used with andi/eori/ori */
2224         if (op == 2 || op == 3 || op == 6) {
2225             disas_undef(env, s, insn);
2226             return;
2227         }
2228         switch (opsize) {
2229         case OS_BYTE:
2230             src1 = gen_get_ccr(s);
2231             break;
2232         case OS_WORD:
2233             if (IS_USER(s)) {
2234                 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
2235                 return;
2236             }
2237             src1 = gen_get_sr(s);
2238             break;
2239         case OS_LONG:
2240             disas_undef(env, s, insn);
2241             return;
2242         }
2243     } else {
2244         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2245     }
2246     dest = tcg_temp_new();
2247     switch (op) {
2248     case 0: /* ori */
2249         tcg_gen_or_i32(dest, src1, im);
2250         if (with_SR) {
2251             gen_set_sr(s, dest, opsize == OS_BYTE);
2252         } else {
2253             DEST_EA(env, insn, opsize, dest, &addr);
2254             gen_logic_cc(s, dest, opsize);
2255         }
2256         break;
2257     case 1: /* andi */
2258         tcg_gen_and_i32(dest, src1, im);
2259         if (with_SR) {
2260             gen_set_sr(s, dest, opsize == OS_BYTE);
2261         } else {
2262             DEST_EA(env, insn, opsize, dest, &addr);
2263             gen_logic_cc(s, dest, opsize);
2264         }
2265         break;
2266     case 2: /* subi */
2267         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2268         tcg_gen_sub_i32(dest, src1, im);
2269         gen_update_cc_add(dest, im, opsize);
2270         set_cc_op(s, CC_OP_SUBB + opsize);
2271         DEST_EA(env, insn, opsize, dest, &addr);
2272         break;
2273     case 3: /* addi */
2274         tcg_gen_add_i32(dest, src1, im);
2275         gen_update_cc_add(dest, im, opsize);
2276         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2277         set_cc_op(s, CC_OP_ADDB + opsize);
2278         DEST_EA(env, insn, opsize, dest, &addr);
2279         break;
2280     case 5: /* eori */
2281         tcg_gen_xor_i32(dest, src1, im);
2282         if (with_SR) {
2283             gen_set_sr(s, dest, opsize == OS_BYTE);
2284         } else {
2285             DEST_EA(env, insn, opsize, dest, &addr);
2286             gen_logic_cc(s, dest, opsize);
2287         }
2288         break;
2289     case 6: /* cmpi */
2290         gen_update_cc_cmp(s, src1, im, opsize);
2291         break;
2292     default:
2293         abort();
2294     }
2295     tcg_temp_free(im);
2296     tcg_temp_free(dest);
2297 }
2298 
2299 DISAS_INSN(cas)
2300 {
2301     int opsize;
2302     TCGv addr;
2303     uint16_t ext;
2304     TCGv load;
2305     TCGv cmp;
2306     TCGMemOp opc;
2307 
2308     switch ((insn >> 9) & 3) {
2309     case 1:
2310         opsize = OS_BYTE;
2311         opc = MO_SB;
2312         break;
2313     case 2:
2314         opsize = OS_WORD;
2315         opc = MO_TESW;
2316         break;
2317     case 3:
2318         opsize = OS_LONG;
2319         opc = MO_TESL;
2320         break;
2321     default:
2322         g_assert_not_reached();
2323     }
2324 
2325     ext = read_im16(env, s);
2326 
2327     /* cas Dc,Du,<EA> */
2328 
2329     addr = gen_lea(env, s, insn, opsize);
2330     if (IS_NULL_QREG(addr)) {
2331         gen_addr_fault(s);
2332         return;
2333     }
2334 
2335     cmp = gen_extend(DREG(ext, 0), opsize, 1);
2336 
2337     /* if  <EA> == Dc then
2338      *     <EA> = Du
2339      *     Dc = <EA> (because <EA> == Dc)
2340      * else
2341      *     Dc = <EA>
2342      */
2343 
2344     load = tcg_temp_new();
2345     tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2346                                IS_USER(s), opc);
2347     /* update flags before Dc is overwritten with the loaded value */
2348     gen_update_cc_cmp(s, load, cmp, opsize);
2349     gen_partset_reg(opsize, DREG(ext, 0), load);
2350 
2351     tcg_temp_free(load);
2352 
2353     switch (extract32(insn, 3, 3)) {
2354     case 3: /* Indirect postincrement.  */
2355         tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2356         break;
2357     case 4: /* Indirect predecrement.  */
2358         tcg_gen_mov_i32(AREG(insn, 0), addr);
2359         break;
2360     }
2361 }
2362 
2363 DISAS_INSN(cas2w)
2364 {
2365     uint16_t ext1, ext2;
2366     TCGv addr1, addr2;
2367     TCGv regs;
2368 
2369     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2370 
2371     ext1 = read_im16(env, s);
2372 
2373     if (ext1 & 0x8000) {
2374         /* Address Register */
2375         addr1 = AREG(ext1, 12);
2376     } else {
2377         /* Data Register */
2378         addr1 = DREG(ext1, 12);
2379     }
2380 
2381     ext2 = read_im16(env, s);
2382     if (ext2 & 0x8000) {
2383         /* Address Register */
2384         addr2 = AREG(ext2, 12);
2385     } else {
2386         /* Data Register */
2387         addr2 = DREG(ext2, 12);
2388     }
2389 
2390     /* if (R1) == Dc1 && (R2) == Dc2 then
2391      *     (R1) = Du1
2392      *     (R2) = Du2
2393      * else
2394      *     Dc1 = (R1)
2395      *     Dc2 = (R2)
2396      */
2397 
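         /* Pack the four register numbers for the helper:
          * bits 0-2 = Du2, 3-5 = Du1, 6-8 = Dc2, 9-11 = Dc1
          * (each extension word has Dc in bits 0-2 and Du in bits 6-8).
          */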
2398     regs = tcg_const_i32(REG(ext2, 6) |
2399                          (REG(ext1, 6) << 3) |
2400                          (REG(ext2, 0) << 6) |
2401                          (REG(ext1, 0) << 9));
2402     if (tb_cflags(s->tb) & CF_PARALLEL) {
2403         gen_helper_exit_atomic(cpu_env);
2404     } else {
2405         gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2406     }
2407     tcg_temp_free(regs);
2408 
2409     /* Note that the cas2w helper also assigns to env->cc_op.  */
2410     s->cc_op = CC_OP_CMPW;
2411     s->cc_op_synced = 1;
2412 }
2413 
2414 DISAS_INSN(cas2l)
2415 {
2416     uint16_t ext1, ext2;
2417     TCGv addr1, addr2, regs;
2418 
2419     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2420 
2421     ext1 = read_im16(env, s);
2422 
2423     if (ext1 & 0x8000) {
2424         /* Address Register */
2425         addr1 = AREG(ext1, 12);
2426     } else {
2427         /* Data Register */
2428         addr1 = DREG(ext1, 12);
2429     }
2430 
2431     ext2 = read_im16(env, s);
2432     if (ext2 & 0x8000) {
2433         /* Address Register */
2434         addr2 = AREG(ext2, 12);
2435     } else {
2436         /* Data Register */
2437         addr2 = DREG(ext2, 12);
2438     }
2439 
2440     /* if (R1) == Dc1 && (R2) == Dc2 then
2441      *     (R1) = Du1
2442      *     (R2) = Du2
2443      * else
2444      *     Dc1 = (R1)
2445      *     Dc2 = (R2)
2446      */
2447 
2448     regs = tcg_const_i32(REG(ext2, 6) |
2449                          (REG(ext1, 6) << 3) |
2450                          (REG(ext2, 0) << 6) |
2451                          (REG(ext1, 0) << 9));
2452     if (tb_cflags(s->tb) & CF_PARALLEL) {
2453         gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2454     } else {
2455         gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2456     }
2457     tcg_temp_free(regs);
2458 
2459     /* Note that the cas2l helper also assigns to env->cc_op.  */
2460     s->cc_op = CC_OP_CMPL;
2461     s->cc_op_synced = 1;
2462 }
2463 
2464 DISAS_INSN(byterev)
2465 {
2466     TCGv reg;
2467 
2468     reg = DREG(insn, 0);
2469     tcg_gen_bswap32_i32(reg, reg);
2470 }
2471 
2472 DISAS_INSN(move)
2473 {
2474     TCGv src;
2475     TCGv dest;
2476     int op;
2477     int opsize;
2478 
2479     switch (insn >> 12) {
2480     case 1: /* move.b */
2481         opsize = OS_BYTE;
2482         break;
2483     case 2: /* move.l */
2484         opsize = OS_LONG;
2485         break;
2486     case 3: /* move.w */
2487         opsize = OS_WORD;
2488         break;
2489     default:
2490         abort();
2491     }
2492     SRC_EA(env, src, opsize, 1, NULL);
2493     op = (insn >> 6) & 7;
2494     if (op == 1) {
2495         /* movea */
2496         /* The value will already have been sign extended.  */
2497         dest = AREG(insn, 9);
2498         tcg_gen_mov_i32(dest, src);
2499     } else {
2500         /* normal move */
2501         uint16_t dest_ea;
2502         dest_ea = ((insn >> 9) & 7) | (op << 3);
2503         DEST_EA(env, dest_ea, opsize, src, NULL);
2504         /* This will be correct because loads sign extend.  */
2505         gen_logic_cc(s, src, opsize);
2506     }
2507 }
2508 
2509 DISAS_INSN(negx)
2510 {
2511     TCGv z;
2512     TCGv src;
2513     TCGv addr;
2514     int opsize;
2515 
2516     opsize = insn_opsize(insn);
2517     SRC_EA(env, src, opsize, 1, &addr);
2518 
2519     gen_flush_flags(s); /* compute old Z */
2520 
2521     /* Perform subtraction with borrow.
2522      * (X, N) = -(src + X);
2523      */
2524 
2525     z = tcg_const_i32(0);
2526     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2527     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2528     tcg_temp_free(z);
2529     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2530 
2531     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2532 
2533     /* Compute signed-overflow for negation.  The normal formula for
2534      * subtraction is (res ^ dest) & (dest ^ src), but with dest==0
2535      * this simplifies to res & src.
2536      */
2537 
2538     tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2539 
2540     /* Copy the rest of the results into place.  */
2541     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2542     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2543 
2544     set_cc_op(s, CC_OP_FLAGS);
2545 
2546     /* result is in QREG_CC_N */
2547 
2548     DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2549 }
2550 
2551 DISAS_INSN(lea)
2552 {
2553     TCGv reg;
2554     TCGv tmp;
2555 
2556     reg = AREG(insn, 9);
2557     tmp = gen_lea(env, s, insn, OS_LONG);
2558     if (IS_NULL_QREG(tmp)) {
2559         gen_addr_fault(s);
2560         return;
2561     }
2562     tcg_gen_mov_i32(reg, tmp);
2563 }
2564 
2565 DISAS_INSN(clr)
2566 {
2567     int opsize;
2568     TCGv zero;
2569 
2570     zero = tcg_const_i32(0);
2571 
2572     opsize = insn_opsize(insn);
2573     DEST_EA(env, insn, opsize, zero, NULL);
2574     gen_logic_cc(s, zero, opsize);
2575     tcg_temp_free(zero);
2576 }
2577 
2578 DISAS_INSN(move_from_ccr)
2579 {
2580     TCGv ccr;
2581 
2582     ccr = gen_get_ccr(s);
2583     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2584 }
2585 
2586 DISAS_INSN(neg)
2587 {
2588     TCGv src1;
2589     TCGv dest;
2590     TCGv addr;
2591     int opsize;
2592 
2593     opsize = insn_opsize(insn);
2594     SRC_EA(env, src1, opsize, 1, &addr);
2595     dest = tcg_temp_new();
2596     tcg_gen_neg_i32(dest, src1);
2597     set_cc_op(s, CC_OP_SUBB + opsize);
2598     gen_update_cc_add(dest, src1, opsize);
2599     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2600     DEST_EA(env, insn, opsize, dest, &addr);
2601     tcg_temp_free(dest);
2602 }
2603 
2604 DISAS_INSN(move_to_ccr)
2605 {
2606     gen_move_to_sr(env, s, insn, true);
2607 }
2608 
2609 DISAS_INSN(not)
2610 {
2611     TCGv src1;
2612     TCGv dest;
2613     TCGv addr;
2614     int opsize;
2615 
2616     opsize = insn_opsize(insn);
2617     SRC_EA(env, src1, opsize, 1, &addr);
2618     dest = tcg_temp_new();
2619     tcg_gen_not_i32(dest, src1);
2620     DEST_EA(env, insn, opsize, dest, &addr);
2621     gen_logic_cc(s, dest, opsize);
2622 }
2623 
2624 DISAS_INSN(swap)
2625 {
2626     TCGv src1;
2627     TCGv src2;
2628     TCGv reg;
2629 
2630     src1 = tcg_temp_new();
2631     src2 = tcg_temp_new();
2632     reg = DREG(insn, 0);
2633     tcg_gen_shli_i32(src1, reg, 16);
2634     tcg_gen_shri_i32(src2, reg, 16);
2635     tcg_gen_or_i32(reg, src1, src2);
2636     tcg_temp_free(src2);
2637     tcg_temp_free(src1);
2638     gen_logic_cc(s, reg, OS_LONG);
2639 }
2640 
2641 DISAS_INSN(bkpt)
2642 {
2643     gen_exception(s, s->insn_pc, EXCP_DEBUG);
2644 }
2645 
2646 DISAS_INSN(pea)
2647 {
2648     TCGv tmp;
2649 
2650     tmp = gen_lea(env, s, insn, OS_LONG);
2651     if (IS_NULL_QREG(tmp)) {
2652         gen_addr_fault(s);
2653         return;
2654     }
2655     gen_push(s, tmp);
2656 }
2657 
2658 DISAS_INSN(ext)
2659 {
2660     int op;
2661     TCGv reg;
2662     TCGv tmp;
2663 
2664     reg = DREG(insn, 0);
2665     op = (insn >> 6) & 7;
2666     tmp = tcg_temp_new();
2667     if (op == 3)
2668         tcg_gen_ext16s_i32(tmp, reg);
2669     else
2670         tcg_gen_ext8s_i32(tmp, reg);
2671     if (op == 2)
2672         gen_partset_reg(OS_WORD, reg, tmp);
2673     else
2674         tcg_gen_mov_i32(reg, tmp);
2675     gen_logic_cc(s, tmp, OS_LONG);
2676     tcg_temp_free(tmp);
2677 }
2678 
2679 DISAS_INSN(tst)
2680 {
2681     int opsize;
2682     TCGv tmp;
2683 
2684     opsize = insn_opsize(insn);
2685     SRC_EA(env, tmp, opsize, 1, NULL);
2686     gen_logic_cc(s, tmp, opsize);
2687 }
2688 
2689 DISAS_INSN(pulse)
2690 {
2691     /* Implemented as a NOP.  */
2692 }
2693 
2694 DISAS_INSN(illegal)
2695 {
2696     gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
2697 }
2698 
2699 /* ??? This should be atomic.  */
2700 DISAS_INSN(tas)
2701 {
2702     TCGv dest;
2703     TCGv src1;
2704     TCGv addr;
2705 
2706     dest = tcg_temp_new();
2707     SRC_EA(env, src1, OS_BYTE, 1, &addr);
2708     gen_logic_cc(s, src1, OS_BYTE);
2709     tcg_gen_ori_i32(dest, src1, 0x80);
2710     DEST_EA(env, insn, OS_BYTE, dest, &addr);
2711     tcg_temp_free(dest);
2712 }
2713 
2714 DISAS_INSN(mull)
2715 {
2716     uint16_t ext;
2717     TCGv src1;
2718     int sign;
2719 
2720     ext = read_im16(env, s);
2721 
2722     sign = ext & 0x800;
2723 
2724     if (ext & 0x400) {
2725         if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2726             gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
2727             return;
2728         }
2729 
2730         SRC_EA(env, src1, OS_LONG, 0, NULL);
2731 
2732         if (sign) {
2733             tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2734         } else {
2735             tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2736         }
2737         /* if Dl == Dh, 68040 returns low word */
2738         tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2739         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2740         tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2741 
2742         tcg_gen_movi_i32(QREG_CC_V, 0);
2743         tcg_gen_movi_i32(QREG_CC_C, 0);
2744 
2745         set_cc_op(s, CC_OP_FLAGS);
2746         return;
2747     }
2748     SRC_EA(env, src1, OS_LONG, 0, NULL);
2749     if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2750         tcg_gen_movi_i32(QREG_CC_C, 0);
2751         if (sign) {
2752             tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2753             /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2754             tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2755             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2756         } else {
2757             tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2758             /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2759             tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2760         }
2761         tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2762         tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2763 
2764         tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2765 
2766         set_cc_op(s, CC_OP_FLAGS);
2767     } else {
2768         /* The upper 32 bits of the product are discarded, so
2769            muls.l and mulu.l are functionally equivalent.  */
2770         tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2771         gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2772     }
2773 }
2774 
2775 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2776 {
2777     TCGv reg;
2778     TCGv tmp;
2779 
2780     reg = AREG(insn, 0);
2781     tmp = tcg_temp_new();
2782     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2783     gen_store(s, OS_LONG, tmp, reg);
2784     if ((insn & 7) != 7) {
2785         tcg_gen_mov_i32(reg, tmp);
2786     }
2787     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2788     tcg_temp_free(tmp);
2789 }
2790 
2791 DISAS_INSN(link)
2792 {
2793     int16_t offset;
2794 
2795     offset = read_im16(env, s);
2796     gen_link(s, insn, offset);
2797 }
2798 
2799 DISAS_INSN(linkl)
2800 {
2801     int32_t offset;
2802 
2803     offset = read_im32(env, s);
2804     gen_link(s, insn, offset);
2805 }
2806 
2807 DISAS_INSN(unlk)
2808 {
2809     TCGv src;
2810     TCGv reg;
2811     TCGv tmp;
2812 
2813     src = tcg_temp_new();
2814     reg = AREG(insn, 0);
2815     tcg_gen_mov_i32(src, reg);
2816     tmp = gen_load(s, OS_LONG, src, 0);
2817     tcg_gen_mov_i32(reg, tmp);
2818     tcg_gen_addi_i32(QREG_SP, src, 4);
2819     tcg_temp_free(src);
2820 }
2821 
2822 #if defined(CONFIG_SOFTMMU)
2823 DISAS_INSN(reset)
2824 {
2825     if (IS_USER(s)) {
2826         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
2827         return;
2828     }
2829 
2830     gen_helper_reset(cpu_env);
2831 }
2832 #endif
2833 
2834 DISAS_INSN(nop)
2835 {
2836 }
2837 
2838 DISAS_INSN(rtd)
2839 {
2840     TCGv tmp;
2841     int16_t offset = read_im16(env, s);
2842 
2843     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2844     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2845     gen_jmp(s, tmp);
2846 }
2847 
2848 DISAS_INSN(rts)
2849 {
2850     TCGv tmp;
2851 
2852     tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2853     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2854     gen_jmp(s, tmp);
2855 }
2856 
2857 DISAS_INSN(jump)
2858 {
2859     TCGv tmp;
2860 
2861     /* Load the target address first to ensure correct exception
2862        behavior.  */
2863     tmp = gen_lea(env, s, insn, OS_LONG);
2864     if (IS_NULL_QREG(tmp)) {
2865         gen_addr_fault(s);
2866         return;
2867     }
2868     if ((insn & 0x40) == 0) {
2869         /* jsr */
2870         gen_push(s, tcg_const_i32(s->pc));
2871     }
2872     gen_jmp(s, tmp);
2873 }
2874 
2875 DISAS_INSN(addsubq)
2876 {
2877     TCGv src;
2878     TCGv dest;
2879     TCGv val;
2880     int imm;
2881     TCGv addr;
2882     int opsize;
2883 
2884     if ((insn & 070) == 010) {
2885         /* Operation on address register is always long.  */
2886         opsize = OS_LONG;
2887     } else {
2888         opsize = insn_opsize(insn);
2889     }
2890     SRC_EA(env, src, opsize, 1, &addr);
2891     imm = (insn >> 9) & 7;
2892     if (imm == 0) {
2893         imm = 8;
2894     }
2895     val = tcg_const_i32(imm);
2896     dest = tcg_temp_new();
2897     tcg_gen_mov_i32(dest, src);
2898     if ((insn & 0x38) == 0x08) {
2899         /* Don't update condition codes if the destination is an
2900            address register.  */
2901         if (insn & 0x0100) {
2902             tcg_gen_sub_i32(dest, dest, val);
2903         } else {
2904             tcg_gen_add_i32(dest, dest, val);
2905         }
2906     } else {
2907         if (insn & 0x0100) {
2908             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2909             tcg_gen_sub_i32(dest, dest, val);
2910             set_cc_op(s, CC_OP_SUBB + opsize);
2911         } else {
2912             tcg_gen_add_i32(dest, dest, val);
2913             tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2914             set_cc_op(s, CC_OP_ADDB + opsize);
2915         }
2916         gen_update_cc_add(dest, val, opsize);
2917     }
2918     tcg_temp_free(val);
2919     DEST_EA(env, insn, opsize, dest, &addr);
2920     tcg_temp_free(dest);
2921 }
2922 
2923 DISAS_INSN(tpf)
2924 {
2925     switch (insn & 7) {
2926     case 2: /* One extension word.  */
2927         s->pc += 2;
2928         break;
2929     case 3: /* Two extension words.  */
2930         s->pc += 4;
2931         break;
2932     case 4: /* No extension words.  */
2933         break;
2934     default:
2935         disas_undef(env, s, insn);
2936     }
2937 }
2938 
2939 DISAS_INSN(branch)
2940 {
2941     int32_t offset;
2942     uint32_t base;
2943     int op;
2944     TCGLabel *l1;
2945 
2946     base = s->pc;
2947     op = (insn >> 8) & 0xf;
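         /* An 8-bit displacement of 0 means a 16-bit displacement word
          * follows; a value of -1 (0xff) means a 32-bit displacement follows.
          */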
2948     offset = (int8_t)insn;
2949     if (offset == 0) {
2950         offset = (int16_t)read_im16(env, s);
2951     } else if (offset == -1) {
2952         offset = read_im32(env, s);
2953     }
2954     if (op == 1) {
2955         /* bsr */
2956         gen_push(s, tcg_const_i32(s->pc));
2957     }
2958     if (op > 1) {
2959         /* Bcc */
2960         l1 = gen_new_label();
2961         gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2962         gen_jmp_tb(s, 1, base + offset);
2963         gen_set_label(l1);
2964         gen_jmp_tb(s, 0, s->pc);
2965     } else {
2966         /* Unconditional branch.  */
2967         update_cc_op(s);
2968         gen_jmp_tb(s, 0, base + offset);
2969     }
2970 }
2971 
2972 DISAS_INSN(moveq)
2973 {
2974     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2975     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2976 }
2977 
2978 DISAS_INSN(mvzs)
2979 {
2980     int opsize;
2981     TCGv src;
2982     TCGv reg;
2983 
2984     if (insn & 0x40)
2985         opsize = OS_WORD;
2986     else
2987         opsize = OS_BYTE;
2988     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2989     reg = DREG(insn, 9);
2990     tcg_gen_mov_i32(reg, src);
2991     gen_logic_cc(s, src, opsize);
2992 }
2993 
2994 DISAS_INSN(or)
2995 {
2996     TCGv reg;
2997     TCGv dest;
2998     TCGv src;
2999     TCGv addr;
3000     int opsize;
3001 
3002     opsize = insn_opsize(insn);
3003     reg = gen_extend(DREG(insn, 9), opsize, 0);
3004     dest = tcg_temp_new();
3005     if (insn & 0x100) {
3006         SRC_EA(env, src, opsize, 0, &addr);
3007         tcg_gen_or_i32(dest, src, reg);
3008         DEST_EA(env, insn, opsize, dest, &addr);
3009     } else {
3010         SRC_EA(env, src, opsize, 0, NULL);
3011         tcg_gen_or_i32(dest, src, reg);
3012         gen_partset_reg(opsize, DREG(insn, 9), dest);
3013     }
3014     gen_logic_cc(s, dest, opsize);
3015     tcg_temp_free(dest);
3016 }
3017 
3018 DISAS_INSN(suba)
3019 {
3020     TCGv src;
3021     TCGv reg;
3022 
3023     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3024     reg = AREG(insn, 9);
3025     tcg_gen_sub_i32(reg, reg, src);
3026 }
3027 
3028 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3029 {
3030     TCGv tmp;
3031 
3032     gen_flush_flags(s); /* compute old Z */
3033 
3034     /* Perform subtraction with borrow.
3035      * (X, N) = dest - (src + X);
3036      */
3037 
3038     tmp = tcg_const_i32(0);
3039     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
3040     tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
3041     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3042     tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3043 
3044     /* Compute signed-overflow for subtraction: (res ^ dest) & (dest ^ src).  */
3045 
3046     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3047     tcg_gen_xor_i32(tmp, dest, src);
3048     tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3049     tcg_temp_free(tmp);
3050 
3051     /* Copy the rest of the results into place.  */
3052     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3053     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3054 
3055     set_cc_op(s, CC_OP_FLAGS);
3056 
3057     /* result is in QREG_CC_N */
3058 }
3059 
3060 DISAS_INSN(subx_reg)
3061 {
3062     TCGv dest;
3063     TCGv src;
3064     int opsize;
3065 
3066     opsize = insn_opsize(insn);
3067 
3068     src = gen_extend(DREG(insn, 0), opsize, 1);
3069     dest = gen_extend(DREG(insn, 9), opsize, 1);
3070 
3071     gen_subx(s, src, dest, opsize);
3072 
3073     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3074 }
3075 
3076 DISAS_INSN(subx_mem)
3077 {
3078     TCGv src;
3079     TCGv addr_src;
3080     TCGv dest;
3081     TCGv addr_dest;
3082     int opsize;
3083 
3084     opsize = insn_opsize(insn);
3085 
3086     addr_src = AREG(insn, 0);
3087     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3088     src = gen_load(s, opsize, addr_src, 1);
3089 
3090     addr_dest = AREG(insn, 9);
3091     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3092     dest = gen_load(s, opsize, addr_dest, 1);
3093 
3094     gen_subx(s, src, dest, opsize);
3095 
3096     gen_store(s, opsize, addr_dest, QREG_CC_N);
3097 }
3098 
3099 DISAS_INSN(mov3q)
3100 {
3101     TCGv src;
3102     int val;
3103 
3104     val = (insn >> 9) & 7;
3105     if (val == 0)
3106         val = -1;
3107     src = tcg_const_i32(val);
3108     gen_logic_cc(s, src, OS_LONG);
3109     DEST_EA(env, insn, OS_LONG, src, NULL);
3110     tcg_temp_free(src);
3111 }
3112 
3113 DISAS_INSN(cmp)
3114 {
3115     TCGv src;
3116     TCGv reg;
3117     int opsize;
3118 
3119     opsize = insn_opsize(insn);
3120     SRC_EA(env, src, opsize, 1, NULL);
3121     reg = gen_extend(DREG(insn, 9), opsize, 1);
3122     gen_update_cc_cmp(s, reg, src, opsize);
3123 }
3124 
3125 DISAS_INSN(cmpa)
3126 {
3127     int opsize;
3128     TCGv src;
3129     TCGv reg;
3130 
3131     if (insn & 0x100) {
3132         opsize = OS_LONG;
3133     } else {
3134         opsize = OS_WORD;
3135     }
3136     SRC_EA(env, src, opsize, 1, NULL);
3137     reg = AREG(insn, 9);
3138     gen_update_cc_cmp(s, reg, src, OS_LONG);
3139 }
3140 
3141 DISAS_INSN(cmpm)
3142 {
3143     int opsize = insn_opsize(insn);
3144     TCGv src, dst;
3145 
3146     /* Post-increment load (mode 3) from Ay.  */
3147     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3148                       NULL_QREG, NULL, EA_LOADS);
3149     /* Post-increment load (mode 3) from Ax.  */
3150     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3151                       NULL_QREG, NULL, EA_LOADS);
3152 
3153     gen_update_cc_cmp(s, dst, src, opsize);
3154 }
3155 
3156 DISAS_INSN(eor)
3157 {
3158     TCGv src;
3159     TCGv dest;
3160     TCGv addr;
3161     int opsize;
3162 
3163     opsize = insn_opsize(insn);
3164 
3165     SRC_EA(env, src, opsize, 0, &addr);
3166     dest = tcg_temp_new();
3167     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3168     gen_logic_cc(s, dest, opsize);
3169     DEST_EA(env, insn, opsize, dest, &addr);
3170     tcg_temp_free(dest);
3171 }
3172 
3173 static void do_exg(TCGv reg1, TCGv reg2)
3174 {
3175     TCGv temp = tcg_temp_new();
3176     tcg_gen_mov_i32(temp, reg1);
3177     tcg_gen_mov_i32(reg1, reg2);
3178     tcg_gen_mov_i32(reg2, temp);
3179     tcg_temp_free(temp);
3180 }
3181 
3182 DISAS_INSN(exg_dd)
3183 {
3184     /* exchange Dx and Dy */
3185     do_exg(DREG(insn, 9), DREG(insn, 0));
3186 }
3187 
3188 DISAS_INSN(exg_aa)
3189 {
3190     /* exchange Ax and Ay */
3191     do_exg(AREG(insn, 9), AREG(insn, 0));
3192 }
3193 
3194 DISAS_INSN(exg_da)
3195 {
3196     /* exchange Dx and Ay */
3197     do_exg(DREG(insn, 9), AREG(insn, 0));
3198 }
3199 
3200 DISAS_INSN(and)
3201 {
3202     TCGv src;
3203     TCGv reg;
3204     TCGv dest;
3205     TCGv addr;
3206     int opsize;
3207 
3208     dest = tcg_temp_new();
3209 
3210     opsize = insn_opsize(insn);
3211     reg = DREG(insn, 9);
3212     if (insn & 0x100) {
3213         SRC_EA(env, src, opsize, 0, &addr);
3214         tcg_gen_and_i32(dest, src, reg);
3215         DEST_EA(env, insn, opsize, dest, &addr);
3216     } else {
3217         SRC_EA(env, src, opsize, 0, NULL);
3218         tcg_gen_and_i32(dest, src, reg);
3219         gen_partset_reg(opsize, reg, dest);
3220     }
3221     gen_logic_cc(s, dest, opsize);
3222     tcg_temp_free(dest);
3223 }
3224 
3225 DISAS_INSN(adda)
3226 {
3227     TCGv src;
3228     TCGv reg;
3229 
3230     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3231     reg = AREG(insn, 9);
3232     tcg_gen_add_i32(reg, reg, src);
3233 }
3234 
3235 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3236 {
3237     TCGv tmp;
3238 
3239     gen_flush_flags(s); /* compute old Z */
3240 
3241     /* Perform addition with carry.
3242      * (X, N) = src + dest + X;
3243      */
3244 
3245     tmp = tcg_const_i32(0);
3246     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3247     tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3248     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3249 
3250     /* Compute signed-overflow for addition.  */
3251 
3252     tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3253     tcg_gen_xor_i32(tmp, dest, src);
3254     tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3255     tcg_temp_free(tmp);
3256 
3257     /* Copy the rest of the results into place.  */
3258     tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3259     tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3260 
3261     set_cc_op(s, CC_OP_FLAGS);
3262 
3263     /* result is in QREG_CC_N */
3264 }
3265 
3266 DISAS_INSN(addx_reg)
3267 {
3268     TCGv dest;
3269     TCGv src;
3270     int opsize;
3271 
3272     opsize = insn_opsize(insn);
3273 
3274     dest = gen_extend(DREG(insn, 9), opsize, 1);
3275     src = gen_extend(DREG(insn, 0), opsize, 1);
3276 
3277     gen_addx(s, src, dest, opsize);
3278 
3279     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3280 }
3281 
3282 DISAS_INSN(addx_mem)
3283 {
3284     TCGv src;
3285     TCGv addr_src;
3286     TCGv dest;
3287     TCGv addr_dest;
3288     int opsize;
3289 
3290     opsize = insn_opsize(insn);
3291 
3292     addr_src = AREG(insn, 0);
3293     tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3294     src = gen_load(s, opsize, addr_src, 1);
3295 
3296     addr_dest = AREG(insn, 9);
3297     tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3298     dest = gen_load(s, opsize, addr_dest, 1);
3299 
3300     gen_addx(s, src, dest, opsize);
3301 
3302     gen_store(s, opsize, addr_dest, QREG_CC_N);
3303 }
3304 
3305 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3306 {
3307     int count = (insn >> 9) & 7;
3308     int logical = insn & 8;
3309     int left = insn & 0x100;
3310     int bits = opsize_bytes(opsize) * 8;
3311     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3312 
3313     if (count == 0) {
3314         count = 8;
3315     }
3316 
3317     tcg_gen_movi_i32(QREG_CC_V, 0);
3318     if (left) {
3319         tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3320         tcg_gen_shli_i32(QREG_CC_N, reg, count);
3321 
3322         /* Note that ColdFire always clears V (done above),
3323            while M68000 sets it if the most significant bit is changed at
3324            any time during the shift operation.  */
3325         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3326             /* if shift count >= bits, V is (reg != 0) */
3327             if (count >= bits) {
3328                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3329             } else {
3330                 TCGv t0 = tcg_temp_new();
3331                 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3332                 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3333                 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3334                 tcg_temp_free(t0);
3335             }
3336             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3337         }
3338     } else {
3339         tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3340         if (logical) {
3341             tcg_gen_shri_i32(QREG_CC_N, reg, count);
3342         } else {
3343             tcg_gen_sari_i32(QREG_CC_N, reg, count);
3344         }
3345     }
3346 
3347     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3348     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3349     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3350     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3351 
3352     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3353     set_cc_op(s, CC_OP_FLAGS);
3354 }
3355 
3356 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3357 {
3358     int logical = insn & 8;
3359     int left = insn & 0x100;
3360     int bits = opsize_bytes(opsize) * 8;
3361     TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
3362     TCGv s32;
3363     TCGv_i64 t64, s64;
3364 
3365     t64 = tcg_temp_new_i64();
3366     s64 = tcg_temp_new_i64();
3367     s32 = tcg_temp_new();
3368 
3369     /* Note that m68k truncates the shift count modulo 64, not 32.
3370        In addition, a 64-bit shift makes it easy to find "the last
3371        bit shifted out", for the carry flag.  */
3372     tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3373     tcg_gen_extu_i32_i64(s64, s32);
3374     tcg_gen_extu_i32_i64(t64, reg);
3375 
3376     /* Optimistically set V=0.  Also used as a zero source below.  */
3377     tcg_gen_movi_i32(QREG_CC_V, 0);
3378     if (left) {
3379         tcg_gen_shl_i64(t64, t64, s64);
3380 
3381         if (opsize == OS_LONG) {
3382             tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3383             /* Note that C=0 if shift count is 0, and we get that for free.  */
3384         } else {
3385             TCGv zero = tcg_const_i32(0);
3386             tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3387             tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3388             tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3389                                 s32, zero, zero, QREG_CC_C);
3390             tcg_temp_free(zero);
3391         }
3392         tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3393 
3394         /* X = C, but only if the shift count was non-zero.  */
3395         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3396                             QREG_CC_C, QREG_CC_X);
3397 
3398         /* M68000 sets V if the most significant bit is changed at
3399          * any time during the shift operation.  Do this via creating
3400          * an extension of the sign bit, comparing, and discarding
3401          * the bits below the sign bit.  I.e.
3402          *     int64_t s = (intN_t)reg;
3403          *     int64_t t = (int64_t)(intN_t)reg << count;
3404          *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3405          */
3406         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3407             TCGv_i64 tt = tcg_const_i64(32);
3408             /* if shift is greater than 32, use 32 */
3409             tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3410             tcg_temp_free_i64(tt);
3411             /* Sign extend the input to 64 bits; re-do the shift.  */
3412             tcg_gen_ext_i32_i64(t64, reg);
3413             tcg_gen_shl_i64(s64, t64, s64);
3414             /* Clear all bits that are unchanged.  */
3415             tcg_gen_xor_i64(t64, t64, s64);
3416             /* Ignore the bits below the sign bit.  */
3417             tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3418             /* If any bits remain set, we have overflow.  */
3419             tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3420             tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3421             tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3422         }
3423     } else {
3424         tcg_gen_shli_i64(t64, t64, 32);
3425         if (logical) {
3426             tcg_gen_shr_i64(t64, t64, s64);
3427         } else {
3428             tcg_gen_sar_i64(t64, t64, s64);
3429         }
3430         tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3431 
3432         /* Note that C=0 if shift count is 0, and we get that for free.  */
3433         tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3434 
3435         /* X = C, but only if the shift count was non-zero.  */
3436         tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3437                             QREG_CC_C, QREG_CC_X);
3438     }
3439     gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3440     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3441 
3442     tcg_temp_free(s32);
3443     tcg_temp_free_i64(s64);
3444     tcg_temp_free_i64(t64);
3445 
3446     /* Write back the result.  */
3447     gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3448     set_cc_op(s, CC_OP_FLAGS);
3449 }
3450 
3451 DISAS_INSN(shift8_im)
3452 {
3453     shift_im(s, insn, OS_BYTE);
3454 }
3455 
3456 DISAS_INSN(shift16_im)
3457 {
3458     shift_im(s, insn, OS_WORD);
3459 }
3460 
3461 DISAS_INSN(shift_im)
3462 {
3463     shift_im(s, insn, OS_LONG);
3464 }
3465 
3466 DISAS_INSN(shift8_reg)
3467 {
3468     shift_reg(s, insn, OS_BYTE);
3469 }
3470 
3471 DISAS_INSN(shift16_reg)
3472 {
3473     shift_reg(s, insn, OS_WORD);
3474 }
3475 
3476 DISAS_INSN(shift_reg)
3477 {
3478     shift_reg(s, insn, OS_LONG);
3479 }
3480 
3481 DISAS_INSN(shift_mem)
3482 {
3483     int logical = insn & 8;
3484     int left = insn & 0x100;
3485     TCGv src;
3486     TCGv addr;
3487 
3488     SRC_EA(env, src, OS_WORD, !logical, &addr);
3489     tcg_gen_movi_i32(QREG_CC_V, 0);
3490     if (left) {
3491         tcg_gen_shri_i32(QREG_CC_C, src, 15);
3492         tcg_gen_shli_i32(QREG_CC_N, src, 1);
3493 
3494         /* Note that ColdFire always clears V,
3495            while M68000 sets it if the most significant bit is changed at
3496            any time during the shift operation.  */
3497         if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3498             src = gen_extend(src, OS_WORD, 1);
3499             tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3500         }
3501     } else {
3502         tcg_gen_mov_i32(QREG_CC_C, src);
3503         if (logical) {
3504             tcg_gen_shri_i32(QREG_CC_N, src, 1);
3505         } else {
3506             tcg_gen_sari_i32(QREG_CC_N, src, 1);
3507         }
3508     }
3509 
3510     gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3511     tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3512     tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3513     tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3514 
3515     DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3516     set_cc_op(s, CC_OP_FLAGS);
3517 }
3518 
3519 static void rotate(TCGv reg, TCGv shift, int left, int size)
3520 {
3521     switch (size) {
3522     case 8:
3523         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
3524         tcg_gen_ext8u_i32(reg, reg);
3525         tcg_gen_muli_i32(reg, reg, 0x01010101);
3526         goto do_long;
3527     case 16:
3528         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3529         tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3530         goto do_long;
3531     do_long:
3532     default:
3533         if (left) {
3534             tcg_gen_rotl_i32(reg, reg, shift);
3535         } else {
3536             tcg_gen_rotr_i32(reg, reg, shift);
3537         }
3538     }
3539 
3540     /* compute flags */
3541 
3542     switch (size) {
3543     case 8:
3544         tcg_gen_ext8s_i32(reg, reg);
3545         break;
3546     case 16:
3547         tcg_gen_ext16s_i32(reg, reg);
3548         break;
3549     default:
3550         break;
3551     }
3552 
3553     /* QREG_CC_X is not affected */
3554 
3555     tcg_gen_mov_i32(QREG_CC_N, reg);
3556     tcg_gen_mov_i32(QREG_CC_Z, reg);
3557 
3558     if (left) {
3559         tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3560     } else {
3561         tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3562     }
3563 
3564     tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3565 }
3566 
3567 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3568 {
3569     switch (size) {
3570     case 8:
3571         tcg_gen_ext8s_i32(reg, reg);
3572         break;
3573     case 16:
3574         tcg_gen_ext16s_i32(reg, reg);
3575         break;
3576     default:
3577         break;
3578     }
3579     tcg_gen_mov_i32(QREG_CC_N, reg);
3580     tcg_gen_mov_i32(QREG_CC_Z, reg);
3581     tcg_gen_mov_i32(QREG_CC_X, X);
3582     tcg_gen_mov_i32(QREG_CC_C, X);
3583     tcg_gen_movi_i32(QREG_CC_V, 0);
3584 }
3585 
3586 /* Result of rotate_x() is valid if 0 <= shift <= size */
3587 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3588 {
3589     TCGv X, shl, shr, shx, sz, zero;
3590 
3591     sz = tcg_const_i32(size);
3592 
3593     shr = tcg_temp_new();
3594     shl = tcg_temp_new();
3595     shx = tcg_temp_new();
3596     if (left) {
3597         tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3598         tcg_gen_movi_i32(shr, size + 1);
3599         tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3600         tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3601         /* shx = shx < 0 ? size : shx; */
3602         zero = tcg_const_i32(0);
3603         tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3604         tcg_temp_free(zero);
3605     } else {
3606         tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3607         tcg_gen_movi_i32(shl, size + 1);
3608         tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3609         tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3610     }
3611 
3612     /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
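         /* e.g. a left rotate of size 8 with reg = 0xb1, X = 1, shift = 3:
          *   shl = 3, shr = 6, shx = 2
          *   reg = (0xb1 << 3) | (0xb1 >> 6) | (1 << 2) = 0x58e
          * the low 8 bits (0x8e) are the rotated value and bit 8 is the
          * new X, extracted below.
          */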
3613 
3614     tcg_gen_shl_i32(shl, reg, shl);
3615     tcg_gen_shr_i32(shr, reg, shr);
3616     tcg_gen_or_i32(reg, shl, shr);
3617     tcg_temp_free(shl);
3618     tcg_temp_free(shr);
3619     tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3620     tcg_gen_or_i32(reg, reg, shx);
3621     tcg_temp_free(shx);
3622 
3623     /* X = (reg >> size) & 1 */
3624 
3625     X = tcg_temp_new();
3626     tcg_gen_shr_i32(X, reg, sz);
3627     tcg_gen_andi_i32(X, X, 1);
3628     tcg_temp_free(sz);
3629 
3630     return X;
3631 }
3632 
3633 /* Result of rotate32_x() is valid if 0 <= shift < 33 */
3634 static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3635 {
3636     TCGv_i64 t0, shift64;
3637     TCGv X, lo, hi, zero;
3638 
3639     shift64 = tcg_temp_new_i64();
3640     tcg_gen_extu_i32_i64(shift64, shift);
3641 
3642     t0 = tcg_temp_new_i64();
3643 
3644     X = tcg_temp_new();
3645     lo = tcg_temp_new();
3646     hi = tcg_temp_new();
3647 
3648     if (left) {
3649         /* create [reg:X:..] */
3650 
3651         tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3652         tcg_gen_concat_i32_i64(t0, lo, reg);
3653 
3654         /* rotate */
3655 
3656         tcg_gen_rotl_i64(t0, t0, shift64);
3657         tcg_temp_free_i64(shift64);
3658 
3659         /* result is [reg:..:reg:X] */
3660 
3661         tcg_gen_extr_i64_i32(lo, hi, t0);
3662         tcg_gen_andi_i32(X, lo, 1);
3663 
3664         tcg_gen_shri_i32(lo, lo, 1);
3665     } else {
3666         /* create [..:X:reg] */
3667 
3668         tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3669 
3670         tcg_gen_rotr_i64(t0, t0, shift64);
3671         tcg_temp_free_i64(shift64);
3672 
3673         /* result is [X:reg:..:reg] */
3674 
3675         tcg_gen_extr_i64_i32(lo, hi, t0);
3676 
3677         /* extract X */
3678 
3679         tcg_gen_shri_i32(X, hi, 31);
3680 
3681         /* extract result */
3682 
3683         tcg_gen_shli_i32(hi, hi, 1);
3684     }
3685     tcg_temp_free_i64(t0);
3686     tcg_gen_or_i32(lo, lo, hi);
3687     tcg_temp_free(hi);
3688 
3689     /* if shift == 0, register and X are not affected */
3690 
3691     zero = tcg_const_i32(0);
3692     tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3693     tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3694     tcg_temp_free(zero);
3695     tcg_temp_free(lo);
3696 
3697     return X;
3698 }
3699 
3700 DISAS_INSN(rotate_im)
3701 {
3702     TCGv shift;
3703     int tmp;
3704     int left = (insn & 0x100);
3705 
3706     tmp = (insn >> 9) & 7;
3707     if (tmp == 0) {
3708         tmp = 8;
3709     }
3710 
3711     shift = tcg_const_i32(tmp);
3712     if (insn & 8) {
3713         rotate(DREG(insn, 0), shift, left, 32);
3714     } else {
3715         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3716         rotate_x_flags(DREG(insn, 0), X, 32);
3717         tcg_temp_free(X);
3718     }
3719     tcg_temp_free(shift);
3720 
3721     set_cc_op(s, CC_OP_FLAGS);
3722 }
3723 
3724 DISAS_INSN(rotate8_im)
3725 {
3726     int left = (insn & 0x100);
3727     TCGv reg;
3728     TCGv shift;
3729     int tmp;
3730 
3731     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3732 
3733     tmp = (insn >> 9) & 7;
3734     if (tmp == 0) {
3735         tmp = 8;
3736     }
3737 
3738     shift = tcg_const_i32(tmp);
3739     if (insn & 8) {
3740         rotate(reg, shift, left, 8);
3741     } else {
3742         TCGv X = rotate_x(reg, shift, left, 8);
3743         rotate_x_flags(reg, X, 8);
3744         tcg_temp_free(X);
3745     }
3746     tcg_temp_free(shift);
3747     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3748     set_cc_op(s, CC_OP_FLAGS);
3749 }
3750 
3751 DISAS_INSN(rotate16_im)
3752 {
3753     int left = (insn & 0x100);
3754     TCGv reg;
3755     TCGv shift;
3756     int tmp;
3757 
3758     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3759     tmp = (insn >> 9) & 7;
3760     if (tmp == 0) {
3761         tmp = 8;
3762     }
3763 
3764     shift = tcg_const_i32(tmp);
3765     if (insn & 8) {
3766         rotate(reg, shift, left, 16);
3767     } else {
3768         TCGv X = rotate_x(reg, shift, left, 16);
3769         rotate_x_flags(reg, X, 16);
3770         tcg_temp_free(X);
3771     }
3772     tcg_temp_free(shift);
3773     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3774     set_cc_op(s, CC_OP_FLAGS);
3775 }
3776 
3777 DISAS_INSN(rotate_reg)
3778 {
3779     TCGv reg;
3780     TCGv src;
3781     TCGv t0, t1;
3782     int left = (insn & 0x100);
3783 
3784     reg = DREG(insn, 0);
3785     src = DREG(insn, 9);
3786     /* shift in [0..63] */
3787     t0 = tcg_temp_new();
3788     tcg_gen_andi_i32(t0, src, 63);
3789     t1 = tcg_temp_new_i32();
3790     if (insn & 8) {
3791         tcg_gen_andi_i32(t1, src, 31);
3792         rotate(reg, t1, left, 32);
3793         /* if shift == 0, clear C */
3794         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3795                             t0, QREG_CC_V /* 0 */,
3796                             QREG_CC_V /* 0 */, QREG_CC_C);
3797     } else {
3798         TCGv X;
3799         /* modulo 33 */
3800         tcg_gen_movi_i32(t1, 33);
3801         tcg_gen_remu_i32(t1, t0, t1);
3802         X = rotate32_x(DREG(insn, 0), t1, left);
3803         rotate_x_flags(DREG(insn, 0), X, 32);
3804         tcg_temp_free(X);
3805     }
3806     tcg_temp_free(t1);
3807     tcg_temp_free(t0);
3808     set_cc_op(s, CC_OP_FLAGS);
3809 }
3810 
3811 DISAS_INSN(rotate8_reg)
3812 {
3813     TCGv reg;
3814     TCGv src;
3815     TCGv t0, t1;
3816     int left = (insn & 0x100);
3817 
3818     reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3819     src = DREG(insn, 9);
3820     /* shift in [0..63] */
3821     t0 = tcg_temp_new_i32();
3822     tcg_gen_andi_i32(t0, src, 63);
3823     t1 = tcg_temp_new_i32();
3824     if (insn & 8) {
3825         tcg_gen_andi_i32(t1, src, 7);
3826         rotate(reg, t1, left, 8);
3827         /* if shift == 0, clear C */
3828         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3829                             t0, QREG_CC_V /* 0 */,
3830                             QREG_CC_V /* 0 */, QREG_CC_C);
3831     } else {
3832         TCGv X;
3833         /* modulo 9 */
3834         tcg_gen_movi_i32(t1, 9);
3835         tcg_gen_remu_i32(t1, t0, t1);
3836         X = rotate_x(reg, t1, left, 8);
3837         rotate_x_flags(reg, X, 8);
3838         tcg_temp_free(X);
3839     }
3840     tcg_temp_free(t1);
3841     tcg_temp_free(t0);
3842     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3843     set_cc_op(s, CC_OP_FLAGS);
3844 }
3845 
3846 DISAS_INSN(rotate16_reg)
3847 {
3848     TCGv reg;
3849     TCGv src;
3850     TCGv t0, t1;
3851     int left = (insn & 0x100);
3852 
3853     reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3854     src = DREG(insn, 9);
3855     /* shift in [0..63] */
3856     t0 = tcg_temp_new_i32();
3857     tcg_gen_andi_i32(t0, src, 63);
3858     t1 = tcg_temp_new_i32();
3859     if (insn & 8) {
3860         tcg_gen_andi_i32(t1, src, 15);
3861         rotate(reg, t1, left, 16);
3862         /* if shift == 0, clear C */
3863         tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3864                             t0, QREG_CC_V /* 0 */,
3865                             QREG_CC_V /* 0 */, QREG_CC_C);
3866     } else {
3867         TCGv X;
3868         /* modulo 17 */
3869         tcg_gen_movi_i32(t1, 17);
3870         tcg_gen_remu_i32(t1, t0, t1);
3871         X = rotate_x(reg, t1, left, 16);
3872         rotate_x_flags(reg, X, 16);
3873         tcg_temp_free(X);
3874     }
3875     tcg_temp_free(t1);
3876     tcg_temp_free(t0);
3877     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3878     set_cc_op(s, CC_OP_FLAGS);
3879 }
3880 
3881 DISAS_INSN(rotate_mem)
3882 {
3883     TCGv src;
3884     TCGv addr;
3885     TCGv shift;
3886     int left = (insn & 0x100);
3887 
3888     SRC_EA(env, src, OS_WORD, 0, &addr);
3889 
3890     shift = tcg_const_i32(1);
3891     if (insn & 0x0200) {
3892         rotate(src, shift, left, 16);
3893     } else {
3894         TCGv X = rotate_x(src, shift, left, 16);
3895         rotate_x_flags(src, X, 16);
3896         tcg_temp_free(X);
3897     }
3898     tcg_temp_free(shift);
3899     DEST_EA(env, insn, OS_WORD, src, &addr);
3900     set_cc_op(s, CC_OP_FLAGS);
3901 }
3902 
3903 DISAS_INSN(bfext_reg)
3904 {
3905     int ext = read_im16(env, s);
3906     int is_sign = insn & 0x200;
3907     TCGv src = DREG(insn, 0);
3908     TCGv dst = DREG(ext, 12);
3909     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3910     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3911     int pos = 32 - ofs - len;        /* little bit-endian */
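    /*
     * Bitfield offsets count from the most significant bit, so a field at
     * big-endian offset 'ofs' starts at little-endian bit 32 - ofs - len;
     * 'pos' goes negative when the field wraps around bit 0, which the
     * immediate-offset path below handles by rotating first.
     */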
3912     TCGv tmp = tcg_temp_new();
3913     TCGv shift;
3914 
3915     /* In general, we're going to rotate the field so that it's at the
3916        top of the word and then right-shift by the complement of the
3917        width to extend the field.  */
3918     if (ext & 0x20) {
3919         /* Variable width.  */
3920         if (ext & 0x800) {
3921             /* Variable offset.  */
3922             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3923             tcg_gen_rotl_i32(tmp, src, tmp);
3924         } else {
3925             tcg_gen_rotli_i32(tmp, src, ofs);
3926         }
3927 
3928         shift = tcg_temp_new();
3929         tcg_gen_neg_i32(shift, DREG(ext, 0));
3930         tcg_gen_andi_i32(shift, shift, 31);
3931         tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
3932         if (is_sign) {
3933             tcg_gen_mov_i32(dst, QREG_CC_N);
3934         } else {
3935             tcg_gen_shr_i32(dst, tmp, shift);
3936         }
3937         tcg_temp_free(shift);
3938     } else {
3939         /* Immediate width.  */
3940         if (ext & 0x800) {
3941             /* Variable offset */
3942             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3943             tcg_gen_rotl_i32(tmp, src, tmp);
3944             src = tmp;
3945             pos = 32 - len;
3946         } else {
3947             /* Immediate offset.  If the field doesn't wrap around the
3948                end of the word, rely on (s)extract completely.  */
3949             if (pos < 0) {
3950                 tcg_gen_rotli_i32(tmp, src, ofs);
3951                 src = tmp;
3952                 pos = 32 - len;
3953             }
3954         }
3955 
3956         tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
3957         if (is_sign) {
3958             tcg_gen_mov_i32(dst, QREG_CC_N);
3959         } else {
3960             tcg_gen_extract_i32(dst, src, pos, len);
3961         }
3962     }
3963 
3964     tcg_temp_free(tmp);
3965     set_cc_op(s, CC_OP_LOGIC);
3966 }
3967 
3968 DISAS_INSN(bfext_mem)
3969 {
3970     int ext = read_im16(env, s);
3971     int is_sign = insn & 0x200;
3972     TCGv dest = DREG(ext, 12);
3973     TCGv addr, len, ofs;
3974 
3975     addr = gen_lea(env, s, insn, OS_UNSIZED);
3976     if (IS_NULL_QREG(addr)) {
3977         gen_addr_fault(s);
3978         return;
3979     }
3980 
3981     if (ext & 0x20) {
3982         len = DREG(ext, 0);
3983     } else {
3984         len = tcg_const_i32(extract32(ext, 0, 5));
3985     }
3986     if (ext & 0x800) {
3987         ofs = DREG(ext, 6);
3988     } else {
3989         ofs = tcg_const_i32(extract32(ext, 6, 5));
3990     }
3991 
3992     if (is_sign) {
3993         gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
3994         tcg_gen_mov_i32(QREG_CC_N, dest);
3995     } else {
3996         TCGv_i64 tmp = tcg_temp_new_i64();
3997         gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
3998         tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
3999         tcg_temp_free_i64(tmp);
4000     }
4001     set_cc_op(s, CC_OP_LOGIC);
4002 
4003     if (!(ext & 0x20)) {
4004         tcg_temp_free(len);
4005     }
4006     if (!(ext & 0x800)) {
4007         tcg_temp_free(ofs);
4008     }
4009 }
4010 
4011 DISAS_INSN(bfop_reg)
4012 {
4013     int ext = read_im16(env, s);
4014     TCGv src = DREG(insn, 0);
4015     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4016     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4017     TCGv mask, tofs, tlen;
4018 
4019     tofs = NULL;
4020     tlen = NULL;
4021     if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4022         tofs = tcg_temp_new();
4023         tlen = tcg_temp_new();
4024     }
4025 
4026     if ((ext & 0x820) == 0) {
4027         /* Immediate width and offset.  */
4028         uint32_t maski = 0x7fffffffu >> (len - 1);
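        /*
         * maski has the top 'len' bits clear; rotated right by 'ofs' the
         * zero run lands exactly over the field, so ANDing with the mask
         * clears the field, ORC sets it and EQV inverts it.
         */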
4029         if (ofs + len <= 32) {
4030             tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4031         } else {
4032             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4033         }
4034         tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4035         mask = tcg_const_i32(ror32(maski, ofs));
4036         if (tofs) {
4037             tcg_gen_movi_i32(tofs, ofs);
4038             tcg_gen_movi_i32(tlen, len);
4039         }
4040     } else {
4041         TCGv tmp = tcg_temp_new();
4042         if (ext & 0x20) {
4043             /* Variable width */
4044             tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4045             tcg_gen_andi_i32(tmp, tmp, 31);
4046             mask = tcg_const_i32(0x7fffffffu);
4047             tcg_gen_shr_i32(mask, mask, tmp);
4048             if (tlen) {
4049                 tcg_gen_addi_i32(tlen, tmp, 1);
4050             }
4051         } else {
4052             /* Immediate width */
4053             mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4054             if (tlen) {
4055                 tcg_gen_movi_i32(tlen, len);
4056             }
4057         }
4058         if (ext & 0x800) {
4059             /* Variable offset */
4060             tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4061             tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4062             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4063             tcg_gen_rotr_i32(mask, mask, tmp);
4064             if (tofs) {
4065                 tcg_gen_mov_i32(tofs, tmp);
4066             }
4067         } else {
4068             /* Immediate offset (and variable width) */
4069             tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4070             tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4071             tcg_gen_rotri_i32(mask, mask, ofs);
4072             if (tofs) {
4073                 tcg_gen_movi_i32(tofs, ofs);
4074             }
4075         }
4076         tcg_temp_free(tmp);
4077     }
4078     set_cc_op(s, CC_OP_LOGIC);
4079 
4080     switch (insn & 0x0f00) {
4081     case 0x0a00: /* bfchg */
4082         tcg_gen_eqv_i32(src, src, mask);
4083         break;
4084     case 0x0c00: /* bfclr */
4085         tcg_gen_and_i32(src, src, mask);
4086         break;
4087     case 0x0d00: /* bfffo */
4088         gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4089         tcg_temp_free(tlen);
4090         tcg_temp_free(tofs);
4091         break;
4092     case 0x0e00: /* bfset */
4093         tcg_gen_orc_i32(src, src, mask);
4094         break;
4095     case 0x0800: /* bftst */
4096         /* flags already set; no other work to do.  */
4097         break;
4098     default:
4099         g_assert_not_reached();
4100     }
4101     tcg_temp_free(mask);
4102 }
4103 
4104 DISAS_INSN(bfop_mem)
4105 {
4106     int ext = read_im16(env, s);
4107     TCGv addr, len, ofs;
4108     TCGv_i64 t64;
4109 
4110     addr = gen_lea(env, s, insn, OS_UNSIZED);
4111     if (IS_NULL_QREG(addr)) {
4112         gen_addr_fault(s);
4113         return;
4114     }
4115 
4116     if (ext & 0x20) {
4117         len = DREG(ext, 0);
4118     } else {
4119         len = tcg_const_i32(extract32(ext, 0, 5));
4120     }
4121     if (ext & 0x800) {
4122         ofs = DREG(ext, 6);
4123     } else {
4124         ofs = tcg_const_i32(extract32(ext, 6, 5));
4125     }
4126 
4127     switch (insn & 0x0f00) {
4128     case 0x0a00: /* bfchg */
4129         gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4130         break;
4131     case 0x0c00: /* bfclr */
4132         gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4133         break;
4134     case 0x0d00: /* bfffo */
4135         t64 = tcg_temp_new_i64();
4136         gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4137         tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4138         tcg_temp_free_i64(t64);
4139         break;
4140     case 0x0e00: /* bfset */
4141         gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4142         break;
4143     case 0x0800: /* bftst */
4144         gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4145         break;
4146     default:
4147         g_assert_not_reached();
4148     }
4149     set_cc_op(s, CC_OP_LOGIC);
4150 
4151     if (!(ext & 0x20)) {
4152         tcg_temp_free(len);
4153     }
4154     if (!(ext & 0x800)) {
4155         tcg_temp_free(ofs);
4156     }
4157 }
4158 
4159 DISAS_INSN(bfins_reg)
4160 {
4161     int ext = read_im16(env, s);
4162     TCGv dst = DREG(insn, 0);
4163     TCGv src = DREG(ext, 12);
4164     int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4165     int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4166     int pos = 32 - ofs - len;        /* little bit-endian */
4167     TCGv tmp;
4168 
4169     tmp = tcg_temp_new();
4170 
4171     if (ext & 0x20) {
4172         /* Variable width */
4173         tcg_gen_neg_i32(tmp, DREG(ext, 0));
4174         tcg_gen_andi_i32(tmp, tmp, 31);
4175         tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4176     } else {
4177         /* Immediate width */
4178         tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4179     }
4180     set_cc_op(s, CC_OP_LOGIC);
4181 
4182     /* Immediate width and offset */
4183     if ((ext & 0x820) == 0) {
4184         /* Check for suitability for deposit.  */
4185         if (pos >= 0) {
4186             tcg_gen_deposit_i32(dst, dst, src, pos, len);
4187         } else {
4188             uint32_t maski = -2U << (len - 1);
4189             uint32_t roti = (ofs + len) & 31;
4190             tcg_gen_andi_i32(tmp, src, ~maski);
4191             tcg_gen_rotri_i32(tmp, tmp, roti);
4192             tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4193             tcg_gen_or_i32(dst, dst, tmp);
4194         }
4195     } else {
4196         TCGv mask = tcg_temp_new();
4197         TCGv rot = tcg_temp_new();
4198 
4199         if (ext & 0x20) {
4200             /* Variable width */
4201             tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4202             tcg_gen_andi_i32(rot, rot, 31);
4203             tcg_gen_movi_i32(mask, -2);
4204             tcg_gen_shl_i32(mask, mask, rot);
4205             tcg_gen_mov_i32(rot, DREG(ext, 0));
4206             tcg_gen_andc_i32(tmp, src, mask);
4207         } else {
4208             /* Immediate width (variable offset) */
4209             uint32_t maski = -2U << (len - 1);
4210             tcg_gen_andi_i32(tmp, src, ~maski);
4211             tcg_gen_movi_i32(mask, maski);
4212             tcg_gen_movi_i32(rot, len & 31);
4213         }
4214         if (ext & 0x800) {
4215             /* Variable offset */
4216             tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4217         } else {
4218             /* Immediate offset (variable width) */
4219             tcg_gen_addi_i32(rot, rot, ofs);
4220         }
4221         tcg_gen_andi_i32(rot, rot, 31);
4222         tcg_gen_rotr_i32(mask, mask, rot);
4223         tcg_gen_rotr_i32(tmp, tmp, rot);
4224         tcg_gen_and_i32(dst, dst, mask);
4225         tcg_gen_or_i32(dst, dst, tmp);
4226 
4227         tcg_temp_free(rot);
4228         tcg_temp_free(mask);
4229     }
4230     tcg_temp_free(tmp);
4231 }
4232 
4233 DISAS_INSN(bfins_mem)
4234 {
4235     int ext = read_im16(env, s);
4236     TCGv src = DREG(ext, 12);
4237     TCGv addr, len, ofs;
4238 
4239     addr = gen_lea(env, s, insn, OS_UNSIZED);
4240     if (IS_NULL_QREG(addr)) {
4241         gen_addr_fault(s);
4242         return;
4243     }
4244 
4245     if (ext & 0x20) {
4246         len = DREG(ext, 0);
4247     } else {
4248         len = tcg_const_i32(extract32(ext, 0, 5));
4249     }
4250     if (ext & 0x800) {
4251         ofs = DREG(ext, 6);
4252     } else {
4253         ofs = tcg_const_i32(extract32(ext, 6, 5));
4254     }
4255 
4256     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4257     set_cc_op(s, CC_OP_LOGIC);
4258 
4259     if (!(ext & 0x20)) {
4260         tcg_temp_free(len);
4261     }
4262     if (!(ext & 0x800)) {
4263         tcg_temp_free(ofs);
4264     }
4265 }
4266 
4267 DISAS_INSN(ff1)
4268 {
4269     TCGv reg;
4270     reg = DREG(insn, 0);
4271     gen_logic_cc(s, reg, OS_LONG);
4272     gen_helper_ff1(reg, reg);
4273 }
4274 
4275 DISAS_INSN(chk)
4276 {
4277     TCGv src, reg;
4278     int opsize;
4279 
4280     switch ((insn >> 7) & 3) {
4281     case 3:
4282         opsize = OS_WORD;
4283         break;
4284     case 2:
4285         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4286             opsize = OS_LONG;
4287             break;
4288         }
4289         /* fallthru */
4290     default:
4291         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4292         return;
4293     }
4294     SRC_EA(env, src, opsize, 1, NULL);
4295     reg = gen_extend(DREG(insn, 9), opsize, 1);
4296 
4297     gen_flush_flags(s);
4298     gen_helper_chk(cpu_env, reg, src);
4299 }
4300 
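/*
 * CHK2: load a pair of bounds from consecutive memory locations and
 * compare the (sign-extended) register against them in the helper.
 * Encodings with bit 11 of the extension word clear take an
 * illegal-instruction exception.
 */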
4301 DISAS_INSN(chk2)
4302 {
4303     uint16_t ext;
4304     TCGv addr1, addr2, bound1, bound2, reg;
4305     int opsize;
4306 
4307     switch ((insn >> 9) & 3) {
4308     case 0:
4309         opsize = OS_BYTE;
4310         break;
4311     case 1:
4312         opsize = OS_WORD;
4313         break;
4314     case 2:
4315         opsize = OS_LONG;
4316         break;
4317     default:
4318         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4319         return;
4320     }
4321 
4322     ext = read_im16(env, s);
4323     if ((ext & 0x0800) == 0) {
4324         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4325         return;
4326     }
4327 
4328     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4329     addr2 = tcg_temp_new();
4330     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4331 
4332     bound1 = gen_load(s, opsize, addr1, 1);
4333     tcg_temp_free(addr1);
4334     bound2 = gen_load(s, opsize, addr2, 1);
4335     tcg_temp_free(addr2);
4336 
4337     reg = tcg_temp_new();
4338     if (ext & 0x8000) {
4339         tcg_gen_mov_i32(reg, AREG(ext, 12));
4340     } else {
4341         gen_ext(reg, DREG(ext, 12), opsize, 1);
4342     }
4343 
4344     gen_flush_flags(s);
4345     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4346     tcg_temp_free(reg);
4347 }
4348 
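/*
 * Copy one aligned 16-byte line from *src to *dst for MOVE16, using two
 * 64-bit loads followed by two 64-bit stores; the low four bits of both
 * addresses are ignored.
 */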
4349 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4350 {
4351     TCGv addr;
4352     TCGv_i64 t0, t1;
4353 
4354     addr = tcg_temp_new();
4355 
4356     t0 = tcg_temp_new_i64();
4357     t1 = tcg_temp_new_i64();
4358 
4359     tcg_gen_andi_i32(addr, src, ~15);
4360     tcg_gen_qemu_ld64(t0, addr, index);
4361     tcg_gen_addi_i32(addr, addr, 8);
4362     tcg_gen_qemu_ld64(t1, addr, index);
4363 
4364     tcg_gen_andi_i32(addr, dst, ~15);
4365     tcg_gen_qemu_st64(t0, addr, index);
4366     tcg_gen_addi_i32(addr, addr, 8);
4367     tcg_gen_qemu_st64(t1, addr, index);
4368 
4369     tcg_temp_free_i64(t0);
4370     tcg_temp_free_i64(t1);
4371     tcg_temp_free(addr);
4372 }
4373 
4374 DISAS_INSN(move16_reg)
4375 {
4376     int index = IS_USER(s);
4377     TCGv tmp;
4378     uint16_t ext;
4379 
4380     ext = read_im16(env, s);
4381     if ((ext & (1 << 15)) == 0) {
4382         gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4383     }
4384 
4385     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4386 
4387     /* Ax can be Ay, so save Ay before incrementing Ax */
4388     tmp = tcg_temp_new();
4389     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4390     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4391     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4392     tcg_temp_free(tmp);
4393 }
4394 
4395 DISAS_INSN(move16_mem)
4396 {
4397     int index = IS_USER(s);
4398     TCGv reg, addr;
4399 
4400     reg = AREG(insn, 0);
4401     addr = tcg_const_i32(read_im32(env, s));
4402 
4403     if ((insn >> 3) & 1) {
4404         /* MOVE16 (xxx).L, (Ay) */
4405         m68k_copy_line(reg, addr, index);
4406     } else {
4407         /* MOVE16 (Ay), (xxx).L */
4408         m68k_copy_line(addr, reg, index);
4409     }
4410 
4411     tcg_temp_free(addr);
4412 
4413     if (((insn >> 3) & 2) == 0) {
4414         /* (Ay)+ */
4415         tcg_gen_addi_i32(reg, reg, 16);
4416     }
4417 }
4418 
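/*
 * STRLDSR: the first extension word must be 0x46fc (the opcode of
 * MOVE #imm,SR) and is followed by the new SR value; the old SR is
 * pushed before the new one is installed.
 */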
4419 DISAS_INSN(strldsr)
4420 {
4421     uint16_t ext;
4422     uint32_t addr;
4423 
4424     addr = s->pc - 2;
4425     ext = read_im16(env, s);
4426     if (ext != 0x46FC) {
4427         gen_exception(s, addr, EXCP_UNSUPPORTED);
4428         return;
4429     }
4430     ext = read_im16(env, s);
4431     if (IS_USER(s) || (ext & SR_S) == 0) {
4432         gen_exception(s, addr, EXCP_PRIVILEGE);
4433         return;
4434     }
4435     gen_push(s, gen_get_sr(s));
4436     gen_set_sr_im(s, ext, 0);
4437 }
4438 
4439 DISAS_INSN(move_from_sr)
4440 {
4441     TCGv sr;
4442 
4443     if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4444         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4445         return;
4446     }
4447     sr = gen_get_sr(s);
4448     DEST_EA(env, insn, OS_WORD, sr, NULL);
4449 }
4450 
4451 #if defined(CONFIG_SOFTMMU)
4452 DISAS_INSN(move_to_sr)
4453 {
4454     if (IS_USER(s)) {
4455         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4456         return;
4457     }
4458     gen_move_to_sr(env, s, insn, false);
4459     gen_lookup_tb(s);
4460 }
4461 
4462 DISAS_INSN(move_from_usp)
4463 {
4464     if (IS_USER(s)) {
4465         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4466         return;
4467     }
4468     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4469                    offsetof(CPUM68KState, sp[M68K_USP]));
4470 }
4471 
4472 DISAS_INSN(move_to_usp)
4473 {
4474     if (IS_USER(s)) {
4475         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4476         return;
4477     }
4478     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4479                    offsetof(CPUM68KState, sp[M68K_USP]));
4480 }
4481 
4482 DISAS_INSN(halt)
4483 {
4484     if (IS_USER(s)) {
4485         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4486         return;
4487     }
4488 
4489     gen_exception(s, s->pc, EXCP_HALT_INSN);
4490 }
4491 
4492 DISAS_INSN(stop)
4493 {
4494     uint16_t ext;
4495 
4496     if (IS_USER(s)) {
4497         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4498         return;
4499     }
4500 
4501     ext = read_im16(env, s);
4502 
4503     gen_set_sr_im(s, ext, 0);
4504     tcg_gen_movi_i32(cpu_halted, 1);
4505     gen_exception(s, s->pc, EXCP_HLT);
4506 }
4507 
4508 DISAS_INSN(rte)
4509 {
4510     if (IS_USER(s)) {
4511         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4512         return;
4513     }
4514     gen_exception(s, s->insn_pc, EXCP_RTE);
4515 }
4516 
4517 DISAS_INSN(cf_movec)
4518 {
4519     uint16_t ext;
4520     TCGv reg;
4521 
4522     if (IS_USER(s)) {
4523         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4524         return;
4525     }
4526 
4527     ext = read_im16(env, s);
4528 
4529     if (ext & 0x8000) {
4530         reg = AREG(ext, 12);
4531     } else {
4532         reg = DREG(ext, 12);
4533     }
4534     gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4535     gen_lookup_tb(s);
4536 }
4537 
4538 DISAS_INSN(m68k_movec)
4539 {
4540     uint16_t ext;
4541     TCGv reg;
4542 
4543     if (IS_USER(s)) {
4544         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4545         return;
4546     }
4547 
4548     ext = read_im16(env, s);
4549 
4550     if (ext & 0x8000) {
4551         reg = AREG(ext, 12);
4552     } else {
4553         reg = DREG(ext, 12);
4554     }
4555     if (insn & 1) {
4556         gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4557     } else {
4558         gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4559     }
4560     gen_lookup_tb(s);
4561 }
4562 
4563 DISAS_INSN(intouch)
4564 {
4565     if (IS_USER(s)) {
4566         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4567         return;
4568     }
4569     /* ICache fetch.  Implement as no-op.  */
4570 }
4571 
4572 DISAS_INSN(cpushl)
4573 {
4574     if (IS_USER(s)) {
4575         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4576         return;
4577     }
4578     /* Cache push/invalidate.  Implement as no-op.  */
4579 }
4580 
4581 DISAS_INSN(cpush)
4582 {
4583     if (IS_USER(s)) {
4584         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4585         return;
4586     }
4587     /* Cache push/invalidate.  Implement as no-op.  */
4588 }
4589 
4590 DISAS_INSN(cinv)
4591 {
4592     if (IS_USER(s)) {
4593         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4594         return;
4595     }
4596     /* Invalidate cache line.  Implement as no-op.  */
4597 }
4598 
4599 DISAS_INSN(wddata)
4600 {
4601     gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4602 }
4603 
4604 DISAS_INSN(wdebug)
4605 {
4606     M68kCPU *cpu = m68k_env_get_cpu(env);
4607 
4608     if (IS_USER(s)) {
4609         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
4610         return;
4611     }
4612     /* TODO: Implement wdebug.  */
4613     cpu_abort(CPU(cpu), "WDEBUG not implemented");
4614 }
4615 #endif
4616 
4617 DISAS_INSN(trap)
4618 {
4619     gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
4620 }
4621 
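/*
 * FPIAR is not modelled: gen_load_fcr() returns zero for it and
 * gen_store_fcr() discards writes to it.
 */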
4622 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4623 {
4624     switch (reg) {
4625     case M68K_FPIAR:
4626         tcg_gen_movi_i32(res, 0);
4627         break;
4628     case M68K_FPSR:
4629         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4630         break;
4631     case M68K_FPCR:
4632         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4633         break;
4634     }
4635 }
4636 
4637 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4638 {
4639     switch (reg) {
4640     case M68K_FPIAR:
4641         break;
4642     case M68K_FPSR:
4643         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4644         break;
4645     case M68K_FPCR:
4646         gen_helper_set_fpcr(cpu_env, val);
4647         break;
4648     }
4649 }
4650 
4651 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4652 {
4653     int index = IS_USER(s);
4654     TCGv tmp;
4655 
4656     tmp = tcg_temp_new();
4657     gen_load_fcr(s, tmp, reg);
4658     tcg_gen_qemu_st32(tmp, addr, index);
4659     tcg_temp_free(tmp);
4660 }
4661 
4662 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4663 {
4664     int index = IS_USER(s);
4665     TCGv tmp;
4666 
4667     tmp = tcg_temp_new();
4668     tcg_gen_qemu_ld32u(tmp, addr, index);
4669     gen_store_fcr(s, tmp, reg);
4670     tcg_temp_free(tmp);
4671 }
4672 
4673 
4674 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4675                              uint32_t insn, uint32_t ext)
4676 {
4677     int mask = (ext >> 10) & 7;
4678     int is_write = (ext >> 13) & 1;
4679     int mode = extract32(insn, 3, 3);
4680     int i;
4681     TCGv addr, tmp;
4682 
4683     switch (mode) {
4684     case 0: /* Dn */
4685         if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4686             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4687             return;
4688         }
4689         if (is_write) {
4690             gen_load_fcr(s, DREG(insn, 0), mask);
4691         } else {
4692             gen_store_fcr(s, DREG(insn, 0), mask);
4693         }
4694         return;
4695     case 1: /* An, only with FPIAR */
4696         if (mask != M68K_FPIAR) {
4697             gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
4698             return;
4699         }
4700         if (is_write) {
4701             gen_load_fcr(s, AREG(insn, 0), mask);
4702         } else {
4703             gen_store_fcr(s, AREG(insn, 0), mask);
4704         }
4705         return;
4706     default:
4707         break;
4708     }
4709 
4710     tmp = gen_lea(env, s, insn, OS_LONG);
4711     if (IS_NULL_QREG(tmp)) {
4712         gen_addr_fault(s);
4713         return;
4714     }
4715 
4716     addr = tcg_temp_new();
4717     tcg_gen_mov_i32(addr, tmp);
4718 
4719     /* mask:
4720      *
4721      * 0b100 Floating-Point Control Register
4722      * 0b010 Floating-Point Status Register
4723      * 0b001 Floating-Point Instruction Address Register
4724      *
4725      */
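    /*
     * With predecrement addressing (mode 4) the selected registers are
     * stored at descending addresses and An is written back afterwards;
     * otherwise the transfers go to ascending addresses, with An updated
     * only for the postincrement mode (mode 3).
     */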
4726 
4727     if (is_write && mode == 4) {
4728         for (i = 2; i >= 0; i--, mask >>= 1) {
4729             if (mask & 1) {
4730                 gen_qemu_store_fcr(s, addr, 1 << i);
4731                 if (mask != 1) {
4732                     tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4733                 }
4734             }
4735         }
4736         tcg_gen_mov_i32(AREG(insn, 0), addr);
4737     } else {
4738         for (i = 0; i < 3; i++, mask >>= 1) {
4739             if (mask & 1) {
4740                 if (is_write) {
4741                     gen_qemu_store_fcr(s, addr, 1 << i);
4742                 } else {
4743                     gen_qemu_load_fcr(s, addr, 1 << i);
4744                 }
4745                 if (mask != 1 || mode == 3) {
4746                     tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4747                 }
4748             }
4749         }
4750         if (mode == 3) {
4751             tcg_gen_mov_i32(AREG(insn, 0), addr);
4752         }
4753     }
4754     tcg_temp_free_i32(addr);
4755 }
4756 
4757 static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
4758                           uint32_t insn, uint32_t ext)
4759 {
4760     int opsize;
4761     TCGv addr, tmp;
4762     int mode = (ext >> 11) & 0x3;
4763     int is_load = ((ext & 0x2000) == 0);
4764 
4765     if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
4766         opsize = OS_EXTENDED;
4767     } else {
4768         opsize = OS_DOUBLE;  /* FIXME */
4769     }
4770 
4771     addr = gen_lea(env, s, insn, opsize);
4772     if (IS_NULL_QREG(addr)) {
4773         gen_addr_fault(s);
4774         return;
4775     }
4776 
4777     tmp = tcg_temp_new();
4778     if (mode & 0x1) {
4779         /* Dynamic register list */
4780         tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
4781     } else {
4782         /* Static register list */
4783         tcg_gen_movi_i32(tmp, ext & 0xff);
4784     }
4785 
4786     if (!is_load && (mode & 2) == 0) {
4787         /* Predecrement addressing mode: only available for storing
4788          * registers to memory.
4789          */
4790         if (opsize == OS_EXTENDED) {
4791             gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
4792         } else {
4793             gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
4794         }
4795     } else {
4796         /* postincrement addressing mode */
4797         if (opsize == OS_EXTENDED) {
4798             if (is_load) {
4799                 gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
4800             } else {
4801                 gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
4802             }
4803         } else {
4804             if (is_load) {
4805                 gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
4806             } else {
4807                 gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
4808             }
4809         }
4810     }
4811     if ((insn & 070) == 030 || (insn & 070) == 040) {
4812         tcg_gen_mov_i32(AREG(insn, 0), tmp);
4813     }
4814     tcg_temp_free(tmp);
4815 }
4816 
4817 /* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4818    immediately before the next FP instruction is executed.  */
4819 DISAS_INSN(fpu)
4820 {
4821     uint16_t ext;
4822     int opmode;
4823     int opsize;
4824     TCGv_ptr cpu_src, cpu_dest;
4825 
4826     ext = read_im16(env, s);
4827     opmode = ext & 0x7f;
4828     switch ((ext >> 13) & 7) {
4829     case 0:
4830         break;
4831     case 1:
4832         goto undef;
4833     case 2:
4834         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4835             /* fmovecr */
4836             TCGv rom_offset = tcg_const_i32(opmode);
4837             cpu_dest = gen_fp_ptr(REG(ext, 7));
4838             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4839             tcg_temp_free_ptr(cpu_dest);
4840             tcg_temp_free(rom_offset);
4841             return;
4842         }
4843         break;
4844     case 3: /* fmove out */
4845         cpu_src = gen_fp_ptr(REG(ext, 7));
4846         opsize = ext_opsize(ext, 10);
4847         if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_STORE) == -1) {
4848             gen_addr_fault(s);
4849         }
4850         gen_helper_ftst(cpu_env, cpu_src);
4851         tcg_temp_free_ptr(cpu_src);
4852         return;
4853     case 4: /* fmove to control register.  */
4854     case 5: /* fmove from control register.  */
4855         gen_op_fmove_fcr(env, s, insn, ext);
4856         return;
4857     case 6: /* fmovem */
4858     case 7:
4859         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4860             goto undef;
4861         }
4862         gen_op_fmovem(env, s, insn, ext);
4863         return;
4864     }
4865     if (ext & (1 << 14)) {
4866         /* Source effective address.  */
4867         opsize = ext_opsize(ext, 10);
4868         cpu_src = gen_fp_result_ptr();
4869         if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_LOADS) == -1) {
4870             gen_addr_fault(s);
4871             return;
4872         }
4873     } else {
4874         /* Source register.  */
4875         opsize = OS_EXTENDED;
4876         cpu_src = gen_fp_ptr(REG(ext, 10));
4877     }
4878     cpu_dest = gen_fp_ptr(REG(ext, 7));
4879     switch (opmode) {
4880     case 0: /* fmove */
4881         gen_fp_move(cpu_dest, cpu_src);
4882         break;
4883     case 0x40: /* fsmove */
4884         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4885         break;
4886     case 0x44: /* fdmove */
4887         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4888         break;
4889     case 1: /* fint */
4890         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4891         break;
4892     case 3: /* fintrz */
4893         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4894         break;
4895     case 4: /* fsqrt */
4896         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4897         break;
4898     case 0x41: /* fssqrt */
4899         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4900         break;
4901     case 0x45: /* fdsqrt */
4902         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4903         break;
4904     case 0x18: /* fabs */
4905         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
4906         break;
4907     case 0x58: /* fsabs */
4908         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
4909         break;
4910     case 0x5c: /* fdabs */
4911         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
4912         break;
4913     case 0x1a: /* fneg */
4914         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
4915         break;
4916     case 0x5a: /* fsneg */
4917         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
4918         break;
4919     case 0x5e: /* fdneg */
4920         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
4921         break;
4922     case 0x20: /* fdiv */
4923         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4924         break;
4925     case 0x60: /* fsdiv */
4926         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4927         break;
4928     case 0x64: /* fddiv */
4929         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4930         break;
4931     case 0x22: /* fadd */
4932         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4933         break;
4934     case 0x62: /* fsadd */
4935         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4936         break;
4937     case 0x66: /* fdadd */
4938         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
4939         break;
4940     case 0x23: /* fmul */
4941         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4942         break;
4943     case 0x63: /* fsmul */
4944         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4945         break;
4946     case 0x67: /* fdmul */
4947         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4948         break;
4949     case 0x24: /* fsgldiv */
4950         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
4951         break;
4952     case 0x27: /* fsglmul */
4953         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
4954         break;
4955     case 0x28: /* fsub */
4956         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4957         break;
4958     case 0x68: /* fssub */
4959         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4960         break;
4961     case 0x6c: /* fdsub */
4962         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
4963         break;
4964     case 0x38: /* fcmp */
4965         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
4966         return;
4967     case 0x3a: /* ftst */
4968         gen_helper_ftst(cpu_env, cpu_src);
4969         return;
4970     default:
4971         goto undef;
4972     }
4973     tcg_temp_free_ptr(cpu_src);
4974     gen_helper_ftst(cpu_env, cpu_dest);
4975     tcg_temp_free_ptr(cpu_dest);
4976     return;
4977 undef:
4978     /* FIXME: Is this right for offset addressing modes?  */
4979     s->pc -= 2;
4980     disas_undef_fpu(env, s, insn);
4981 }
4982 
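/*
 * Translate an FPU condition code into a DisasCompare.  Each predicate is
 * derived from the FPSR condition-code bits (N, Z and the A/unordered
 * bit) and expressed as a comparison of v1 against zero.
 */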
4983 static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
4984 {
4985     TCGv fpsr;
4986 
4987     c->g1 = 1;
4988     c->v2 = tcg_const_i32(0);
4989     c->g2 = 0;
4990     /* TODO: Raise BSUN exception.  */
4991     fpsr = tcg_temp_new();
4992     gen_load_fcr(s, fpsr, M68K_FPSR);
4993     switch (cond) {
4994     case 0:  /* False */
4995     case 16: /* Signaling False */
4996         c->v1 = c->v2;
4997         c->tcond = TCG_COND_NEVER;
4998         break;
4999     case 1:  /* EQual Z */
5000     case 17: /* Signaling EQual Z */
5001         c->v1 = tcg_temp_new();
5002         c->g1 = 0;
5003         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5004         c->tcond = TCG_COND_NE;
5005         break;
5006     case 2:  /* Ordered Greater Than !(A || Z || N) */
5007     case 18: /* Greater Than !(A || Z || N) */
5008         c->v1 = tcg_temp_new();
5009         c->g1 = 0;
5010         tcg_gen_andi_i32(c->v1, fpsr,
5011                          FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5012         c->tcond = TCG_COND_EQ;
5013         break;
5014     case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
5015     case 19: /* Greater than or Equal Z || !(A || N) */
5016         c->v1 = tcg_temp_new();
5017         c->g1 = 0;
5018         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5019         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5020         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5021         tcg_gen_or_i32(c->v1, c->v1, fpsr);
5022         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5023         c->tcond = TCG_COND_NE;
5024         break;
5025     case 4:  /* Ordered Less Than !(!N || A || Z); */
5026     case 20: /* Less Than !(!N || A || Z); */
5027         c->v1 = tcg_temp_new();
5028         c->g1 = 0;
5029         tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5030         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5031         c->tcond = TCG_COND_EQ;
5032         break;
5033     case 5:  /* Ordered Less than or Equal Z || (N && !A) */
5034     case 21: /* Less than or Equal Z || (N && !A) */
5035         c->v1 = tcg_temp_new();
5036         c->g1 = 0;
5037         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5038         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5039         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5040         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5041         c->tcond = TCG_COND_NE;
5042         break;
5043     case 6:  /* Ordered Greater or Less than !(A || Z) */
5044     case 22: /* Greater or Less than !(A || Z) */
5045         c->v1 = tcg_temp_new();
5046         c->g1 = 0;
5047         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5048         c->tcond = TCG_COND_EQ;
5049         break;
5050     case 7:  /* Ordered !A */
5051     case 23: /* Greater, Less or Equal !A */
5052         c->v1 = tcg_temp_new();
5053         c->g1 = 0;
5054         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5055         c->tcond = TCG_COND_EQ;
5056         break;
5057     case 8:  /* Unordered A */
5058     case 24: /* Not Greater, Less or Equal A */
5059         c->v1 = tcg_temp_new();
5060         c->g1 = 0;
5061         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5062         c->tcond = TCG_COND_NE;
5063         break;
5064     case 9:  /* Unordered or Equal A || Z */
5065     case 25: /* Not Greater or Less than A || Z */
5066         c->v1 = tcg_temp_new();
5067         c->g1 = 0;
5068         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5069         c->tcond = TCG_COND_NE;
5070         break;
5071     case 10: /* Unordered or Greater Than A || !(N || Z) */
5072     case 26: /* Not Less or Equal A || !(N || Z) */
5073         c->v1 = tcg_temp_new();
5074         c->g1 = 0;
5075         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5076         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5077         tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5078         tcg_gen_or_i32(c->v1, c->v1, fpsr);
5079         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5080         c->tcond = TCG_COND_NE;
5081         break;
5082     case 11: /* Unordered or Greater or Equal A || Z || !N */
5083     case 27: /* Not Less Than A || Z || !N */
5084         c->v1 = tcg_temp_new();
5085         c->g1 = 0;
5086         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5087         tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5088         c->tcond = TCG_COND_NE;
5089         break;
5090     case 12: /* Unordered or Less Than A || (N && !Z) */
5091     case 28: /* Not Greater than or Equal A || (N && !Z) */
5092         c->v1 = tcg_temp_new();
5093         c->g1 = 0;
5094         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5095         tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5096         tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5097         tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5098         c->tcond = TCG_COND_NE;
5099         break;
5100     case 13: /* Unordered or Less or Equal A || Z || N */
5101     case 29: /* Not Greater Than A || Z || N */
5102         c->v1 = tcg_temp_new();
5103         c->g1 = 0;
5104         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5105         c->tcond = TCG_COND_NE;
5106         break;
5107     case 14: /* Not Equal !Z */
5108     case 30: /* Signaling Not Equal !Z */
5109         c->v1 = tcg_temp_new();
5110         c->g1 = 0;
5111         tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5112         c->tcond = TCG_COND_EQ;
5113         break;
5114     case 15: /* True */
5115     case 31: /* Signaling True */
5116         c->v1 = c->v2;
5117         c->tcond = TCG_COND_ALWAYS;
5118         break;
5119     }
5120     tcg_temp_free(fpsr);
5121 }
5122 
5123 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5124 {
5125     DisasCompare c;
5126 
5127     gen_fcc_cond(&c, s, cond);
5128     update_cc_op(s);
5129     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5130     free_cond(&c);
5131 }
5132 
5133 DISAS_INSN(fbcc)
5134 {
5135     uint32_t offset;
5136     uint32_t base;
5137     TCGLabel *l1;
5138 
5139     base = s->pc;
5140     offset = (int16_t)read_im16(env, s);
5141     if (insn & (1 << 6)) {
5142         offset = (offset << 16) | read_im16(env, s);
5143     }
5144 
5145     l1 = gen_new_label();
5146     update_cc_op(s);
5147     gen_fjmpcc(s, insn & 0x3f, l1);
5148     gen_jmp_tb(s, 0, s->pc);
5149     gen_set_label(l1);
5150     gen_jmp_tb(s, 1, base + offset);
5151 }
5152 
5153 DISAS_INSN(fscc)
5154 {
5155     DisasCompare c;
5156     int cond;
5157     TCGv tmp;
5158     uint16_t ext;
5159 
5160     ext = read_im16(env, s);
5161     cond = ext & 0x3f;
5162     gen_fcc_cond(&c, s, cond);
5163 
5164     tmp = tcg_temp_new();
5165     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5166     free_cond(&c);
5167 
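    /*
     * FScc writes 0x00 or 0xff to the destination byte, so turn the 0/1
     * setcond result into 0/-1 before storing it.
     */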
5168     tcg_gen_neg_i32(tmp, tmp);
5169     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5170     tcg_temp_free(tmp);
5171 }
5172 
5173 #if defined(CONFIG_SOFTMMU)
5174 DISAS_INSN(frestore)
5175 {
5176     TCGv addr;
5177 
5178     if (IS_USER(s)) {
5179         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5180         return;
5181     }
5182     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5183         SRC_EA(env, addr, OS_LONG, 0, NULL);
5184         /* FIXME: check the state frame */
5185     } else {
5186         disas_undef(env, s, insn);
5187     }
5188 }
5189 
5190 DISAS_INSN(fsave)
5191 {
5192     if (IS_USER(s)) {
5193         gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
5194         return;
5195     }
5196 
5197     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5198         /* always write IDLE */
5199         TCGv idle = tcg_const_i32(0x41000000);
5200         DEST_EA(env, insn, OS_LONG, idle, NULL);
5201         tcg_temp_free(idle);
5202     } else {
5203         disas_undef(env, s, insn);
5204     }
5205 }
5206 #endif
5207 
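/*
 * Extract the upper or lower 16-bit half of a MAC operand, positioned and
 * extended according to the MACSR operating mode: fractional (FI), signed
 * integer (SU) or unsigned integer.
 */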
5208 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5209 {
5210     TCGv tmp = tcg_temp_new();
5211     if (s->env->macsr & MACSR_FI) {
5212         if (upper)
5213             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5214         else
5215             tcg_gen_shli_i32(tmp, val, 16);
5216     } else if (s->env->macsr & MACSR_SU) {
5217         if (upper)
5218             tcg_gen_sari_i32(tmp, val, 16);
5219         else
5220             tcg_gen_ext16s_i32(tmp, val);
5221     } else {
5222         if (upper)
5223             tcg_gen_shri_i32(tmp, val, 16);
5224         else
5225             tcg_gen_ext16u_i32(tmp, val);
5226     }
5227     return tmp;
5228 }
5229 
5230 static void gen_mac_clear_flags(void)
5231 {
5232     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5233                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5234 }
5235 
5236 DISAS_INSN(mac)
5237 {
5238     TCGv rx;
5239     TCGv ry;
5240     uint16_t ext;
5241     int acc;
5242     TCGv tmp;
5243     TCGv addr;
5244     TCGv loadval;
5245     int dual;
5246     TCGv saved_flags;
5247 
5248     if (!s->done_mac) {
5249         s->mactmp = tcg_temp_new_i64();
5250         s->done_mac = 1;
5251     }
5252 
5253     ext = read_im16(env, s);
5254 
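    /*
     * acc selects one of the four accumulators: opcode bit 7 supplies the
     * low bit and extension-word bit 4 the high bit.
     */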
5255     acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
5256     dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
5257     if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
5258         disas_undef(env, s, insn);
5259         return;
5260     }
5261     if (insn & 0x30) {
5262         /* MAC with load.  */
5263         tmp = gen_lea(env, s, insn, OS_LONG);
5264         addr = tcg_temp_new();
5265         tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5266         /* Load the value now to ensure correct exception behavior.
5267            Perform writeback after reading the MAC inputs.  */
5268         loadval = gen_load(s, OS_LONG, addr, 0);
5269 
5270         acc ^= 1;
5271         rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5272         ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5273     } else {
5274         loadval = addr = NULL_QREG;
5275         rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5276         ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5277     }
5278 
5279     gen_mac_clear_flags();
5280 #if 0
5281     l1 = -1;
5282     /* Disabled because conditional branches clobber temporary vars.  */
5283     if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5284         /* Skip the multiply if we know we will ignore it.  */
5285         l1 = gen_new_label();
5286         tmp = tcg_temp_new();
5287         tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5288         gen_op_jmp_nz32(tmp, l1);
5289     }
5290 #endif
5291 
5292     if ((ext & 0x0800) == 0) {
5293         /* Word.  */
5294         rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5295         ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5296     }
5297     if (s->env->macsr & MACSR_FI) {
5298         gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5299     } else {
5300         if (s->env->macsr & MACSR_SU)
5301             gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5302         else
5303             gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
5304         switch ((ext >> 9) & 3) {
5305         case 1:
5306             tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5307             break;
5308         case 3:
5309             tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5310             break;
5311         }
5312     }
5313 
5314     if (dual) {
5315         /* Save the overflow flag from the multiply.  */
5316         saved_flags = tcg_temp_new();
5317         tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5318     } else {
5319         saved_flags = NULL_QREG;
5320     }
5321 
5322 #if 0
5323     /* Disabled because conditional branches clobber temporary vars.  */
5324     if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5325         /* Skip the accumulate if the value is already saturated.  */
5326         l1 = gen_new_label();
5327         tmp = tcg_temp_new();
5328         gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5329         gen_op_jmp_nz32(tmp, l1);
5330     }
5331 #endif
5332 
5333     if (insn & 0x100)
5334         tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5335     else
5336         tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5337 
5338     if (s->env->macsr & MACSR_FI)
5339         gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5340     else if (s->env->macsr & MACSR_SU)
5341         gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5342     else
5343         gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5344 
5345 #if 0
5346     /* Disabled because conditional branches clobber temporary vars.  */
5347     if (l1 != -1)
5348         gen_set_label(l1);
5349 #endif
5350 
5351     if (dual) {
5352         /* Dual accumulate variant.  */
5353         acc = (ext >> 2) & 3;
5354         /* Restore the overflow flag from the multiplier.  */
5355         tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5356 #if 0
5357         /* Disabled because conditional branches clobber temporary vars.  */
5358         if ((s->env->macsr & MACSR_OMC) != 0) {
5359             /* Skip the accumulate if the value is already saturated.  */
5360             l1 = gen_new_label();
5361             tmp = tcg_temp_new();
5362             gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5363             gen_op_jmp_nz32(tmp, l1);
5364         }
5365 #endif
5366         if (ext & 2)
5367             tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5368         else
5369             tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5370         if (s->env->macsr & MACSR_FI)
5371             gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5372         else if (s->env->macsr & MACSR_SU)
5373             gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5374         else
5375             gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5376 #if 0
5377         /* Disabled because conditional branches clobber temporary vars.  */
5378         if (l1 != -1)
5379             gen_set_label(l1);
5380 #endif
5381     }
5382     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
5383 
5384     if (insn & 0x30) {
5385         TCGv rw;
5386         rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5387         tcg_gen_mov_i32(rw, loadval);
5388         /* FIXME: Should address writeback happen with the masked or
5389            unmasked value?  */
5390         switch ((insn >> 3) & 7) {
5391         case 3: /* Post-increment.  */
5392             tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5393             break;
5394         case 4: /* Pre-decrement.  */
5395             tcg_gen_mov_i32(AREG(insn, 0), addr);
5396         }
5397     }
5398 }
5399 
5400 DISAS_INSN(from_mac)
5401 {
5402     TCGv rx;
5403     TCGv_i64 acc;
5404     int accnum;
5405 
5406     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5407     accnum = (insn >> 9) & 3;
5408     acc = MACREG(accnum);
5409     if (s->env->macsr & MACSR_FI) {
5410         gen_helper_get_macf(rx, cpu_env, acc);
5411     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5412         tcg_gen_extrl_i64_i32(rx, acc);
5413     } else if (s->env->macsr & MACSR_SU) {
5414         gen_helper_get_macs(rx, acc);
5415     } else {
5416         gen_helper_get_macu(rx, acc);
5417     }
5418     if (insn & 0x40) {
5419         tcg_gen_movi_i64(acc, 0);
5420         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5421     }
5422 }
5423 
5424 DISAS_INSN(move_mac)
5425 {
5426     /* FIXME: This can be done without a helper.  */
5427     int src;
5428     TCGv dest;
5429     src = insn & 3;
5430     dest = tcg_const_i32((insn >> 9) & 3);
5431     gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5432     gen_mac_clear_flags();
5433     gen_helper_mac_set_flags(cpu_env, dest);
5434 }
5435 
5436 DISAS_INSN(from_macsr)
5437 {
5438     TCGv reg;
5439 
5440     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5441     tcg_gen_mov_i32(reg, QREG_MACSR);
5442 }
5443 
5444 DISAS_INSN(from_mask)
5445 {
5446     TCGv reg;
5447     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5448     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5449 }
5450 
5451 DISAS_INSN(from_mext)
5452 {
5453     TCGv reg;
5454     TCGv acc;
5455     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5456     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5457     if (s->env->macsr & MACSR_FI)
5458         gen_helper_get_mac_extf(reg, cpu_env, acc);
5459     else
5460         gen_helper_get_mac_exti(reg, cpu_env, acc);
5461 }
5462 
5463 DISAS_INSN(macsr_to_ccr)
5464 {
5465     TCGv tmp = tcg_temp_new();
5466     tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5467     gen_helper_set_sr(cpu_env, tmp);
5468     tcg_temp_free(tmp);
5469     set_cc_op(s, CC_OP_FLAGS);
5470 }
5471 
5472 DISAS_INSN(to_mac)
5473 {
5474     TCGv_i64 acc;
5475     TCGv val;
5476     int accnum;
5477     accnum = (insn >> 9) & 3;
5478     acc = MACREG(accnum);
5479     SRC_EA(env, val, OS_LONG, 0, NULL);
5480     if (s->env->macsr & MACSR_FI) {
5481         tcg_gen_ext_i32_i64(acc, val);
5482         tcg_gen_shli_i64(acc, acc, 8);
5483     } else if (s->env->macsr & MACSR_SU) {
5484         tcg_gen_ext_i32_i64(acc, val);
5485     } else {
5486         tcg_gen_extu_i32_i64(acc, val);
5487     }
5488     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5489     gen_mac_clear_flags();
5490     gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5491 }
5492 
5493 DISAS_INSN(to_macsr)
5494 {
5495     TCGv val;
5496     SRC_EA(env, val, OS_LONG, 0, NULL);
5497     gen_helper_set_macsr(cpu_env, val);
5498     gen_lookup_tb(s);
5499 }
5500 
5501 DISAS_INSN(to_mask)
5502 {
5503     TCGv val;
5504     SRC_EA(env, val, OS_LONG, 0, NULL);
5505     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5506 }
5507 
5508 DISAS_INSN(to_mext)
5509 {
5510     TCGv val;
5511     TCGv acc;
5512     SRC_EA(env, val, OS_LONG, 0, NULL);
5513     acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5514     if (s->env->macsr & MACSR_FI)
5515         gen_helper_set_mac_extf(cpu_env, val, acc);
5516     else if (s->env->macsr & MACSR_SU)
5517         gen_helper_set_mac_exts(cpu_env, val, acc);
5518     else
5519         gen_helper_set_mac_extu(cpu_env, val, acc);
5520 }
5521 
5522 static disas_proc opcode_table[65536];
5523 
5524 static void
5525 register_opcode(disas_proc proc, uint16_t opcode, uint16_t mask)
5526 {
5527     int i;
5528     int from;
5529     int to;
5530 
5531     /* Sanity check.  All set bits must be included in the mask.  */
5532     if (opcode & ~mask) {
5533         fprintf(stderr,
5534                 "qemu internal error: bogus opcode definition %04x/%04x\n",
5535                 opcode, mask);
5536         abort();
5537     }
5538     /* This could probably be cleverer.  For now just optimize the case where
5539        the top bits are known.  */
5540     /* Find the first zero bit in the mask.  */
5541     i = 0x8000;
5542     while ((i & mask) != 0)
5543         i >>= 1;
5544     /* Iterate over all combinations of this and lower bits.  */
5545     if (i == 0)
5546         i = 1;
5547     else
5548         i <<= 1;
5549     from = opcode & ~(i - 1);
5550     to = from + i;
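         /*
          * Worked example (illustrative): the scc registration below uses
          * opcode 0x50c0 with mask 0xf0f8.  The first zero bit of the mask,
          * scanning from the top, is 0x0800, so i becomes 0x1000,
          * from = 0x5000 and to = 0x6000; the loop then fills every entry
          * in [0x5000, 0x6000) whose bits match 0x50c0 under the mask.
          */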
5551     for (i = from; i < to; i++) {
5552         if ((i & mask) == opcode)
5553             opcode_table[i] = proc;
5554     }
5555 }
5556 
5557 /* Register m68k opcode handlers.  Order is important.
5558    Later insns override earlier ones.  */
5559 void register_m68k_insns(CPUM68KState *env)
5560 {
5561     /* Build the opcode table only once to avoid
5562        multithreading issues. */
5563     if (opcode_table[0] != NULL) {
5564         return;
5565     }
5566 
5567     /* Use BASE() for instructions available
5568      * on both CF_ISA_A and M68000.
5569      */
5570 #define BASE(name, opcode, mask) \
5571     register_opcode(disas_##name, 0x##opcode, 0x##mask)
5572 #define INSN(name, opcode, mask, feature) do { \
5573     if (m68k_feature(env, M68K_FEATURE_##feature)) \
5574         BASE(name, opcode, mask); \
5575     } while (0)
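         /*
          * Ordering example: BASE(undef, 60ff, f0ff) below first marks all
          * long-branch encodings as illegal, and the INSN(branch, ...) lines
          * that follow re-register only the forms the selected feature set
          * actually provides, overriding the earlier undef entries.
          */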
5576     BASE(undef,     0000, 0000);
5577     INSN(arith_im,  0080, fff8, CF_ISA_A);
5578     INSN(arith_im,  0000, ff00, M68000);
5579     INSN(chk2,      00c0, f9c0, CHK2);
5580     INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
5581     BASE(bitop_reg, 0100, f1c0);
5582     BASE(bitop_reg, 0140, f1c0);
5583     BASE(bitop_reg, 0180, f1c0);
5584     BASE(bitop_reg, 01c0, f1c0);
5585     INSN(arith_im,  0280, fff8, CF_ISA_A);
5586     INSN(arith_im,  0200, ff00, M68000);
5587     INSN(undef,     02c0, ffc0, M68000);
5588     INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
5589     INSN(arith_im,  0480, fff8, CF_ISA_A);
5590     INSN(arith_im,  0400, ff00, M68000);
5591     INSN(undef,     04c0, ffc0, M68000);
5592     INSN(arith_im,  0600, ff00, M68000);
5593     INSN(undef,     06c0, ffc0, M68000);
5594     INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
5595     INSN(arith_im,  0680, fff8, CF_ISA_A);
5596     INSN(arith_im,  0c00, ff38, CF_ISA_A);
5597     INSN(arith_im,  0c00, ff00, M68000);
5598     BASE(bitop_im,  0800, ffc0);
5599     BASE(bitop_im,  0840, ffc0);
5600     BASE(bitop_im,  0880, ffc0);
5601     BASE(bitop_im,  08c0, ffc0);
5602     INSN(arith_im,  0a80, fff8, CF_ISA_A);
5603     INSN(arith_im,  0a00, ff00, M68000);
5604     INSN(cas,       0ac0, ffc0, CAS);
5605     INSN(cas,       0cc0, ffc0, CAS);
5606     INSN(cas,       0ec0, ffc0, CAS);
5607     INSN(cas2w,     0cfc, ffff, CAS);
5608     INSN(cas2l,     0efc, ffff, CAS);
5609     BASE(move,      1000, f000);
5610     BASE(move,      2000, f000);
5611     BASE(move,      3000, f000);
5612     INSN(chk,       4000, f040, M68000);
5613     INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
5614     INSN(negx,      4080, fff8, CF_ISA_A);
5615     INSN(negx,      4000, ff00, M68000);
5616     INSN(undef,     40c0, ffc0, M68000);
5617     INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5618     INSN(move_from_sr, 40c0, ffc0, M68000);
5619     BASE(lea,       41c0, f1c0);
5620     BASE(clr,       4200, ff00);
5621     BASE(undef,     42c0, ffc0);
5622     INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
5623     INSN(move_from_ccr, 42c0, ffc0, M68000);
5624     INSN(neg,       4480, fff8, CF_ISA_A);
5625     INSN(neg,       4400, ff00, M68000);
5626     INSN(undef,     44c0, ffc0, M68000);
5627     BASE(move_to_ccr, 44c0, ffc0);
5628     INSN(not,       4680, fff8, CF_ISA_A);
5629     INSN(not,       4600, ff00, M68000);
5630 #if defined(CONFIG_SOFTMMU)
5631     BASE(move_to_sr, 46c0, ffc0);
5632 #endif
5633     INSN(nbcd,      4800, ffc0, M68000);
5634     INSN(linkl,     4808, fff8, M68000);
5635     BASE(pea,       4840, ffc0);
5636     BASE(swap,      4840, fff8);
5637     INSN(bkpt,      4848, fff8, BKPT);
5638     INSN(movem,     48d0, fbf8, CF_ISA_A);
5639     INSN(movem,     48e8, fbf8, CF_ISA_A);
5640     INSN(movem,     4880, fb80, M68000);
5641     BASE(ext,       4880, fff8);
5642     BASE(ext,       48c0, fff8);
5643     BASE(ext,       49c0, fff8);
5644     BASE(tst,       4a00, ff00);
5645     INSN(tas,       4ac0, ffc0, CF_ISA_B);
5646     INSN(tas,       4ac0, ffc0, M68000);
5647 #if defined(CONFIG_SOFTMMU)
5648     INSN(halt,      4ac8, ffff, CF_ISA_A);
5649 #endif
5650     INSN(pulse,     4acc, ffff, CF_ISA_A);
5651     BASE(illegal,   4afc, ffff);
5652     INSN(mull,      4c00, ffc0, CF_ISA_A);
5653     INSN(mull,      4c00, ffc0, LONG_MULDIV);
5654     INSN(divl,      4c40, ffc0, CF_ISA_A);
5655     INSN(divl,      4c40, ffc0, LONG_MULDIV);
5656     INSN(sats,      4c80, fff8, CF_ISA_B);
5657     BASE(trap,      4e40, fff0);
5658     BASE(link,      4e50, fff8);
5659     BASE(unlk,      4e58, fff8);
5660 #if defined(CONFIG_SOFTMMU)
5661     INSN(move_to_usp, 4e60, fff8, USP);
5662     INSN(move_from_usp, 4e68, fff8, USP);
5663     INSN(reset,     4e70, ffff, M68000);
5664     BASE(stop,      4e72, ffff);
5665     BASE(rte,       4e73, ffff);
5666     INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
5667     INSN(m68k_movec, 4e7a, fffe, M68000);
5668 #endif
5669     BASE(nop,       4e71, ffff);
5670     INSN(rtd,       4e74, ffff, RTD);
5671     BASE(rts,       4e75, ffff);
5672     BASE(jump,      4e80, ffc0);
5673     BASE(jump,      4ec0, ffc0);
5674     INSN(addsubq,   5000, f080, M68000);
5675     BASE(addsubq,   5080, f0c0);
5676     INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
5677     INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
5678     INSN(dbcc,      50c8, f0f8, M68000);
5679     INSN(tpf,       51f8, fff8, CF_ISA_A);
5680 
5681     /* Branch instructions.  */
5682     BASE(branch,    6000, f000);
5683     /* Disable long branch instructions, then add back the ones we want.  */
5684     BASE(undef,     60ff, f0ff); /* All long branches.  */
5685     INSN(branch,    60ff, f0ff, CF_ISA_B);
5686     INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
5687     INSN(branch,    60ff, ffff, BRAL);
5688     INSN(branch,    60ff, f0ff, BCCL);
5689 
5690     BASE(moveq,     7000, f100);
5691     INSN(mvzs,      7100, f100, CF_ISA_B);
5692     BASE(or,        8000, f000);
5693     BASE(divw,      80c0, f0c0);
5694     INSN(sbcd_reg,  8100, f1f8, M68000);
5695     INSN(sbcd_mem,  8108, f1f8, M68000);
5696     BASE(addsub,    9000, f000);
5697     INSN(undef,     90c0, f0c0, CF_ISA_A);
5698     INSN(subx_reg,  9180, f1f8, CF_ISA_A);
5699     INSN(subx_reg,  9100, f138, M68000);
5700     INSN(subx_mem,  9108, f138, M68000);
5701     INSN(suba,      91c0, f1c0, CF_ISA_A);
5702     INSN(suba,      90c0, f0c0, M68000);
5703 
5704     BASE(undef_mac, a000, f000);
5705     INSN(mac,       a000, f100, CF_EMAC);
5706     INSN(from_mac,  a180, f9b0, CF_EMAC);
5707     INSN(move_mac,  a110, f9fc, CF_EMAC);
5708     INSN(from_macsr, a980, f9f0, CF_EMAC);
5709     INSN(from_mask, ad80, fff0, CF_EMAC);
5710     INSN(from_mext, ab80, fbf0, CF_EMAC);
5711     INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
5712     INSN(to_mac,    a100, f9c0, CF_EMAC);
5713     INSN(to_macsr,  a900, ffc0, CF_EMAC);
5714     INSN(to_mext,   ab00, fbc0, CF_EMAC);
5715     INSN(to_mask,   ad00, ffc0, CF_EMAC);
5716 
5717     INSN(mov3q,     a140, f1c0, CF_ISA_B);
5718     INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
5719     INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
5720     INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
5721     INSN(cmp,       b080, f1c0, CF_ISA_A);
5722     INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
5723     INSN(cmp,       b000, f100, M68000);
5724     INSN(eor,       b100, f100, M68000);
5725     INSN(cmpm,      b108, f138, M68000);
5726     INSN(cmpa,      b0c0, f0c0, M68000);
5727     INSN(eor,       b180, f1c0, CF_ISA_A);
5728     BASE(and,       c000, f000);
5729     INSN(exg_dd,    c140, f1f8, M68000);
5730     INSN(exg_aa,    c148, f1f8, M68000);
5731     INSN(exg_da,    c188, f1f8, M68000);
5732     BASE(mulw,      c0c0, f0c0);
5733     INSN(abcd_reg,  c100, f1f8, M68000);
5734     INSN(abcd_mem,  c108, f1f8, M68000);
5735     BASE(addsub,    d000, f000);
5736     INSN(undef,     d0c0, f0c0, CF_ISA_A);
5737     INSN(addx_reg,  d180, f1f8, CF_ISA_A);
5738     INSN(addx_reg,  d100, f138, M68000);
5739     INSN(addx_mem,  d108, f138, M68000);
5740     INSN(adda,      d1c0, f1c0, CF_ISA_A);
5741     INSN(adda,      d0c0, f0c0, M68000);
5742     INSN(shift_im,  e080, f0f0, CF_ISA_A);
5743     INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
5744     INSN(shift8_im, e000, f0f0, M68000);
5745     INSN(shift16_im, e040, f0f0, M68000);
5746     INSN(shift_im,  e080, f0f0, M68000);
5747     INSN(shift8_reg, e020, f0f0, M68000);
5748     INSN(shift16_reg, e060, f0f0, M68000);
5749     INSN(shift_reg, e0a0, f0f0, M68000);
5750     INSN(shift_mem, e0c0, fcc0, M68000);
5751     INSN(rotate_im, e090, f0f0, M68000);
5752     INSN(rotate8_im, e010, f0f0, M68000);
5753     INSN(rotate16_im, e050, f0f0, M68000);
5754     INSN(rotate_reg, e0b0, f0f0, M68000);
5755     INSN(rotate8_reg, e030, f0f0, M68000);
5756     INSN(rotate16_reg, e070, f0f0, M68000);
5757     INSN(rotate_mem, e4c0, fcc0, M68000);
5758     INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
5759     INSN(bfext_reg, e9c0, fdf8, BITFIELD);
5760     INSN(bfins_mem, efc0, ffc0, BITFIELD);
5761     INSN(bfins_reg, efc0, fff8, BITFIELD);
5762     INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
5763     INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
5764     INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
5765     INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
5766     INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
5767     INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
5768     INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
5769     INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
5770     INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
5771     INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
5772     BASE(undef_fpu, f000, f000);
5773     INSN(fpu,       f200, ffc0, CF_FPU);
5774     INSN(fbcc,      f280, ffc0, CF_FPU);
5775     INSN(fpu,       f200, ffc0, FPU);
5776     INSN(fscc,      f240, ffc0, FPU);
5777     INSN(fbcc,      f280, ff80, FPU);
5778 #if defined(CONFIG_SOFTMMU)
5779     INSN(frestore,  f340, ffc0, CF_FPU);
5780     INSN(fsave,     f300, ffc0, CF_FPU);
5781     INSN(frestore,  f340, ffc0, FPU);
5782     INSN(fsave,     f300, ffc0, FPU);
5783     INSN(intouch,   f340, ffc0, CF_ISA_A);
5784     INSN(cpushl,    f428, ff38, CF_ISA_A);
5785     INSN(cpush,     f420, ff20, M68040);
5786     INSN(cinv,      f400, ff20, M68040);
5787     INSN(wddata,    fb00, ff00, CF_ISA_A);
5788     INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
5789 #endif
5790     INSN(move16_mem, f600, ffe0, M68040);
5791     INSN(move16_reg, f620, fff8, M68040);
5792 #undef INSN
5793 }
5794 
5795 /* ??? Some of this implementation is not exception safe.  We should always
5796    write back the result to memory before setting the condition codes.  */
5797 static void disas_m68k_insn(CPUM68KState *env, DisasContext *s)
5798 {
5799     uint16_t insn = read_im16(env, s);
5800     opcode_table[insn](env, s, insn);
5801     do_writebacks(s);
5802 }
5803 
5804 /* generate intermediate code for basic block 'tb'.  */
5805 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
5806 {
5807     CPUM68KState *env = cs->env_ptr;
5808     DisasContext dc1, *dc = &dc1;
5809     target_ulong pc_start;
5810     int pc_offset;
5811     int num_insns;
5812     int max_insns;
5813 
5814     /* generate intermediate code */
5815     pc_start = tb->pc;
5816 
5817     dc->tb = tb;
5818 
5819     dc->env = env;
5820     dc->is_jmp = DISAS_NEXT;
5821     dc->pc = pc_start;
5822     dc->cc_op = CC_OP_DYNAMIC;
5823     dc->cc_op_synced = 1;
5824     dc->singlestep_enabled = cs->singlestep_enabled;
5825     dc->user = (env->sr & SR_S) == 0;
5826     dc->done_mac = 0;
5827     dc->writeback_mask = 0;
5828     num_insns = 0;
5829     max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5830     if (max_insns == 0) {
5831         max_insns = CF_COUNT_MASK;
5832     }
5833     if (max_insns > TCG_MAX_INSNS) {
5834         max_insns = TCG_MAX_INSNS;
5835     }
5836 
5837     gen_tb_start(tb);
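         /*
          * Translate one insn per iteration until an insn ends the block,
          * the TCG op buffer fills up, single-stepping is requested, the
          * insn budget is spent, or translation has covered almost a full
          * page (TARGET_PAGE_SIZE - 32 bytes) of guest code.
          */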
5838     do {
5839         pc_offset = dc->pc - pc_start;
5840         tcg_gen_insn_start(dc->pc, dc->cc_op);
5841         num_insns++;
5842 
5843         if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5844             gen_exception(dc, dc->pc, EXCP_DEBUG);
5845             dc->is_jmp = DISAS_JUMP;
5846             /* The address covered by the breakpoint must be included in
5847                [tb->pc, tb->pc + tb->size) in order for it to be
5848                properly cleared -- thus we increment the PC here so that
5849                the logic setting tb->size below does the right thing.  */
5850             dc->pc += 2;
5851             break;
5852         }
5853 
5854         if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
5855             gen_io_start();
5856         }
5857 
5858         dc->insn_pc = dc->pc;
5859         disas_m68k_insn(env, dc);
5860     } while (!dc->is_jmp && !tcg_op_buf_full() &&
5861              !cs->singlestep_enabled &&
5862              !singlestep &&
5863              (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
5864              num_insns < max_insns);
5865 
5866     if (tb_cflags(tb) & CF_LAST_IO)
5867         gen_io_end();
5868     if (unlikely(cs->singlestep_enabled)) {
5869         /* Make sure the pc is updated, and raise a debug exception.  */
5870         if (!dc->is_jmp) {
5871             update_cc_op(dc);
5872             tcg_gen_movi_i32(QREG_PC, dc->pc);
5873         }
5874         gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
5875     } else {
5876         switch (dc->is_jmp) {
5877         case DISAS_NEXT:
5878             update_cc_op(dc);
5879             gen_jmp_tb(dc, 0, dc->pc);
5880             break;
5881         default:
5882         case DISAS_JUMP:
5883         case DISAS_UPDATE:
5884             update_cc_op(dc);
5885             /* indicate that the hash table must be used to find the next TB */
5886             tcg_gen_exit_tb(0);
5887             break;
5888         case DISAS_TB_JUMP:
5889             /* nothing more to generate */
5890             break;
5891         }
5892     }
5893     gen_tb_end(tb, num_insns);
5894 
5895 #ifdef DEBUG_DISAS
5896     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5897         && qemu_log_in_addr_range(pc_start)) {
5898         qemu_log_lock();
5899         qemu_log("----------------\n");
5900         qemu_log("IN: %s\n", lookup_symbol(pc_start));
5901         log_target_disas(cs, pc_start, dc->pc - pc_start);
5902         qemu_log("\n");
5903         qemu_log_unlock();
5904     }
5905 #endif
5906     tb->size = dc->pc - pc_start;
5907     tb->icount = num_insns;
5908 }
5909 
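     /*
      * Convert an 80-bit FP register image to a host double, only for the
      * register dump below; values that do not fit in a float64 lose
      * precision or range.
      */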
5910 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
5911 {
5912     floatx80 a = { .high = high, .low = low };
5913     union {
5914         float64 f64;
5915         double d;
5916     } u;
5917 
5918     u.f64 = floatx80_to_float64(a, &env->fp_status);
5919     return u.d;
5920 }
5921 
5922 void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
5923                          int flags)
5924 {
5925     M68kCPU *cpu = M68K_CPU(cs);
5926     CPUM68KState *env = &cpu->env;
5927     int i;
5928     uint16_t sr;
5929     for (i = 0; i < 8; i++) {
5930         cpu_fprintf(f, "D%d = %08x   A%d = %08x   "
5931                     "F%d = %04x %016"PRIx64"  (%12g)\n",
5932                     i, env->dregs[i], i, env->aregs[i],
5933                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
5934                     floatx80_to_double(env, env->fregs[i].l.upper,
5935                                        env->fregs[i].l.lower));
5936     }
5937     cpu_fprintf(f, "PC = %08x   ", env->pc);
5938     sr = env->sr | cpu_m68k_get_ccr(env);
5939     cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
5940                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
5941                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
5942                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
5943                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
5944                 (sr & CCF_C) ? 'C' : '-');
5945     cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
5946                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
5947                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
5948                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
5949                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
5950     cpu_fprintf(f, "\n                                "
5951                    "FPCR =     %04x ", env->fpcr);
5952     switch (env->fpcr & FPCR_PREC_MASK) {
5953     case FPCR_PREC_X:
5954         cpu_fprintf(f, "X ");
5955         break;
5956     case FPCR_PREC_S:
5957         cpu_fprintf(f, "S ");
5958         break;
5959     case FPCR_PREC_D:
5960         cpu_fprintf(f, "D ");
5961         break;
5962     }
5963     switch (env->fpcr & FPCR_RND_MASK) {
5964     case FPCR_RND_N:
5965         cpu_fprintf(f, "RN ");
5966         break;
5967     case FPCR_RND_Z:
5968         cpu_fprintf(f, "RZ ");
5969         break;
5970     case FPCR_RND_M:
5971         cpu_fprintf(f, "RM ");
5972         break;
5973     case FPCR_RND_P:
5974         cpu_fprintf(f, "RP ");
5975         break;
5976     }
5977     cpu_fprintf(f, "\n");
5978 #ifdef CONFIG_SOFTMMU
5979     cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
5980                env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
5981                env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
5982                env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
5983     cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
5984 #endif
5985 }
5986 
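     /*
      * Restore CPU state from the values recorded by tcg_gen_insn_start():
      * data[0] is the insn PC, data[1] the cc_op at that point.
      * CC_OP_DYNAMIC means the value already lives in env->cc_op and is
      * left untouched.
      */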
5987 void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
5988                           target_ulong *data)
5989 {
5990     int cc_op = data[1];
5991     env->pc = data[0];
5992     if (cc_op != CC_OP_DYNAMIC) {
5993         env->cc_op = cc_op;
5994     }
5995 }
5996