xref: /openbmc/qemu/target/m68k/translate.c (revision 851ec6eb)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
/* Define to log every opcode dispatch (see DISAS_INSN below). */
//#define DEBUG_DISPATCH 1

/*
 * Declare one static TCG global per fixed machine register (the list
 * lives in qregs.h.inc); the variables are created in m68k_tcg_init().
 */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

/* TCG views of generic CPUState fields (located before env, see init). */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Name storage: 8 x "Dn\0" + 8 x "An\0" (2*8*3) + 4 x "ACCn\0" (5*4). */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract the 3-bit register field of INSN starting at bit POS. */
#define REG(insn, pos)  (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* Address registers go through get_areg() to honor delayed writebacks. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         get_areg(s, 7)

/* Sentinel returned by EA generators for invalid addressing modes. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
67 
/*
 * Create all TCG globals used by the translator: the fixed QREG_*
 * registers, the halted/exception_index fields of the generic CPUState,
 * and the D/A/ACC register files.  Called once during CPU setup.
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Instantiate each QREG_* over its CPUM68KState field. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * halted and exception_index live in the CPUState that precedes env
     * within M68kCPU, hence the negative offset relative to cpu_env.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve register names out of cpu_reg_names; see its size comment. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Sentinels at small negative env offsets so they never alias real
     * state — assumption, confirm against TCG global-memory semantics.
     * NOTE(review): both sentinels share the debug name "NULL", which
     * makes them indistinguishable in TCG dumps; consider renaming the
     * second one.
     */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
112 
/* internal defines */
/* Per-translation-block state threaded through all disas_* handlers. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next insn word to fetch */
    target_ulong pc_prev;   /* pc of the previous insn — assumption, confirm with users */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero when QREG_CC_OP already holds cc_op */
    TCGv_i64 mactmp;        /* MAC-unit scratch temp — TODO confirm usage */
    int done_mac;
    int writeback_mask;     /* bitmask of aregs with a pending delayed writeback */
    TCGv writeback[8];      /* pending values; valid where writeback_mask is set */
    bool ss_active;         /* presumably single-step/trace active — confirm */
} DisasContext;
127 
128 static TCGv get_areg(DisasContext *s, unsigned regno)
129 {
130     if (s->writeback_mask & (1 << regno)) {
131         return s->writeback[regno];
132     } else {
133         return cpu_aregs[regno];
134     }
135 }
136 
137 static void delay_set_areg(DisasContext *s, unsigned regno,
138                            TCGv val, bool give_temp)
139 {
140     if (s->writeback_mask & (1 << regno)) {
141         if (give_temp) {
142             s->writeback[regno] = val;
143         } else {
144             tcg_gen_mov_i32(s->writeback[regno], val);
145         }
146     } else {
147         s->writeback_mask |= 1 << regno;
148         if (give_temp) {
149             s->writeback[regno] = val;
150         } else {
151             TCGv tmp = tcg_temp_new();
152             s->writeback[regno] = tmp;
153             tcg_gen_mov_i32(tmp, val);
154         }
155     }
156 }
157 
158 static void do_writebacks(DisasContext *s)
159 {
160     unsigned mask = s->writeback_mask;
161     if (mask) {
162         s->writeback_mask = 0;
163         do {
164             unsigned regno = ctz32(mask);
165             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
166             mask &= mask - 1;
167         } while (mask);
168     }
169 }
170 
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation always executes unprivileged. */
#define IS_USER(s) 1
#else
/*
 * Privilege level and the source/destination function-code MMU indexes
 * are derived from the TB flags captured at translation time.
 */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature of the per-opcode translation routines. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug variant: wrap each handler so its dispatch is logged first. */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
204 
/*
 * For each CC_OP, the set of flag bits whose QREG_CC_* values are live
 * (i.e. consumed when flags are eventually flushed).  set_cc_op() uses
 * this to discard computations that the new cc_op makes dead.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
213 
214 static void set_cc_op(DisasContext *s, CCOp op)
215 {
216     CCOp old_op = s->cc_op;
217     int dead;
218 
219     if (old_op == op) {
220         return;
221     }
222     s->cc_op = op;
223     s->cc_op_synced = 0;
224 
225     /*
226      * Discard CC computation that will no longer be used.
227      * Note that X and N are never dead.
228      */
229     dead = cc_op_live[old_op] & ~cc_op_live[op];
230     if (dead & CCF_C) {
231         tcg_gen_discard_i32(QREG_CC_C);
232     }
233     if (dead & CCF_Z) {
234         tcg_gen_discard_i32(QREG_CC_Z);
235     }
236     if (dead & CCF_V) {
237         tcg_gen_discard_i32(QREG_CC_V);
238     }
239 }
240 
241 /* Update the CPU env CC_OP state.  */
242 static void update_cc_op(DisasContext *s)
243 {
244     if (!s->cc_op_synced) {
245         s->cc_op_synced = 1;
246         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
247     }
248 }
249 
/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
257 
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Flags must be synced before leaving the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
265 
/* Emit a call to the raise_exception helper for exception number NR. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
270 
/* Raise exception NR with a Format $2 stack frame recording THIS_PC. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
285 
/* Raise exception NR with the PC set to DEST; ends the TB. */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
295 
/* Raise an address-error exception at the current insn. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
300 
/*
 * Generate a load from the specified address.  Narrow values are
 *  sign extended to full register width.
 */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                            int sign, int index)
{
    TCGv tmp = tcg_temp_new_i32();

    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /*
         * Relies on OS_BYTE/OS_WORD/OS_LONG matching the MO_SIZE
         * encodings, as the value is OR-ed directly into the memop.
         */
        tcg_gen_qemu_ld_tl(tmp, addr, index,
                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
    return tmp;
}
322 
/* Generate a store.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                             int index)
{
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* OS_* size codes double as MO_SIZE values; see gen_load. */
        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
}
337 
/* Kind of access requested from gen_ldst()/gen_ea_mode(). */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* load, zero-extended */
    EA_LOADS    /* load, sign-extended */
} ea_what;
343 
344 /*
345  * Generate an unsigned load if VAL is 0 a signed load if val is -1,
346  * otherwise generate a store.
347  */
348 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
349                      ea_what what, int index)
350 {
351     if (what == EA_STORE) {
352         gen_store(s, opsize, addr, val, index);
353         return store_dummy;
354     } else {
355         return gen_load(s, opsize, addr, what == EA_LOADS, index);
356     }
357 }
358 
/* Read a 16-bit immediate constant */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
    uint16_t im;
    im = translator_lduw(env, &s->base, s->pc);
    s->pc += 2;     /* advance past the consumed extension word */
    return im;
}
367 
/* Read an 8-bit immediate constant */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    /*
     * Byte immediates occupy a full 16-bit extension word; the upper
     * byte is discarded by the uint8_t return type.
     */
    return read_im16(env, s);
}
373 
374 /* Read a 32-bit immediate constant.  */
375 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
376 {
377     uint32_t im;
378     im = read_im16(env, s) << 16;
379     im |= 0xffff & read_im16(env, s);
380     return im;
381 }
382 
383 /* Read a 64-bit immediate constant.  */
384 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
385 {
386     uint64_t im;
387     im = (uint64_t)read_im32(env, s) << 32;
388     im |= (uint64_t)read_im32(env, s);
389     return im;
390 }
391 
392 /* Calculate and address index.  */
393 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
394 {
395     TCGv add;
396     int scale;
397 
398     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
399     if ((ext & 0x800) == 0) {
400         tcg_gen_ext16s_i32(tmp, add);
401         add = tmp;
402     }
403     scale = (ext >> 9) & 3;
404     if (scale != 0) {
405         tcg_gen_shli_i32(tmp, add, scale);
406         add = tmp;
407     }
408     return add;
409 }
410 
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 * Returns NULL_QREG when the extension word encodes a format the
 * current CPU feature set does not support.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the pc of the extension word for pc-relative modes. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Long-sized index requires the WORD_INDEX feature when bit 11 clear. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    /* Ignore the scale field on CPUs without scaled indexing. */
    if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed (or applied post-indirection below) */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold pc and displacement into a constant */
                base = tcg_constant_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* everything suppressed: address is just the displacement */
            add = tcg_constant_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-indexed: add the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: base + index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
517 
518 /* Sign or zero extend a value.  */
519 
520 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
521 {
522     switch (opsize) {
523     case OS_BYTE:
524         if (sign) {
525             tcg_gen_ext8s_i32(res, val);
526         } else {
527             tcg_gen_ext8u_i32(res, val);
528         }
529         break;
530     case OS_WORD:
531         if (sign) {
532             tcg_gen_ext16s_i32(res, val);
533         } else {
534             tcg_gen_ext16u_i32(res, val);
535         }
536         break;
537     case OS_LONG:
538         tcg_gen_mov_i32(res, val);
539         break;
540     default:
541         g_assert_not_reached();
542     }
543 }
544 
/* Evaluate all the CC flags.  */

/*
 * Materialize C/V/Z/N/X from the deferred cc_op state so that
 * afterwards the flags are in canonical CC_OP_FLAGS form.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already canonical. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* N holds the result, V holds one operand, X the carry out. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* N holds the first operand, V the second; derive everything. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime; let the helper dispatch. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
619 
620 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
621 {
622     TCGv tmp;
623 
624     if (opsize == OS_LONG) {
625         tmp = val;
626     } else {
627         tmp = tcg_temp_new();
628         gen_ext(tmp, val, opsize, sign);
629     }
630 
631     return tmp;
632 }
633 
/* Set the flags for a logical-operation result VAL of size OPSIZE. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
639 
/* Record a compare of DEST against SRC for deferred flag evaluation. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
646 
/*
 * Record an add/sub result DEST and operand SRC for deferred flag
 * evaluation (caller sets the matching CC_OP).
 */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
652 
653 static inline int opsize_bytes(int opsize)
654 {
655     switch (opsize) {
656     case OS_BYTE: return 1;
657     case OS_WORD: return 2;
658     case OS_LONG: return 4;
659     case OS_SINGLE: return 4;
660     case OS_DOUBLE: return 8;
661     case OS_EXTENDED: return 12;
662     case OS_PACKED: return 12;
663     default:
664         g_assert_not_reached();
665     }
666 }
667 
668 static inline int insn_opsize(int insn)
669 {
670     switch ((insn >> 6) & 3) {
671     case 0: return OS_BYTE;
672     case 1: return OS_WORD;
673     case 2: return OS_LONG;
674     default:
675         g_assert_not_reached();
676     }
677 }
678 
679 static inline int ext_opsize(int ext, int pos)
680 {
681     switch ((ext >> pos) & 7) {
682     case 0: return OS_LONG;
683     case 1: return OS_SINGLE;
684     case 2: return OS_EXTENDED;
685     case 3: return OS_PACKED;
686     case 4: return OS_WORD;
687     case 5: return OS_DOUBLE;
688     case 6: return OS_BYTE;
689     default:
690         g_assert_not_reached();
691     }
692 }
693 
694 /*
695  * Assign value to a register.  If the width is less than the register width
696  * only the low part of the register is set.
697  */
698 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
699 {
700     switch (opsize) {
701     case OS_BYTE:
702         tcg_gen_deposit_i32(reg, reg, val, 0, 8);
703         break;
704     case OS_WORD:
705         tcg_gen_deposit_i32(reg, reg, val, 0, 16);
706         break;
707     case OS_LONG:
708     case OS_SINGLE:
709         tcg_gen_mov_i32(reg, val);
710         break;
711     default:
712         g_assert_not_reached();
713     }
714 }
715 
/*
 * Generate code for an "effective address".  Does not adjust the base
 * register for autoincrement addressing modes.
 * Returns NULL_QREG for modes that have no memory address (register
 * direct, immediate) or are otherwise invalid here.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        /* Register direct modes have no effective address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        /* Byte pushes via SP decrement by 2 to keep the stack aligned. */
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement  */
            /* Displacement is relative to the extension word's address. */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
783 
784 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
785                     int opsize)
786 {
787     int mode = extract32(insn, 3, 3);
788     int reg0 = REG(insn, 0);
789     return gen_lea_mode(env, s, mode, reg0, opsize);
790 }
791 
/*
 * Generate code to load/store a value from/into an EA.  WHAT selects the
 * access (EA_STORE writes VAL; EA_LOADU/EA_LOADS read with zero/sign
 * extension).  ADDRP is non-null for readwrite operands: the computed
 * address is stored there on the read pass and reused on the store
 * pass, and autoinc/dec writeback is deferred to the store pass.
 * Returns the loaded value, store_dummy for a store, or NULL_QREG for
 * an invalid mode.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Defer the increment until the final access of a readwrite op. */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            /* Byte accesses via SP move by 2 to keep the stack aligned. */
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed on the read pass. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
908 
909 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
910                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
911 {
912     int mode = extract32(insn, 3, 3);
913     int reg0 = REG(insn, 0);
914     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
915 }
916 
917 static TCGv_ptr gen_fp_ptr(int freg)
918 {
919     TCGv_ptr fp = tcg_temp_new_ptr();
920     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
921     return fp;
922 }
923 
924 static TCGv_ptr gen_fp_result_ptr(void)
925 {
926     TCGv_ptr fp = tcg_temp_new_ptr();
927     tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
928     return fp;
929 }
930 
931 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
932 {
933     TCGv t32;
934     TCGv_i64 t64;
935 
936     t32 = tcg_temp_new();
937     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
938     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
939 
940     t64 = tcg_temp_new_i64();
941     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
942     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
943 }
944 
/*
 * Load a value of size OPSIZE from ADDR and convert it into the FP
 * register pointed to by FP.  Unsupported formats raise EXCP_FP_UNIMP.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Integer source: sign-extend, then convert via helper. */
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no 96-bit extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word carries l.upper in its top 16 bits... */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* ...followed by the 64-bit l.lower at ADDR + 4. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
991 
/*
 * Store the FPReg pointed to by FP to memory at ADDR as an operand of
 * size OPSIZE, converting from the internal extended-precision format
 * via the red* ("reduce") helpers.  INDEX is the MMU index to use.
 * Mirror image of gen_load_fp.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Relies on OS_BYTE/WORD/LONG matching the MO_8/16/32 encoding. */
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        /* ColdFire FPUs have no extended-precision format. */
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Exponent word goes in the top half of the first longword... */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        /* ...and the 64-bit mantissa follows at ADDR + 4. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1038 
1039 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1040                         TCGv_ptr fp, ea_what what, int index)
1041 {
1042     if (what == EA_STORE) {
1043         gen_store_fp(s, opsize, addr, fp, index);
1044     } else {
1045         gen_load_fp(s, opsize, addr, fp, index);
1046     }
1047 }
1048 
/*
 * Evaluate effective address MODE/REG0 for a floating-point access of
 * size OPSIZE, loading into or storing from the FPReg at FP according
 * to WHAT.  INDEX is the MMU index for memory accesses.  Returns 0 on
 * success, -1 for an invalid mode/size combination (caller raises an
 * address fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Write back the decremented address only after the access. */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be read, never written. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* Exponent word in the first longword, mantissa after. */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1181 
1182 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1183                        int opsize, TCGv_ptr fp, ea_what what, int index)
1184 {
1185     int mode = extract32(insn, 3, 3);
1186     int reg0 = REG(insn, 0);
1187     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1188 }
1189 
/*
 * A lowered condition, produced by gen_cc_cond: the m68k condition
 * holds at runtime iff "v1 tcond v2" is true.
 */
typedef struct {
    TCGCond tcond;  /* comparison to apply */
    TCGv v1;        /* left operand */
    TCGv v2;        /* right operand (often constant zero) */
} DisasCompare;
1195 
/*
 * Fill in *C so that "v1 tcond v2" computes m68k condition COND.
 * Exploits the current lazy cc state (s->cc_op) to emit a direct
 * comparison where possible; otherwise flushes to CC_OP_FLAGS and
 * tests the materialized flags.  Each table entry below computes the
 * odd member of a condition pair; the even (inverse) member is
 * obtained by inverting tcond at "done".
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* N of the compare is the sign of (N - V) at operand width. */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the negation of the following odd one. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1367 
1368 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1369 {
1370   DisasCompare c;
1371 
1372   gen_cc_cond(&c, s, cond);
1373   update_cc_op(s);
1374   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1375 }
1376 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    /* Flush the lazy cc state and resume at the next sequential insn. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1384 
/*
 * Evaluate the source EA of INSN into RESULT as an OPSIZE load
 * (sign-extended when OP_SIGN), optionally returning the address via
 * ADDRP.  On an invalid EA, raises an address fault and returns from
 * the enclosing DISAS_INSN function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1393 
/*
 * Store VAL to the destination EA of INSN at size OPSIZE, reusing a
 * previously captured address via ADDRP when non-NULL.  On an invalid
 * EA, raises an address fault and returns from the enclosing
 * DISAS_INSN function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1402 
/*
 * Generate a jump to an immediate address.  N is the goto_tb slot to
 * use for direct block chaining; SRC is the address passed to the
 * trace exception when single-stepping.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Single-stepping: take a trace exception instead of jumping. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct block chaining through goto_tb slot N. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: exit to the main loop for a TB lookup. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1421 
1422 DISAS_INSN(scc)
1423 {
1424     DisasCompare c;
1425     int cond;
1426     TCGv tmp;
1427 
1428     cond = (insn >> 8) & 0xf;
1429     gen_cc_cond(&c, s, cond);
1430 
1431     tmp = tcg_temp_new();
1432     tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
1433 
1434     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1435 }
1436 
/*
 * DBcc Dn,<disp>: if the condition holds, fall through; otherwise
 * decrement the low word of Dn and loop back to base+offset unless
 * the counter has reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;  /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low 16 bits of Dn are updated. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1460 
/* Undefined MAC-unit opcode: raise the line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1465 
/* Undefined FPU opcode: raise the line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1470 
/* Undefined opcode: log at LOG_UNIMP and raise an illegal-insn trap. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1482 
1483 DISAS_INSN(mulw)
1484 {
1485     TCGv reg;
1486     TCGv tmp;
1487     TCGv src;
1488     int sign;
1489 
1490     sign = (insn & 0x100) != 0;
1491     reg = DREG(insn, 9);
1492     tmp = tcg_temp_new();
1493     if (sign)
1494         tcg_gen_ext16s_i32(tmp, reg);
1495     else
1496         tcg_gen_ext16u_i32(tmp, reg);
1497     SRC_EA(env, src, OS_WORD, sign, NULL);
1498     tcg_gen_mul_i32(tmp, tmp, src);
1499     tcg_gen_mov_i32(reg, tmp);
1500     gen_logic_cc(s, tmp, OS_LONG);
1501 }
1502 
1503 DISAS_INSN(divw)
1504 {
1505     int sign;
1506     TCGv src;
1507     TCGv destr;
1508     TCGv ilen;
1509 
1510     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1511 
1512     sign = (insn & 0x100) != 0;
1513 
1514     /* dest.l / src.w */
1515 
1516     SRC_EA(env, src, OS_WORD, sign, NULL);
1517     destr = tcg_constant_i32(REG(insn, 9));
1518     ilen = tcg_constant_i32(s->pc - s->base.pc_next);
1519     if (sign) {
1520         gen_helper_divsw(cpu_env, destr, src, ilen);
1521     } else {
1522         gen_helper_divuw(cpu_env, destr, src, ilen);
1523     }
1524 
1525     set_cc_op(s, CC_OP_FLAGS);
1526 }
1527 
/*
 * DIVS.L/DIVU.L and the quad (64/32) variants.  Bit 11 of the
 * extension word selects signed division, bit 10 the 64-bit dividend
 * form (requires the QUAD_MULDIV feature).  The helpers perform the
 * division and update the flags; ilen carries the instruction length
 * for the helper's exception path.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1574 
/*
 * Emit a branch-free packed-BCD addition of two BCD bytes:
 * dest10 := dest10 + src10 + X.  The carry out lands in bit 8 of
 * DEST, which bcd_flags() later turns into C/X.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1639 
/*
 * Emit a branch-free packed-BCD subtraction of two BCD bytes:
 * dest10 := dest10 - src10 - X, rewritten in terms of the bcd_add
 * identity shown below.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1691 
/*
 * Update C, X and the sticky Z flag from the 9-bit BCD result in VAL:
 * Z is only ever cleared (never set), and the carry out of the low
 * byte (bit 8) becomes both C and X.
 */
static void bcd_flags(TCGv val)
{
    /* Clear Z if any bit of the result byte is set. */
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    /* C = carry out of the BCD byte. */
    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1701 
1702 DISAS_INSN(abcd_reg)
1703 {
1704     TCGv src;
1705     TCGv dest;
1706 
1707     gen_flush_flags(s); /* !Z is sticky */
1708 
1709     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1710     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1711     bcd_add(dest, src);
1712     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1713 
1714     bcd_flags(dest);
1715 }
1716 
/*
 * ABCD -(Ay),-(Ax): BCD-add two memory bytes with predecrement
 * addressing, storing back through the destination address.
 */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* addr captured above lets the store reuse the same location. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1737 
1738 DISAS_INSN(sbcd_reg)
1739 {
1740     TCGv src, dest;
1741 
1742     gen_flush_flags(s); /* !Z is sticky */
1743 
1744     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1745     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1746 
1747     bcd_sub(dest, src);
1748 
1749     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1750 
1751     bcd_flags(dest);
1752 }
1753 
/*
 * SBCD -(Ay),-(Ax): BCD-subtract two memory bytes with predecrement
 * addressing, storing back through the destination address.
 */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* addr captured above lets the store reuse the same location. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1774 
1775 DISAS_INSN(nbcd)
1776 {
1777     TCGv src, dest;
1778     TCGv addr;
1779 
1780     gen_flush_flags(s); /* !Z is sticky */
1781 
1782     SRC_EA(env, src, OS_BYTE, 0, &addr);
1783 
1784     dest = tcg_temp_new();
1785     tcg_gen_movi_i32(dest, 0);
1786     bcd_sub(dest, src);
1787 
1788     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1789 
1790     bcd_flags(dest);
1791 }
1792 
/*
 * ADD/SUB (bit 14 selects add): <EA> op Dn -> Dn, or, when bit 8 is
 * set, Dn op <EA> -> <EA>.  X is set from the unsigned carry/borrow;
 * NZVC are computed lazily via CC_OP_ADDx/SUBx.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn op <EA> -> <EA> */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* <EA> op Dn -> Dn */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* X = carry out: unsigned sum wrapped below the addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* X = borrow: compare operands before the subtraction. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1830 
1831 /* Reverse the order of the bits in REG.  */
1832 DISAS_INSN(bitrev)
1833 {
1834     TCGv reg;
1835     reg = DREG(insn, 0);
1836     gen_helper_bitrev(reg, reg);
1837 }
1838 
1839 DISAS_INSN(bitop_reg)
1840 {
1841     int opsize;
1842     int op;
1843     TCGv src1;
1844     TCGv src2;
1845     TCGv tmp;
1846     TCGv addr;
1847     TCGv dest;
1848 
1849     if ((insn & 0x38) != 0)
1850         opsize = OS_BYTE;
1851     else
1852         opsize = OS_LONG;
1853     op = (insn >> 6) & 3;
1854     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1855 
1856     gen_flush_flags(s);
1857     src2 = tcg_temp_new();
1858     if (opsize == OS_BYTE)
1859         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1860     else
1861         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1862 
1863     tmp = tcg_temp_new();
1864     tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1865 
1866     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1867 
1868     dest = tcg_temp_new();
1869     switch (op) {
1870     case 1: /* bchg */
1871         tcg_gen_xor_i32(dest, src1, tmp);
1872         break;
1873     case 2: /* bclr */
1874         tcg_gen_andc_i32(dest, src1, tmp);
1875         break;
1876     case 3: /* bset */
1877         tcg_gen_or_i32(dest, src1, tmp);
1878         break;
1879     default: /* btst */
1880         break;
1881     }
1882     if (op) {
1883         DEST_EA(env, insn, opsize, dest, &addr);
1884     }
1885 }
1886 
1887 DISAS_INSN(sats)
1888 {
1889     TCGv reg;
1890     reg = DREG(insn, 0);
1891     gen_flush_flags(s);
1892     gen_helper_sats(reg, reg, QREG_CC_V);
1893     gen_logic_cc(s, reg, OS_LONG);
1894 }
1895 
1896 static void gen_push(DisasContext *s, TCGv val)
1897 {
1898     TCGv tmp;
1899 
1900     tmp = tcg_temp_new();
1901     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1902     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1903     tcg_gen_mov_i32(QREG_SP, tmp);
1904 }
1905 
1906 static TCGv mreg(int reg)
1907 {
1908     if (reg < 8) {
1909         /* Dx */
1910         return cpu_dregs[reg];
1911     }
1912     /* Ax */
1913     return cpu_aregs[reg & 7];
1914 }
1915 
/*
 * MOVEM: move multiple registers to or from memory.  The mask word
 * selects D0-D7 (bits 0-7) and A0-A7 (bits 8-15); the predecrement
 * store form encodes the mask bit-reversed, which the store loop
 * accounts for.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy of the base address. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Load all values first, then commit them to the registers. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2021 
2022 DISAS_INSN(movep)
2023 {
2024     uint8_t i;
2025     int16_t displ;
2026     TCGv reg;
2027     TCGv addr;
2028     TCGv abuf;
2029     TCGv dbuf;
2030 
2031     displ = read_im16(env, s);
2032 
2033     addr = AREG(insn, 0);
2034     reg = DREG(insn, 9);
2035 
2036     abuf = tcg_temp_new();
2037     tcg_gen_addi_i32(abuf, addr, displ);
2038     dbuf = tcg_temp_new();
2039 
2040     if (insn & 0x40) {
2041         i = 4;
2042     } else {
2043         i = 2;
2044     }
2045 
2046     if (insn & 0x80) {
2047         for ( ; i > 0 ; i--) {
2048             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2049             tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
2050             if (i > 1) {
2051                 tcg_gen_addi_i32(abuf, abuf, 2);
2052             }
2053         }
2054     } else {
2055         for ( ; i > 0 ; i--) {
2056             tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
2057             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2058             if (i > 1) {
2059                 tcg_gen_addi_i32(abuf, abuf, 2);
2060             }
2061         }
2062     }
2063 }
2064 
2065 DISAS_INSN(bitop_im)
2066 {
2067     int opsize;
2068     int op;
2069     TCGv src1;
2070     uint32_t mask;
2071     int bitnum;
2072     TCGv tmp;
2073     TCGv addr;
2074 
2075     if ((insn & 0x38) != 0)
2076         opsize = OS_BYTE;
2077     else
2078         opsize = OS_LONG;
2079     op = (insn >> 6) & 3;
2080 
2081     bitnum = read_im16(env, s);
2082     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2083         if (bitnum & 0xfe00) {
2084             disas_undef(env, s, insn);
2085             return;
2086         }
2087     } else {
2088         if (bitnum & 0xff00) {
2089             disas_undef(env, s, insn);
2090             return;
2091         }
2092     }
2093 
2094     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2095 
2096     gen_flush_flags(s);
2097     if (opsize == OS_BYTE)
2098         bitnum &= 7;
2099     else
2100         bitnum &= 31;
2101     mask = 1 << bitnum;
2102 
2103    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2104 
2105     if (op) {
2106         tmp = tcg_temp_new();
2107         switch (op) {
2108         case 1: /* bchg */
2109             tcg_gen_xori_i32(tmp, src1, mask);
2110             break;
2111         case 2: /* bclr */
2112             tcg_gen_andi_i32(tmp, src1, ~mask);
2113             break;
2114         case 3: /* bset */
2115             tcg_gen_ori_i32(tmp, src1, mask);
2116             break;
2117         default: /* btst */
2118             break;
2119         }
2120         DEST_EA(env, insn, opsize, tmp, &addr);
2121     }
2122 }
2123 
2124 static TCGv gen_get_ccr(DisasContext *s)
2125 {
2126     TCGv dest;
2127 
2128     update_cc_op(s);
2129     dest = tcg_temp_new();
2130     gen_helper_get_ccr(dest, cpu_env);
2131     return dest;
2132 }
2133 
2134 static TCGv gen_get_sr(DisasContext *s)
2135 {
2136     TCGv ccr;
2137     TCGv sr;
2138 
2139     ccr = gen_get_ccr(s);
2140     sr = tcg_temp_new();
2141     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2142     tcg_gen_or_i32(sr, sr, ccr);
2143     return sr;
2144 }
2145 
/*
 * Load an immediate value into CCR (ccr_only) or the whole SR.
 * The lazy flag registers use this encoding: C and X hold 0/1 in bit 0,
 * N and V are "set" when negative (hence -1/0), and Z holds a value
 * that is zero iff the Z flag is set (hence 0/1 inverted).
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2161 
/* Load a runtime value into CCR (ccr_only) or, via helper, the whole SR. */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(cpu_env, val);
    }
    /* Flags now live in the flag registers, not a deferred cc_op. */
    set_cc_op(s, CC_OP_FLAGS);
}
2173 
2174 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2175                            bool ccr_only)
2176 {
2177     if ((insn & 0x3f) == 0x3c) {
2178         uint16_t val;
2179         val = read_im16(env, s);
2180         gen_set_sr_im(s, val, ccr_only);
2181     } else {
2182         TCGv src;
2183         SRC_EA(env, src, OS_WORD, 0, NULL);
2184         gen_set_sr(s, src, ccr_only);
2185     }
2186 }
2187 
2188 DISAS_INSN(arith_im)
2189 {
2190     int op;
2191     TCGv im;
2192     TCGv src1;
2193     TCGv dest;
2194     TCGv addr;
2195     int opsize;
2196     bool with_SR = ((insn & 0x3f) == 0x3c);
2197 
2198     op = (insn >> 9) & 7;
2199     opsize = insn_opsize(insn);
2200     switch (opsize) {
2201     case OS_BYTE:
2202         im = tcg_constant_i32((int8_t)read_im8(env, s));
2203         break;
2204     case OS_WORD:
2205         im = tcg_constant_i32((int16_t)read_im16(env, s));
2206         break;
2207     case OS_LONG:
2208         im = tcg_constant_i32(read_im32(env, s));
2209         break;
2210     default:
2211         g_assert_not_reached();
2212     }
2213 
2214     if (with_SR) {
2215         /* SR/CCR can only be used with andi/eori/ori */
2216         if (op == 2 || op == 3 || op == 6) {
2217             disas_undef(env, s, insn);
2218             return;
2219         }
2220         switch (opsize) {
2221         case OS_BYTE:
2222             src1 = gen_get_ccr(s);
2223             break;
2224         case OS_WORD:
2225             if (IS_USER(s)) {
2226                 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2227                 return;
2228             }
2229             src1 = gen_get_sr(s);
2230             break;
2231         default:
2232             /* OS_LONG; others already g_assert_not_reached.  */
2233             disas_undef(env, s, insn);
2234             return;
2235         }
2236     } else {
2237         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2238     }
2239     dest = tcg_temp_new();
2240     switch (op) {
2241     case 0: /* ori */
2242         tcg_gen_or_i32(dest, src1, im);
2243         if (with_SR) {
2244             gen_set_sr(s, dest, opsize == OS_BYTE);
2245             gen_exit_tb(s);
2246         } else {
2247             DEST_EA(env, insn, opsize, dest, &addr);
2248             gen_logic_cc(s, dest, opsize);
2249         }
2250         break;
2251     case 1: /* andi */
2252         tcg_gen_and_i32(dest, src1, im);
2253         if (with_SR) {
2254             gen_set_sr(s, dest, opsize == OS_BYTE);
2255             gen_exit_tb(s);
2256         } else {
2257             DEST_EA(env, insn, opsize, dest, &addr);
2258             gen_logic_cc(s, dest, opsize);
2259         }
2260         break;
2261     case 2: /* subi */
2262         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2263         tcg_gen_sub_i32(dest, src1, im);
2264         gen_update_cc_add(dest, im, opsize);
2265         set_cc_op(s, CC_OP_SUBB + opsize);
2266         DEST_EA(env, insn, opsize, dest, &addr);
2267         break;
2268     case 3: /* addi */
2269         tcg_gen_add_i32(dest, src1, im);
2270         gen_update_cc_add(dest, im, opsize);
2271         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2272         set_cc_op(s, CC_OP_ADDB + opsize);
2273         DEST_EA(env, insn, opsize, dest, &addr);
2274         break;
2275     case 5: /* eori */
2276         tcg_gen_xor_i32(dest, src1, im);
2277         if (with_SR) {
2278             gen_set_sr(s, dest, opsize == OS_BYTE);
2279             gen_exit_tb(s);
2280         } else {
2281             DEST_EA(env, insn, opsize, dest, &addr);
2282             gen_logic_cc(s, dest, opsize);
2283         }
2284         break;
2285     case 6: /* cmpi */
2286         gen_update_cc_cmp(s, src1, im, opsize);
2287         break;
2288     default:
2289         abort();
2290     }
2291 }
2292 
/* cas: single compare-and-swap on a memory operand. */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    /* Bits 10:9 encode the size; value 0 is not a cas encoding. */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    /* Extension word: Du (update value) in bits 8:6, Dc (compare) in 2:0. */
    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Compare value is sign-extended to match the signed memory load. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    /* Dc always receives the loaded value (a no-op on success). */
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Commit the (An)+ / -(An) side effect by hand. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2355 
/* cas2 (word): double compare-and-swap on two memory operands. */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    /* Each extension word: bit 15 = A/D, bits 14:12 = Rn number. */
    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* No parallel-safe word variant; fall back to serial execution. */
        gen_helper_exit_atomic(cpu_env);
    } else {
        /* Pack the four register numbers (Du2,Du1,Dc2,Dc1) for the helper. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2405 
/* cas2 (long): double compare-and-swap on two memory operands. */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    /* Each extension word: bit 15 = A/D, bits 14:12 = Rn number. */
    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers (Du2,Du1,Dc2,Dc1) for the helper. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2455 
2456 DISAS_INSN(byterev)
2457 {
2458     TCGv reg;
2459 
2460     reg = DREG(insn, 0);
2461     tcg_gen_bswap32_i32(reg, reg);
2462 }
2463 
2464 DISAS_INSN(move)
2465 {
2466     TCGv src;
2467     TCGv dest;
2468     int op;
2469     int opsize;
2470 
2471     switch (insn >> 12) {
2472     case 1: /* move.b */
2473         opsize = OS_BYTE;
2474         break;
2475     case 2: /* move.l */
2476         opsize = OS_LONG;
2477         break;
2478     case 3: /* move.w */
2479         opsize = OS_WORD;
2480         break;
2481     default:
2482         abort();
2483     }
2484     SRC_EA(env, src, opsize, 1, NULL);
2485     op = (insn >> 6) & 7;
2486     if (op == 1) {
2487         /* movea */
2488         /* The value will already have been sign extended.  */
2489         dest = AREG(insn, 9);
2490         tcg_gen_mov_i32(dest, src);
2491     } else {
2492         /* normal move */
2493         uint16_t dest_ea;
2494         dest_ea = ((insn >> 9) & 7) | (op << 3);
2495         DEST_EA(env, dest_ea, opsize, src, NULL);
2496         /* This will be correct because loads sign extend.  */
2497         gen_logic_cc(s, src, opsize);
2498     }
2499 }
2500 
/* negx: dest = 0 - (dest + X), with the usual sticky-Z semantics. */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    /* First add X to src (carry into the high half), then negate. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Only bit 0 of the double-width borrow is the X flag. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2543 
2544 DISAS_INSN(lea)
2545 {
2546     TCGv reg;
2547     TCGv tmp;
2548 
2549     reg = AREG(insn, 9);
2550     tmp = gen_lea(env, s, insn, OS_LONG);
2551     if (IS_NULL_QREG(tmp)) {
2552         gen_addr_fault(s);
2553         return;
2554     }
2555     tcg_gen_mov_i32(reg, tmp);
2556 }
2557 
2558 DISAS_INSN(clr)
2559 {
2560     int opsize;
2561     TCGv zero;
2562 
2563     zero = tcg_constant_i32(0);
2564     opsize = insn_opsize(insn);
2565     DEST_EA(env, insn, opsize, zero, NULL);
2566     gen_logic_cc(s, zero, opsize);
2567 }
2568 
2569 DISAS_INSN(move_from_ccr)
2570 {
2571     TCGv ccr;
2572 
2573     ccr = gen_get_ccr(s);
2574     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2575 }
2576 
2577 DISAS_INSN(neg)
2578 {
2579     TCGv src1;
2580     TCGv dest;
2581     TCGv addr;
2582     int opsize;
2583 
2584     opsize = insn_opsize(insn);
2585     SRC_EA(env, src1, opsize, 1, &addr);
2586     dest = tcg_temp_new();
2587     tcg_gen_neg_i32(dest, src1);
2588     set_cc_op(s, CC_OP_SUBB + opsize);
2589     gen_update_cc_add(dest, src1, opsize);
2590     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2591     DEST_EA(env, insn, opsize, dest, &addr);
2592 }
2593 
DISAS_INSN(move_to_ccr)
{
    /* move <ea>,ccr shares decoding with move-to-SR; ccr_only = true. */
    gen_move_to_sr(env, s, insn, true);
}
2598 
2599 DISAS_INSN(not)
2600 {
2601     TCGv src1;
2602     TCGv dest;
2603     TCGv addr;
2604     int opsize;
2605 
2606     opsize = insn_opsize(insn);
2607     SRC_EA(env, src1, opsize, 1, &addr);
2608     dest = tcg_temp_new();
2609     tcg_gen_not_i32(dest, src1);
2610     DEST_EA(env, insn, opsize, dest, &addr);
2611     gen_logic_cc(s, dest, opsize);
2612 }
2613 
2614 DISAS_INSN(swap)
2615 {
2616     TCGv src1;
2617     TCGv src2;
2618     TCGv reg;
2619 
2620     src1 = tcg_temp_new();
2621     src2 = tcg_temp_new();
2622     reg = DREG(insn, 0);
2623     tcg_gen_shli_i32(src1, reg, 16);
2624     tcg_gen_shri_i32(src2, reg, 16);
2625     tcg_gen_or_i32(reg, src1, src2);
2626     gen_logic_cc(s, reg, OS_LONG);
2627 }
2628 
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    /* User-mode emulation: surface bkpt as a debug trap. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    /* System emulation: bkpt takes the illegal-instruction exception. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2637 
2638 DISAS_INSN(pea)
2639 {
2640     TCGv tmp;
2641 
2642     tmp = gen_lea(env, s, insn, OS_LONG);
2643     if (IS_NULL_QREG(tmp)) {
2644         gen_addr_fault(s);
2645         return;
2646     }
2647     gen_push(s, tmp);
2648 }
2649 
2650 DISAS_INSN(ext)
2651 {
2652     int op;
2653     TCGv reg;
2654     TCGv tmp;
2655 
2656     reg = DREG(insn, 0);
2657     op = (insn >> 6) & 7;
2658     tmp = tcg_temp_new();
2659     if (op == 3)
2660         tcg_gen_ext16s_i32(tmp, reg);
2661     else
2662         tcg_gen_ext8s_i32(tmp, reg);
2663     if (op == 2)
2664         gen_partset_reg(OS_WORD, reg, tmp);
2665     else
2666         tcg_gen_mov_i32(reg, tmp);
2667     gen_logic_cc(s, tmp, OS_LONG);
2668 }
2669 
2670 DISAS_INSN(tst)
2671 {
2672     int opsize;
2673     TCGv tmp;
2674 
2675     opsize = insn_opsize(insn);
2676     SRC_EA(env, tmp, opsize, 1, NULL);
2677     gen_logic_cc(s, tmp, opsize);
2678 }
2679 
DISAS_INSN(pulse)
{
  /* No TCG code is generated: pulse has no effect we model here. */
  /* Implemented as a NOP.  */
}
2684 
DISAS_INSN(illegal)
{
    /* The explicitly-illegal opcode: always raises EXCP_ILLEGAL. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2689 
/* tas <ea>: test the byte (set N/Z), then set its top bit, atomically. */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        /* No atomicity needed: test then set bit 7. */
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        /* Atomic fetch-or of 0x80; src1 receives the old byte value. */
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* gen_lea_mode gave a bare address; commit (An)+ / -(An) here. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrememnt.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2723 
/* mulu.l / muls.l: 32x32 multiply, optionally with a 64-bit result. */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    /* Bit 11: signed (muls) vs unsigned (mulu). */
    sign = ext & 0x800;

    /* Bit 10: 64-bit result in Dh:Dl (requires the quad mul/div feature). */
    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half lands in CC_Z, high half in CC_N. */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the full 64-bit result being zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0: V must be set if the 32-bit result overflowed. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2787 
2788 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2789 {
2790     TCGv reg;
2791     TCGv tmp;
2792 
2793     reg = AREG(insn, 0);
2794     tmp = tcg_temp_new();
2795     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2796     gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2797     if ((insn & 7) != 7) {
2798         tcg_gen_mov_i32(reg, tmp);
2799     }
2800     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2801 }
2802 
2803 DISAS_INSN(link)
2804 {
2805     int16_t offset;
2806 
2807     offset = read_im16(env, s);
2808     gen_link(s, insn, offset);
2809 }
2810 
2811 DISAS_INSN(linkl)
2812 {
2813     int32_t offset;
2814 
2815     offset = read_im32(env, s);
2816     gen_link(s, insn, offset);
2817 }
2818 
2819 DISAS_INSN(unlk)
2820 {
2821     TCGv src;
2822     TCGv reg;
2823     TCGv tmp;
2824 
2825     src = tcg_temp_new();
2826     reg = AREG(insn, 0);
2827     tcg_gen_mov_i32(src, reg);
2828     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2829     tcg_gen_mov_i32(reg, tmp);
2830     tcg_gen_addi_i32(QREG_SP, src, 4);
2831 }
2832 
#if !defined(CONFIG_USER_ONLY)
DISAS_INSN(reset)
{
    /* reset is privileged; from user mode it raises a privilege fault. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    /* The actual reset side effects are implemented in the helper. */
    gen_helper_reset(cpu_env);
}
#endif
2844 
DISAS_INSN(nop)
{
    /* Nothing to generate. */
}
2848 
2849 DISAS_INSN(rtd)
2850 {
2851     TCGv tmp;
2852     int16_t offset = read_im16(env, s);
2853 
2854     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2855     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2856     gen_jmp(s, tmp);
2857 }
2858 
2859 DISAS_INSN(rtr)
2860 {
2861     TCGv tmp;
2862     TCGv ccr;
2863     TCGv sp;
2864 
2865     sp = tcg_temp_new();
2866     ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2867     tcg_gen_addi_i32(sp, QREG_SP, 2);
2868     tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2869     tcg_gen_addi_i32(QREG_SP, sp, 4);
2870 
2871     gen_set_sr(s, ccr, true);
2872 
2873     gen_jmp(s, tmp);
2874 }
2875 
2876 DISAS_INSN(rts)
2877 {
2878     TCGv tmp;
2879 
2880     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2881     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2882     gen_jmp(s, tmp);
2883 }
2884 
2885 DISAS_INSN(jump)
2886 {
2887     TCGv tmp;
2888 
2889     /*
2890      * Load the target address first to ensure correct exception
2891      * behavior.
2892      */
2893     tmp = gen_lea(env, s, insn, OS_LONG);
2894     if (IS_NULL_QREG(tmp)) {
2895         gen_addr_fault(s);
2896         return;
2897     }
2898     if ((insn & 0x40) == 0) {
2899         /* jsr */
2900         gen_push(s, tcg_constant_i32(s->pc));
2901     }
2902     gen_jmp(s, tmp);
2903 }
2904 
/* addq/subq #imm3,<ea>, where an immediate field of 0 encodes 8. */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    /* Bit 8 selects subq; otherwise addq. */
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* Borrow (X) is dest < val before the subtract. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* Carry (X) is dest < val after the add. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2952 
/* bra/bsr/Bcc with 8-, 16- or 32-bit displacement. */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* Displacements are relative to the PC after the opcode word. */
    base = s->pc;
    /* op: 0 = bra, 1 = bsr, 2..15 = Bcc condition codes. */
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    /* In-opcode displacement 0x00 / 0xff escapes to a 16/32-bit one. */
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc: branch around the taken path on the inverted condition. */
        TCGLabel *l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
2984 
2985 DISAS_INSN(moveq)
2986 {
2987     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2988     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2989 }
2990 
2991 DISAS_INSN(mvzs)
2992 {
2993     int opsize;
2994     TCGv src;
2995     TCGv reg;
2996 
2997     if (insn & 0x40)
2998         opsize = OS_WORD;
2999     else
3000         opsize = OS_BYTE;
3001     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3002     reg = DREG(insn, 9);
3003     tcg_gen_mov_i32(reg, src);
3004     gen_logic_cc(s, src, opsize);
3005 }
3006 
3007 DISAS_INSN(or)
3008 {
3009     TCGv reg;
3010     TCGv dest;
3011     TCGv src;
3012     TCGv addr;
3013     int opsize;
3014 
3015     opsize = insn_opsize(insn);
3016     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3017     dest = tcg_temp_new();
3018     if (insn & 0x100) {
3019         SRC_EA(env, src, opsize, 0, &addr);
3020         tcg_gen_or_i32(dest, src, reg);
3021         DEST_EA(env, insn, opsize, dest, &addr);
3022     } else {
3023         SRC_EA(env, src, opsize, 0, NULL);
3024         tcg_gen_or_i32(dest, src, reg);
3025         gen_partset_reg(opsize, DREG(insn, 9), dest);
3026     }
3027     gen_logic_cc(s, dest, opsize);
3028 }
3029 
3030 DISAS_INSN(suba)
3031 {
3032     TCGv src;
3033     TCGv reg;
3034 
3035     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3036     reg = AREG(insn, 9);
3037     tcg_gen_sub_i32(reg, reg, src);
3038 }
3039 
/*
 * Common subx body: compute dest - (src + X) and update all flags.
 * The sign-extended result is left in QREG_CC_N; the caller writes it
 * back to a register or memory.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    /* Double-width add of src + X, then double-width subtract from dest. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Only bit 0 of the high half is the borrow/X flag. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3072 
3073 DISAS_INSN(subx_reg)
3074 {
3075     TCGv dest;
3076     TCGv src;
3077     int opsize;
3078 
3079     opsize = insn_opsize(insn);
3080 
3081     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3082     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3083 
3084     gen_subx(s, src, dest, opsize);
3085 
3086     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3087 }
3088 
/* subx -(Ay),-(Ax): memory form of subtract-with-extend. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay in place, then load the subtrahend. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax in place, then load the minuend. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    /* Result is left in QREG_CC_N; store it back to -(Ax). */
    gen_subx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3111 
3112 DISAS_INSN(mov3q)
3113 {
3114     TCGv src;
3115     int val;
3116 
3117     val = (insn >> 9) & 7;
3118     if (val == 0) {
3119         val = -1;
3120     }
3121     src = tcg_constant_i32(val);
3122     gen_logic_cc(s, src, OS_LONG);
3123     DEST_EA(env, insn, OS_LONG, src, NULL);
3124 }
3125 
/*
 * CMP <ea>,Dn: compare Dn against the sign-extended source operand.
 * Only the condition codes are updated; no result is stored.
 */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3137 
3138 DISAS_INSN(cmpa)
3139 {
3140     int opsize;
3141     TCGv src;
3142     TCGv reg;
3143 
3144     if (insn & 0x100) {
3145         opsize = OS_LONG;
3146     } else {
3147         opsize = OS_WORD;
3148     }
3149     SRC_EA(env, src, opsize, 1, NULL);
3150     reg = AREG(insn, 9);
3151     gen_update_cc_cmp(s, reg, src, OS_LONG);
3152 }
3153 
/*
 * CMPM (Ay)+,(Ax)+: compare two memory operands using post-increment
 * addressing on both address registers; only the flags are updated.
 */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3168 
/*
 * EOR Dn,<ea>: exclusive-OR the data register into the effective
 * address operand (read-modify-write), setting N/Z for the result.
 */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    /* 'addr' is reused by DEST_EA so the EA is only computed once. */
    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
}
3184 
/* Exchange the run-time values of two 32-bit registers via a temporary. */
static void do_exg(TCGv reg1, TCGv reg2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_mov_i32(temp, reg1);
    tcg_gen_mov_i32(reg1, reg2);
    tcg_gen_mov_i32(reg2, temp);
}
3192 
DISAS_INSN(exg_dd)
{
    /* EXG Dx,Dy: exchange two data registers; flags are unaffected. */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3198 
DISAS_INSN(exg_aa)
{
    /* EXG Ax,Ay: exchange two address registers; flags are unaffected. */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3204 
DISAS_INSN(exg_da)
{
    /* EXG Dx,Ay: exchange a data and an address register. */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3210 
/*
 * AND: insn bit 8 set means Dn is the source and the EA operand is
 * the destination (read-modify-write); clear means the EA operand is
 * the source and the result goes into Dn.  N/Z are set from the
 * result either way.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* AND Dn,<ea>: write the result back to memory. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* AND <ea>,Dn: merge the result into the low part of Dn. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3234 
/*
 * ADDA <ea>,An: add to an address register.  Insn bit 8 selects a
 * long source; word sources are sign-extended.  No flags are changed.
 */
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
3244 
/*
 * Common code for ADDX: compute dest + src + X and set the flags.
 * Both operands must already be sign-extended to 32 bits for 'opsize'.
 * On return the sign-extended result is in QREG_CC_N and cc_op is
 * CC_OP_FLAGS; the caller is responsible for writing it back.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     * Done as two add2 steps so the carry out of the whole sum is
     * accumulated in QREG_CC_X.
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /*
     * Compute signed-overflow for addition: set V (in the sign bit)
     * when src and dest have the same sign but the result differs.
     */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3276 
/*
 * ADDX Dy,Dx: add with extend, register form.  Mirrors subx_reg:
 * operands are sign-extended, gen_addx leaves the result in
 * QREG_CC_N, and only the low 'opsize' part of Dx is updated.
 */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    /* Destination is Dx (insn bits 11:9), source is Dy (bits 2:0). */
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3292 
/*
 * ADDX -(Ay),-(Ax): add with extend, memory (predecrement) form.
 * Each address register is decremented by the operand size before its
 * operand is loaded; the result (QREG_CC_N) is stored back through Ax.
 */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay, then load the source operand. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax, then load the destination operand. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in QREG_CC_N; store it to (Ax). */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3315 
/*
 * Immediate-count shift.  The count comes from insn bits 11:9, with
 * 0 encoding a count of 8.  'logical' (insn bit 3) selects a logical
 * shift (operand zero-extended) over an arithmetic one (operand
 * sign-extended); 'left' (insn bit 8) selects the direction.  Flags
 * are computed inline and the result is written back to the low
 * 'opsize' part of Dn.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the last bit shifted out of the top of the operand. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V set iff any of the top 'count'+1 bits differ. */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
        }
    } else {
        /* C is the last bit shifted out of the bottom of the operand. */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3366 
/*
 * Register-count shift: the count comes from Dy (insn bits 11:9),
 * truncated modulo 64.  The shift is performed in 64 bits so that
 * the last bit shifted out (for C) is always available, even when
 * the count is >= the operand width.  X is only updated when the
 * count is non-zero; V handling differs between ColdFire (always
 * cleared) and M68000 (set on any sign-bit change for ASL).
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* For 32-bit operands the carry is simply the high half. */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* For narrower operands, C is the bit just above the result;
               force C=0 when the shift count is zero.  */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
        }
    } else {
        /* Shift the operand into the high half first, so the last bit
           shifted out lands in the top of the low half (for C).  */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3457 
/* Immediate-count shift, byte size. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3462 
/* Immediate-count shift, word size. */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3467 
/* Immediate-count shift, long size. */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3472 
/* Register-count shift, byte size. */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3477 
/* Register-count shift, word size. */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3482 
/* Register-count shift, long size. */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3487 
/*
 * Memory shift: always word-sized with an implicit count of 1.
 * Insn bit 3 selects logical over arithmetic, bit 8 the direction.
 * ColdFire always clears V; M68000 sets V for a left arithmetic
 * shift when the sign bit changes (see inline comment).
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is bit 15 of the original operand, shifted out on the left. */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* With a count of 1, V is set iff bits 15 and 14 differ. */
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is bit 0 of the original operand, shifted out on the right. */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3527 
/*
 * Emit a plain rotate (without X) of 'reg' by 'shift' and compute
 * N/Z/C/V.  For 8- and 16-bit sizes the value is first replicated to
 * fill 32 bits, so that a 32-bit rotate produces the correct result;
 * afterwards the result is sign-extended back.  C is taken from
 * bit 0 of the result for a left rotate, bit 31 for a right rotate;
 * V is always cleared and X is not affected.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3575 
/*
 * Set the flags after a rotate-through-X.  'reg' is sign-extended
 * in place to 'size' bits; N and Z are taken from the result, both
 * C and X receive the new X value, and V is cleared.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3594 
/*
 * Emit a rotate-through-X of the low 'size' bits of 'reg', in place.
 * Implemented as (reg << shl) | (reg >> shr) | (X << shx), with the
 * three shift amounts chosen according to the direction.  Returns
 * the new X value (bit 'size' of the rotated result).
 * Result of rotate_x() is valid only if 0 <= shift <= size.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3635 
/*
 * Emit a 32-bit rotate-through-X of 'reg', in place, by performing a
 * 64-bit rotate of a value assembled from reg and X.  Returns the
 * new X value.  If the count is zero, neither reg nor X is changed.
 * Result of rotate32_x() is valid only if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3696 
/*
 * Long-sized rotate with immediate count (insn bits 11:9, 0 encoding
 * 8).  When insn bit 3 is set a plain rotate is emitted, otherwise a
 * rotate through X.
 */
DISAS_INSN(rotate_im)
{
    TCGv shift;
    int tmp;
    int left = (insn & 0x100);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(DREG(insn, 0), shift, left, 32);
    } else {
        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
3718 
/*
 * Byte-sized rotate with immediate count (0 encoding 8).  Works on a
 * zero-extended copy and writes only the low byte of Dn back.
 */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3743 
/*
 * Word-sized rotate with immediate count (0 encoding 8).  Works on a
 * zero-extended copy and writes only the low word of Dn back.
 */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3767 
/*
 * Long-sized rotate with the count taken from Dy.  The raw count is
 * reduced modulo 64 for the zero-count test, modulo 32 for a plain
 * rotate and modulo 33 for a rotate through X.  A zero count leaves
 * C cleared for the plain rotate.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3798 
/*
 * Byte-sized rotate with the count taken from Dy.  Count is reduced
 * modulo 64 for the zero test, modulo 8 for a plain rotate and
 * modulo 9 for a rotate through X; only the low byte of Dn is
 * written back.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3830 
/*
 * Word-sized rotate with the count taken from Dy.  Count is reduced
 * modulo 64 for the zero test, modulo 16 for a plain rotate and
 * modulo 17 for a rotate through X; only the low word of Dn is
 * written back.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3862 
/*
 * Memory rotate: word-sized with an implicit count of 1.  When insn
 * bit 9 is set a plain rotate is emitted, otherwise a rotate through
 * X.  The result is written back to the same effective address.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3882 
/*
 * Bitfield extract from a data register.  Insn bit 9 selects the
 * signed variant.  The extension word supplies: destination register
 * (bits 14:12), offset (bits 10:6 immediate, or the register named
 * in bits 8:6 when bit 11 is set) and width (bits 4:0 with 0 meaning
 * 32, or the register named in bits 2:0 when bit 5 is set).  The
 * sign-extended field is left in QREG_CC_N for the flags.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width, computed as (-width) mod 32. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3949 
/*
 * Bitfield extract from memory, via helpers.  The extension word is
 * decoded as in bfext_reg; the base address comes from the EA.  For
 * the unsigned variant the helper returns both the extracted value
 * and the sign-extended field (for N) packed in a 64-bit result.
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5 set) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11 set) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3984 
/*
 * Bitfield operations (bfchg/bfclr/bfset/bfffo/bftst) on a data
 * register, selected by insn bits 11:8.  Two values are prepared:
 * QREG_CC_N holds the field rotated to the top of the word with the
 * remaining bits cleared (used for the flags and for bfffo), and
 * 'mask' holds a rotated mask that is zero over the field bits in
 * the original word.  tofs/tlen are only needed by the bfffo helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* The field wraps around bit 0; rotate instead of shift. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4071 
/*
 * Bitfield operations on memory, dispatched entirely to helpers.
 * Each helper returns the extracted field (for the flags) in
 * QREG_CC_N; bfffo additionally returns the found-bit result packed
 * with the field in a 64-bit value.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5 set) or 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11 set) or 5-bit immediate. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4118 
/*
 * Bitfield insert into a data register.  The source field comes from
 * the low bits of the register named in ext bits 14:12.  The field
 * is first shifted to the top of the word into QREG_CC_N to set the
 * flags, then merged into the destination at the requested
 * offset/width, wrapping around the 32-bit word when necessary.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps around bit 0: mask, rotate, and merge. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and field into place, then merge into dst. */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4188 
4189 DISAS_INSN(bfins_mem)
4190 {
4191     int ext = read_im16(env, s);
4192     TCGv src = DREG(ext, 12);
4193     TCGv addr, len, ofs;
4194 
4195     addr = gen_lea(env, s, insn, OS_UNSIZED);
4196     if (IS_NULL_QREG(addr)) {
4197         gen_addr_fault(s);
4198         return;
4199     }
4200 
4201     if (ext & 0x20) {
4202         len = DREG(ext, 0);
4203     } else {
4204         len = tcg_constant_i32(extract32(ext, 0, 5));
4205     }
4206     if (ext & 0x800) {
4207         ofs = DREG(ext, 6);
4208     } else {
4209         ofs = tcg_constant_i32(extract32(ext, 6, 5));
4210     }
4211 
4212     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4213     set_cc_op(s, CC_OP_LOGIC);
4214 }
4215 
4216 DISAS_INSN(ff1)
4217 {
4218     TCGv reg;
4219     reg = DREG(insn, 0);
4220     gen_logic_cc(s, reg, OS_LONG);
4221     gen_helper_ff1(reg, reg);
4222 }
4223 
4224 DISAS_INSN(chk)
4225 {
4226     TCGv src, reg;
4227     int opsize;
4228 
4229     switch ((insn >> 7) & 3) {
4230     case 3:
4231         opsize = OS_WORD;
4232         break;
4233     case 2:
4234         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4235             opsize = OS_LONG;
4236             break;
4237         }
4238         /* fallthru */
4239     default:
4240         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4241         return;
4242     }
4243     SRC_EA(env, src, opsize, 1, NULL);
4244     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4245 
4246     gen_flush_flags(s);
4247     gen_helper_chk(cpu_env, reg, src);
4248 }
4249 
/*
 * CHK2: compare a register against a lower/upper bound pair loaded
 * from memory; the chk2 helper performs the check itself.
 */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    /* Operand size is in opcode bits 10:9. */
    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    ext = read_im16(env, s);
    /* Bit 11 of the extension word must be set for CHK2. */
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /*
     * NOTE(review): no IS_NULL_QREG check here -- presumably only
     * control addressing modes are registered for this insn, so
     * gen_lea cannot fail; confirm against the decode table.
     */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    /* bound1 = lower bound, bound2 = upper bound, both sign-extended. */
    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    reg = tcg_temp_new();
    if (ext & 0x8000) {
        /* Address register operand: use the full 32-bit value. */
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        /* Data register operand: sign-extend to the operand size. */
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
}
4294 
4295 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4296 {
4297     TCGv addr;
4298     TCGv_i64 t0, t1;
4299 
4300     addr = tcg_temp_new();
4301 
4302     t0 = tcg_temp_new_i64();
4303     t1 = tcg_temp_new_i64();
4304 
4305     tcg_gen_andi_i32(addr, src, ~15);
4306     tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4307     tcg_gen_addi_i32(addr, addr, 8);
4308     tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4309 
4310     tcg_gen_andi_i32(addr, dst, ~15);
4311     tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4312     tcg_gen_addi_i32(addr, addr, 8);
4313     tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4314 }
4315 
4316 DISAS_INSN(move16_reg)
4317 {
4318     int index = IS_USER(s);
4319     TCGv tmp;
4320     uint16_t ext;
4321 
4322     ext = read_im16(env, s);
4323     if ((ext & (1 << 15)) == 0) {
4324         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4325     }
4326 
4327     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4328 
4329     /* Ax can be Ay, so save Ay before incrementing Ax */
4330     tmp = tcg_temp_new();
4331     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4332     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4333     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4334 }
4335 
4336 DISAS_INSN(move16_mem)
4337 {
4338     int index = IS_USER(s);
4339     TCGv reg, addr;
4340 
4341     reg = AREG(insn, 0);
4342     addr = tcg_constant_i32(read_im32(env, s));
4343 
4344     if ((insn >> 3) & 1) {
4345         /* MOVE16 (xxx).L, (Ay) */
4346         m68k_copy_line(reg, addr, index);
4347     } else {
4348         /* MOVE16 (Ay), (xxx).L */
4349         m68k_copy_line(addr, reg, index);
4350     }
4351 
4352     if (((insn >> 3) & 2) == 0) {
4353         /* (Ay)+ */
4354         tcg_gen_addi_i32(reg, reg, 16);
4355     }
4356 }
4357 
/*
 * STRLDSR (ColdFire): push the current SR, then load SR from an
 * immediate.  The insn is a two-word sequence: this opcode must be
 * followed by the MOVE #imm,SR opcode (0x46fc) and the immediate.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* Address of this insn, used as the exception PC. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        /* Second word is not the MOVE-to-SR opcode. */
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    /* Privileged, and the new SR must keep the supervisor bit set. */
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    /* Push the old SR before installing the new one. */
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    gen_exit_tb(s);
}
4378 
4379 DISAS_INSN(move_from_sr)
4380 {
4381     TCGv sr;
4382 
4383     if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
4384         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4385         return;
4386     }
4387     sr = gen_get_sr(s);
4388     DEST_EA(env, insn, OS_WORD, sr, NULL);
4389 }
4390 
4391 #if !defined(CONFIG_USER_ONLY)
/*
 * MOVES: move to/from an alternate address space selected by the
 * SFC/DFC control registers.  Privileged.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Loads into An are sign-extended to 32 bits. */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Loads into Dn only modify the low part. */
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Perform the address-register writeback for (An)+ and -(An). */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through SP still keep it word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4449 
/* MOVE to SR (privileged).  An SR change ends the translation block. */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
4459 
/* MOVE USP,An (privileged): read the saved user stack pointer. */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4469 
/* MOVE An,USP (privileged): write the saved user stack pointer. */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4479 
/* HALT (privileged): implemented by raising EXCP_HALT_INSN. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    /* s->pc is the address of the next insn, resumed after the halt. */
    gen_exception(s, s->pc, EXCP_HALT_INSN);
}
4489 
/*
 * STOP #imm (privileged): load SR from the immediate, mark the CPU
 * halted and raise EXCP_HLT to leave the execution loop.
 */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4505 
/* RTE (privileged): the actual frame unwinding is done by the
   EXCP_RTE exception handler outside translated code. */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4514 
/*
 * ColdFire MOVEC (privileged).  Only the to-control-register
 * direction is generated here; bit 15 of the extension word selects
 * An vs Dn as the general register.
 */
DISAS_INSN(cf_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    /* Low 12 bits of the extension word name the control register. */
    gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
    gen_exit_tb(s);
}
4535 
4536 DISAS_INSN(m68k_movec)
4537 {
4538     uint16_t ext;
4539     TCGv reg, creg;
4540 
4541     if (IS_USER(s)) {
4542         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4543         return;
4544     }
4545 
4546     ext = read_im16(env, s);
4547 
4548     if (ext & 0x8000) {
4549         reg = AREG(ext, 12);
4550     } else {
4551         reg = DREG(ext, 12);
4552     }
4553     creg = tcg_constant_i32(ext & 0xfff);
4554     if (insn & 1) {
4555         gen_helper_m68k_movec_to(cpu_env, creg, reg);
4556     } else {
4557         gen_helper_m68k_movec_from(reg, cpu_env, creg);
4558     }
4559     gen_exit_tb(s);
4560 }
4561 
/* INTOUCH (privileged): instruction-cache touch, modeled as a no-op. */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4570 
/* CPUSHL (privileged): cache line push/invalidate, modeled as a no-op. */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4579 
/* CPUSH (privileged): cache push/invalidate, modeled as a no-op. */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4588 
/* CINV (privileged): cache line invalidate, modeled as a no-op. */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.  */
}
4597 
4598 #if !defined(CONFIG_USER_ONLY)
/*
 * PFLUSH (privileged): flush ATC entries via the pflush helper.
 * Opcode bits 4:3 carry the opmode; An supplies the address operand.
 */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_constant_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
4611 
/*
 * PTEST (privileged): probe an address translation via the ptest
 * helper.  Opcode bit 5 selects a read (1) or write (0) probe.
 */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_constant_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
4623 #endif
4624 
/* WDDATA: not implemented; unconditionally raises a privilege fault. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4629 
/* WDEBUG (privileged): unimplemented; aborts emulation if reached. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4639 #endif
4640 
/* TRAP #n: raise EXCP_TRAP0 + n; s->pc (next insn) is the return PC. */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4645 
/*
 * Shared tail of TRAPcc/TRAPV: raise a format-2 TRAPCC exception when
 * condition c holds; emit nothing at all for a never-true condition.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        /* PC advances past the insn; the frame records the insn address. */
        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            /* Fall-through path: translation continues normally. */
            gen_set_label(over);
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4668 
/*
 * TRAPcc: trap on condition.  The .w/.l opmodes carry an immediate
 * operand with no architectural effect; it is read and discarded.
 */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    /* Condition code is in opcode bits 11:8. */
    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4691 
/* TRAPV: trap on overflow -- TRAPcc with condition 9 (V set). */
DISAS_INSN(trapv)
{
    DisasCompare c;

    gen_cc_cond(&c, s, 9); /* V set */
    do_trapcc(s, &c);
}
4699 
4700 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4701 {
4702     switch (reg) {
4703     case M68K_FPIAR:
4704         tcg_gen_movi_i32(res, 0);
4705         break;
4706     case M68K_FPSR:
4707         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4708         break;
4709     case M68K_FPCR:
4710         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4711         break;
4712     }
4713 }
4714 
4715 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4716 {
4717     switch (reg) {
4718     case M68K_FPIAR:
4719         break;
4720     case M68K_FPSR:
4721         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4722         break;
4723     case M68K_FPCR:
4724         gen_helper_set_fpcr(cpu_env, val);
4725         break;
4726     }
4727 }
4728 
/* Store the value of FP control register 'reg' to memory at addr. */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    gen_load_fcr(s, tmp, reg);
    tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
}
4738 
/* Load FP control register 'reg' from memory at addr. */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
    gen_store_fcr(s, tmp, reg);
}
4748 
4749 
4750 static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4751                              uint32_t insn, uint32_t ext)
4752 {
4753     int mask = (ext >> 10) & 7;
4754     int is_write = (ext >> 13) & 1;
4755     int mode = extract32(insn, 3, 3);
4756     int i;
4757     TCGv addr, tmp;
4758 
4759     switch (mode) {
4760     case 0: /* Dn */
4761         if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4762             gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4763             return;
4764         }
4765         if (is_write) {
4766             gen_load_fcr(s, DREG(insn, 0), mask);
4767         } else {
4768             gen_store_fcr(s, DREG(insn, 0), mask);
4769         }
4770         return;
4771     case 1: /* An, only with FPIAR */
4772         if (mask != M68K_FPIAR) {
4773             gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4774             return;
4775         }
4776         if (is_write) {
4777             gen_load_fcr(s, AREG(insn, 0), mask);
4778         } else {
4779             gen_store_fcr(s, AREG(insn, 0), mask);
4780         }
4781         return;
4782     case 7: /* Immediate */
4783         if (REG(insn, 0) == 4) {
4784             if (is_write ||
4785                 (mask != M68K_FPIAR && mask != M68K_FPSR &&
4786                  mask != M68K_FPCR)) {
4787                 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4788                 return;
4789             }
4790             tmp = tcg_constant_i32(read_im32(env, s));
4791             gen_store_fcr(s, tmp, mask);
4792             return;
4793         }
4794         break;
4795     default:
4796         break;
4797     }
4798 
4799     tmp = gen_lea(env, s, insn, OS_LONG);
4800     if (IS_NULL_QREG(tmp)) {
4801         gen_addr_fault(s);
4802         return;
4803     }
4804 
4805     addr = tcg_temp_new();
4806     tcg_gen_mov_i32(addr, tmp);
4807 
4808     /*
4809      * mask:
4810      *
4811      * 0b100 Floating-Point Control Register
4812      * 0b010 Floating-Point Status Register
4813      * 0b001 Floating-Point Instruction Address Register
4814      *
4815      */
4816 
4817     if (is_write && mode == 4) {
4818         for (i = 2; i >= 0; i--, mask >>= 1) {
4819             if (mask & 1) {
4820                 gen_qemu_store_fcr(s, addr, 1 << i);
4821                 if (mask != 1) {
4822                     tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
4823                 }
4824             }
4825        }
4826        tcg_gen_mov_i32(AREG(insn, 0), addr);
4827     } else {
4828         for (i = 0; i < 3; i++, mask >>= 1) {
4829             if (mask & 1) {
4830                 if (is_write) {
4831                     gen_qemu_store_fcr(s, addr, 1 << i);
4832                 } else {
4833                     gen_qemu_load_fcr(s, addr, 1 << i);
4834                 }
4835                 if (mask != 1 || mode == 3) {
4836                     tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
4837                 }
4838             }
4839         }
4840         if (mode == 3) {
4841             tcg_gen_mov_i32(AREG(insn, 0), addr);
4842         }
4843     }
4844 }
4845 
/*
 * FMOVEM: load/store multiple FP data registers.  The register list
 * comes either from the extension word (static) or from a data
 * register (dynamic); the actual transfer is done by helpers that
 * also return the final address.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the register list; helpers return the end address in it. */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* (An)+ and -(An) modes write the final address back to An. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4905 
4906 /*
4907  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4908  * immediately before the next FP instruction is executed.
4909  */
4910 DISAS_INSN(fpu)
4911 {
4912     uint16_t ext;
4913     int opmode;
4914     int opsize;
4915     TCGv_ptr cpu_src, cpu_dest;
4916 
4917     ext = read_im16(env, s);
4918     opmode = ext & 0x7f;
4919     switch ((ext >> 13) & 7) {
4920     case 0:
4921         break;
4922     case 1:
4923         goto undef;
4924     case 2:
4925         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4926             /* fmovecr */
4927             TCGv rom_offset = tcg_constant_i32(opmode);
4928             cpu_dest = gen_fp_ptr(REG(ext, 7));
4929             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4930             return;
4931         }
4932         break;
4933     case 3: /* fmove out */
4934         cpu_src = gen_fp_ptr(REG(ext, 7));
4935         opsize = ext_opsize(ext, 10);
4936         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4937                       EA_STORE, IS_USER(s)) == -1) {
4938             gen_addr_fault(s);
4939         }
4940         gen_helper_ftst(cpu_env, cpu_src);
4941         return;
4942     case 4: /* fmove to control register.  */
4943     case 5: /* fmove from control register.  */
4944         gen_op_fmove_fcr(env, s, insn, ext);
4945         return;
4946     case 6: /* fmovem */
4947     case 7:
4948         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4949             goto undef;
4950         }
4951         gen_op_fmovem(env, s, insn, ext);
4952         return;
4953     }
4954     if (ext & (1 << 14)) {
4955         /* Source effective address.  */
4956         opsize = ext_opsize(ext, 10);
4957         cpu_src = gen_fp_result_ptr();
4958         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4959                       EA_LOADS, IS_USER(s)) == -1) {
4960             gen_addr_fault(s);
4961             return;
4962         }
4963     } else {
4964         /* Source register.  */
4965         opsize = OS_EXTENDED;
4966         cpu_src = gen_fp_ptr(REG(ext, 10));
4967     }
4968     cpu_dest = gen_fp_ptr(REG(ext, 7));
4969     switch (opmode) {
4970     case 0: /* fmove */
4971         gen_fp_move(cpu_dest, cpu_src);
4972         break;
4973     case 0x40: /* fsmove */
4974         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4975         break;
4976     case 0x44: /* fdmove */
4977         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4978         break;
4979     case 1: /* fint */
4980         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4981         break;
4982     case 2: /* fsinh */
4983         gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
4984         break;
4985     case 3: /* fintrz */
4986         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4987         break;
4988     case 4: /* fsqrt */
4989         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4990         break;
4991     case 0x41: /* fssqrt */
4992         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
4993         break;
4994     case 0x45: /* fdsqrt */
4995         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
4996         break;
4997     case 0x06: /* flognp1 */
4998         gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
4999         break;
5000     case 0x08: /* fetoxm1 */
5001         gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
5002         break;
5003     case 0x09: /* ftanh */
5004         gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5005         break;
5006     case 0x0a: /* fatan */
5007         gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5008         break;
5009     case 0x0c: /* fasin */
5010         gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5011         break;
5012     case 0x0d: /* fatanh */
5013         gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5014         break;
5015     case 0x0e: /* fsin */
5016         gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5017         break;
5018     case 0x0f: /* ftan */
5019         gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5020         break;
5021     case 0x10: /* fetox */
5022         gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5023         break;
5024     case 0x11: /* ftwotox */
5025         gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5026         break;
5027     case 0x12: /* ftentox */
5028         gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5029         break;
5030     case 0x14: /* flogn */
5031         gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5032         break;
5033     case 0x15: /* flog10 */
5034         gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5035         break;
5036     case 0x16: /* flog2 */
5037         gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5038         break;
5039     case 0x18: /* fabs */
5040         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5041         break;
5042     case 0x58: /* fsabs */
5043         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5044         break;
5045     case 0x5c: /* fdabs */
5046         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5047         break;
5048     case 0x19: /* fcosh */
5049         gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5050         break;
5051     case 0x1a: /* fneg */
5052         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5053         break;
5054     case 0x5a: /* fsneg */
5055         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5056         break;
5057     case 0x5e: /* fdneg */
5058         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5059         break;
5060     case 0x1c: /* facos */
5061         gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5062         break;
5063     case 0x1d: /* fcos */
5064         gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5065         break;
5066     case 0x1e: /* fgetexp */
5067         gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5068         break;
5069     case 0x1f: /* fgetman */
5070         gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5071         break;
5072     case 0x20: /* fdiv */
5073         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5074         break;
5075     case 0x60: /* fsdiv */
5076         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5077         break;
5078     case 0x64: /* fddiv */
5079         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5080         break;
5081     case 0x21: /* fmod */
5082         gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5083         break;
5084     case 0x22: /* fadd */
5085         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5086         break;
5087     case 0x62: /* fsadd */
5088         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5089         break;
5090     case 0x66: /* fdadd */
5091         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5092         break;
5093     case 0x23: /* fmul */
5094         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5095         break;
5096     case 0x63: /* fsmul */
5097         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5098         break;
5099     case 0x67: /* fdmul */
5100         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5101         break;
5102     case 0x24: /* fsgldiv */
5103         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5104         break;
5105     case 0x25: /* frem */
5106         gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5107         break;
5108     case 0x26: /* fscale */
5109         gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5110         break;
5111     case 0x27: /* fsglmul */
5112         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5113         break;
5114     case 0x28: /* fsub */
5115         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5116         break;
5117     case 0x68: /* fssub */
5118         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5119         break;
5120     case 0x6c: /* fdsub */
5121         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5122         break;
5123     case 0x30: case 0x31: case 0x32:
5124     case 0x33: case 0x34: case 0x35:
5125     case 0x36: case 0x37: {
5126             TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5127             gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5128         }
5129         break;
5130     case 0x38: /* fcmp */
5131         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5132         return;
5133     case 0x3a: /* ftst */
5134         gen_helper_ftst(cpu_env, cpu_src);
5135         return;
5136     default:
5137         goto undef;
5138     }
5139     gen_helper_ftst(cpu_env, cpu_dest);
5140     return;
5141 undef:
5142     /* FIXME: Is this right for offset addressing modes?  */
5143     s->pc -= 2;
5144     disas_undef_fpu(env, s, insn);
5145 }
5146 
/*
 * Initialize *c with a TCG comparison (v1 tcond v2) that is true exactly
 * when FPU conditional predicate 'cond' (6 bits) holds, in terms of the
 * FPSR condition-code bits used by the per-case formulas below:
 * N (negative), Z (zero), A (NaN / unordered result).
 * Predicates 16..31 are the "signaling" duplicates of 0..15; they should
 * additionally raise BSUN on an unordered operand, which is not yet
 * implemented (see TODO below).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        /* Shift the A bit into the N bit position so A and N can be ORed. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* Invert the combined (A|N) bit: nonzero iff Z || !(A || N). */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        /* Flip N first, then require all of (!N, A, Z) to be clear. */
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        /* Align A with N, then clear N when A is set: leaves Z | (N & !A). */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        /* Shift Z into the N bit position so N and Z can be ORed. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* Invert the combined (N|Z) bit: nonzero iff A || !(N || Z). */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        /* Align Z with N, then clear N when Z is set: leaves A | (N & !Z). */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5269 
5270 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5271 {
5272     DisasCompare c;
5273 
5274     gen_fcc_cond(&c, s, cond);
5275     update_cc_op(s);
5276     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5277 }
5278 
5279 DISAS_INSN(fbcc)
5280 {
5281     uint32_t offset;
5282     uint32_t base;
5283     TCGLabel *l1;
5284 
5285     base = s->pc;
5286     offset = (int16_t)read_im16(env, s);
5287     if (insn & (1 << 6)) {
5288         offset = (offset << 16) | read_im16(env, s);
5289     }
5290 
5291     l1 = gen_new_label();
5292     update_cc_op(s);
5293     gen_fjmpcc(s, insn & 0x3f, l1);
5294     gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5295     gen_set_label(l1);
5296     gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5297 }
5298 
5299 DISAS_INSN(fscc)
5300 {
5301     DisasCompare c;
5302     int cond;
5303     TCGv tmp;
5304     uint16_t ext;
5305 
5306     ext = read_im16(env, s);
5307     cond = ext & 0x3f;
5308     gen_fcc_cond(&c, s, cond);
5309 
5310     tmp = tcg_temp_new();
5311     tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
5312 
5313     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5314 }
5315 
5316 DISAS_INSN(ftrapcc)
5317 {
5318     DisasCompare c;
5319     uint16_t ext;
5320     int cond;
5321 
5322     ext = read_im16(env, s);
5323     cond = ext & 0x3f;
5324 
5325     /* Consume and discard the immediate operand. */
5326     switch (extract32(insn, 0, 3)) {
5327     case 2: /* ftrapcc.w */
5328         (void)read_im16(env, s);
5329         break;
5330     case 3: /* ftrapcc.l */
5331         (void)read_im32(env, s);
5332         break;
5333     case 4: /* ftrapcc (no operand) */
5334         break;
5335     default:
5336         /* ftrapcc registered with only valid opmodes */
5337         g_assert_not_reached();
5338     }
5339 
5340     gen_fcc_cond(&c, s, cond);
5341     do_trapcc(s, &c);
5342 }
5343 
5344 #if !defined(CONFIG_USER_ONLY)
5345 DISAS_INSN(frestore)
5346 {
5347     TCGv addr;
5348 
5349     if (IS_USER(s)) {
5350         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5351         return;
5352     }
5353     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5354         SRC_EA(env, addr, OS_LONG, 0, NULL);
5355         /* FIXME: check the state frame */
5356     } else {
5357         disas_undef(env, s, insn);
5358     }
5359 }
5360 
5361 DISAS_INSN(fsave)
5362 {
5363     if (IS_USER(s)) {
5364         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5365         return;
5366     }
5367 
5368     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5369         /* always write IDLE */
5370         TCGv idle = tcg_constant_i32(0x41000000);
5371         DEST_EA(env, insn, OS_LONG, idle, NULL);
5372     } else {
5373         disas_undef(env, s, insn);
5374     }
5375 }
5376 #endif
5377 
5378 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5379 {
5380     TCGv tmp = tcg_temp_new();
5381     if (s->env->macsr & MACSR_FI) {
5382         if (upper)
5383             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5384         else
5385             tcg_gen_shli_i32(tmp, val, 16);
5386     } else if (s->env->macsr & MACSR_SU) {
5387         if (upper)
5388             tcg_gen_sari_i32(tmp, val, 16);
5389         else
5390             tcg_gen_ext16s_i32(tmp, val);
5391     } else {
5392         if (upper)
5393             tcg_gen_shri_i32(tmp, val, 16);
5394         else
5395             tcg_gen_ext16u_i32(tmp, val);
5396     }
5397     return tmp;
5398 }
5399 
/* Clear the MAC result flags (V, Z, N, EV) in MACSR, keeping all others. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5405 
/*
 * MAC/MSAC: multiply two register operands (16- or 32-bit halves selected
 * by the extension word) and add to / subtract from one of the four MAC
 * accumulators, with per-mode saturation.  Also handles the "MAC with
 * load" form (parallel memory load + register writeback) and, on EMAC_B,
 * the dual-accumulate variant.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        /* Lazily allocate the 64-bit product temp shared by this TB. */
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high bit. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        /* The dual-accumulate form only exists on EMAC_B. */
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        /* The load form encodes the low accumulator-number bit inverted. */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Apply the shift factor from ext bits [10:9]: 1 = <<1, 3 = >>1. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Insn bit 8 selects subtract (MSAC) vs add (MAC). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator according to the current operand mode. */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp)
;
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* MAC with load: write back the loaded value and update the EA. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5573 
5574 DISAS_INSN(from_mac)
5575 {
5576     TCGv rx;
5577     TCGv_i64 acc;
5578     int accnum;
5579 
5580     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5581     accnum = (insn >> 9) & 3;
5582     acc = MACREG(accnum);
5583     if (s->env->macsr & MACSR_FI) {
5584         gen_helper_get_macf(rx, cpu_env, acc);
5585     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5586         tcg_gen_extrl_i64_i32(rx, acc);
5587     } else if (s->env->macsr & MACSR_SU) {
5588         gen_helper_get_macs(rx, acc);
5589     } else {
5590         gen_helper_get_macu(rx, acc);
5591     }
5592     if (insn & 0x40) {
5593         tcg_gen_movi_i64(acc, 0);
5594         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5595     }
5596 }
5597 
5598 DISAS_INSN(move_mac)
5599 {
5600     /* FIXME: This can be done without a helper.  */
5601     int src;
5602     TCGv dest;
5603     src = insn & 3;
5604     dest = tcg_constant_i32((insn >> 9) & 3);
5605     gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
5606     gen_mac_clear_flags();
5607     gen_helper_mac_set_flags(cpu_env, dest);
5608 }
5609 
5610 DISAS_INSN(from_macsr)
5611 {
5612     TCGv reg;
5613 
5614     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5615     tcg_gen_mov_i32(reg, QREG_MACSR);
5616 }
5617 
5618 DISAS_INSN(from_mask)
5619 {
5620     TCGv reg;
5621     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5622     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5623 }
5624 
5625 DISAS_INSN(from_mext)
5626 {
5627     TCGv reg;
5628     TCGv acc;
5629     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5630     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5631     if (s->env->macsr & MACSR_FI)
5632         gen_helper_get_mac_extf(reg, cpu_env, acc);
5633     else
5634         gen_helper_get_mac_exti(reg, cpu_env, acc);
5635 }
5636 
5637 DISAS_INSN(macsr_to_ccr)
5638 {
5639     TCGv tmp = tcg_temp_new();
5640 
5641     /* Note that X and C are always cleared. */
5642     tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5643     gen_helper_set_ccr(cpu_env, tmp);
5644     set_cc_op(s, CC_OP_FLAGS);
5645 }
5646 
5647 DISAS_INSN(to_mac)
5648 {
5649     TCGv_i64 acc;
5650     TCGv val;
5651     int accnum;
5652     accnum = (insn >> 9) & 3;
5653     acc = MACREG(accnum);
5654     SRC_EA(env, val, OS_LONG, 0, NULL);
5655     if (s->env->macsr & MACSR_FI) {
5656         tcg_gen_ext_i32_i64(acc, val);
5657         tcg_gen_shli_i64(acc, acc, 8);
5658     } else if (s->env->macsr & MACSR_SU) {
5659         tcg_gen_ext_i32_i64(acc, val);
5660     } else {
5661         tcg_gen_extu_i32_i64(acc, val);
5662     }
5663     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5664     gen_mac_clear_flags();
5665     gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
5666 }
5667 
5668 DISAS_INSN(to_macsr)
5669 {
5670     TCGv val;
5671     SRC_EA(env, val, OS_LONG, 0, NULL);
5672     gen_helper_set_macsr(cpu_env, val);
5673     gen_exit_tb(s);
5674 }
5675 
5676 DISAS_INSN(to_mask)
5677 {
5678     TCGv val;
5679     SRC_EA(env, val, OS_LONG, 0, NULL);
5680     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5681 }
5682 
5683 DISAS_INSN(to_mext)
5684 {
5685     TCGv val;
5686     TCGv acc;
5687     SRC_EA(env, val, OS_LONG, 0, NULL);
5688     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5689     if (s->env->macsr & MACSR_FI)
5690         gen_helper_set_mac_extf(cpu_env, val, acc);
5691     else if (s->env->macsr & MACSR_SU)
5692         gen_helper_set_mac_exts(cpu_env, val, acc);
5693     else
5694         gen_helper_set_mac_extu(cpu_env, val, acc);
5695 }
5696 
5697 static disas_proc opcode_table[65536];
5698 
5699 static void
5700 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5701 {
5702   int i;
5703   int from;
5704   int to;
5705 
5706   /* Sanity check.  All set bits must be included in the mask.  */
5707   if (opcode & ~mask) {
5708       fprintf(stderr,
5709               "qemu internal error: bogus opcode definition %04x/%04x\n",
5710               opcode, mask);
5711       abort();
5712   }
5713   /*
5714    * This could probably be cleverer.  For now just optimize the case where
5715    * the top bits are known.
5716    */
5717   /* Find the first zero bit in the mask.  */
5718   i = 0x8000;
5719   while ((i & mask) != 0)
5720       i >>= 1;
5721   /* Iterate over all combinations of this and lower bits.  */
5722   if (i == 0)
5723       i = 1;
5724   else
5725       i <<= 1;
5726   from = opcode & ~(i - 1);
5727   to = from + i;
5728   for (i = from; i < to; i++) {
5729       if ((i & mask) == opcode)
5730           opcode_table[i] = proc;
5731   }
5732 }
5733 
5734 /*
5735  * Register m68k opcode handlers.  Order is important.
5736  * Later insn override earlier ones.
5737  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /*
     * Entries are (handler, hex opcode, hex mask); a later entry
     * overrides any earlier one matching the same opcodes.
     */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if !defined(CONFIG_USER_ONLY)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if !defined(CONFIG_USER_ONLY)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
5990 
5991 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
5992 {
5993     DisasContext *dc = container_of(dcbase, DisasContext, base);
5994     CPUM68KState *env = cpu->env_ptr;
5995 
5996     dc->env = env;
5997     dc->pc = dc->base.pc_first;
5998     /* This value will always be filled in properly before m68k_tr_tb_stop. */
5999     dc->pc_prev = 0xdeadbeef;
6000     dc->cc_op = CC_OP_DYNAMIC;
6001     dc->cc_op_synced = 1;
6002     dc->done_mac = 0;
6003     dc->writeback_mask = 0;
6004 
6005     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6006     /* If architectural single step active, limit to 1 */
6007     if (dc->ss_active) {
6008         dc->base.max_insns = 1;
6009     }
6010 }
6011 
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* No per-TB setup is required for m68k. */
}
6015 
6016 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6017 {
6018     DisasContext *dc = container_of(dcbase, DisasContext, base);
6019     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6020 }
6021 
6022 static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6023 {
6024     DisasContext *dc = container_of(dcbase, DisasContext, base);
6025     CPUM68KState *env = cpu->env_ptr;
6026     uint16_t insn = read_im16(env, dc);
6027 
6028     opcode_table[insn](env, dc, insn);
6029     do_writebacks(dc);
6030 
6031     dc->pc_prev = dc->base.pc_next;
6032     dc->base.pc_next = dc->pc;
6033 
6034     if (dc->base.is_jmp == DISAS_NEXT) {
6035         /*
6036          * Stop translation when the next insn might touch a new page.
6037          * This ensures that prefetch aborts at the right place.
6038          *
6039          * We cannot determine the size of the next insn without
6040          * completely decoding it.  However, the maximum insn size
6041          * is 32 bytes, so end if we do not have that much remaining.
6042          * This may produce several small TBs at the end of each page,
6043          * but they will all be linked with goto_tb.
6044          *
6045          * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6046          * smaller than MC68020's.
6047          */
6048         target_ulong start_page_offset
6049             = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6050 
6051         if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6052             dc->base.is_jmp = DISAS_TOO_MANY;
6053         }
6054     }
6055 }
6056 
6057 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6058 {
6059     DisasContext *dc = container_of(dcbase, DisasContext, base);
6060 
6061     switch (dc->base.is_jmp) {
6062     case DISAS_NORETURN:
6063         break;
6064     case DISAS_TOO_MANY:
6065         update_cc_op(dc);
6066         gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6067         break;
6068     case DISAS_JUMP:
6069         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6070         if (dc->ss_active) {
6071             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6072         } else {
6073             tcg_gen_lookup_and_goto_ptr();
6074         }
6075         break;
6076     case DISAS_EXIT:
6077         /*
6078          * We updated CC_OP and PC in gen_exit_tb, but also modified
6079          * other state that may require returning to the main loop.
6080          */
6081         if (dc->ss_active) {
6082             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6083         } else {
6084             tcg_gen_exit_tb(NULL, 0);
6085         }
6086         break;
6087     default:
6088         g_assert_not_reached();
6089     }
6090 }
6091 
/*
 * TranslatorOps.disas_log hook: write a disassembly of the just-translated
 * TB to @logfile, preceded by the symbol name at the TB's start address.
 */
static void m68k_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
6098 
/* Hook table wiring the m68k front end into the generic translator loop. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6107 
/*
 * Entry point from the generic TCG machinery: translate one TB starting
 * at guest address @pc by driving translator_loop() with the m68k hooks.
 */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
}
6114 
6115 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6116 {
6117     floatx80 a = { .high = high, .low = low };
6118     union {
6119         float64 f64;
6120         double d;
6121     } u;
6122 
6123     u.f64 = floatx80_to_float64(a, &env->fp_status);
6124     return u.d;
6125 }
6126 
/*
 * Dump the m68k CPU state to @f for 'info registers' / debug logging:
 * data/address/FP registers, PC, SR with decoded trace/interrupt/CC
 * bits, FPSR condition codes, FPCR precision and rounding mode, and
 * (system emulation only) the alternate stack pointers and MMU state.
 * @flags is currently unused.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    /* One line per register index: Dn, An, and FPn (hex + approx decimal). */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* env->sr holds only the non-CC bits; merge in the live CCR. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* FPCR rounding precision: extended / single / double. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* FPCR rounding mode: nearest / toward zero / minus inf / plus inf. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* Banked A7 stack pointers; "->" marks the one currently in use. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif /* !CONFIG_USER_ONLY */
}
6198