xref: /openbmc/qemu/target/m68k/translate.c (revision 83ecdb18)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/translator.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 
34 #include "exec/log.h"
35 #include "fpu/softfloat.h"
36 
37 
//#define DEBUG_DISPATCH 1

/*
 * Expand qregs.h.inc once to declare a TCGv / TCGv_i64 global for each
 * fixed QEMU register; the same macros are re-defined with real
 * initializers inside m68k_tcg_init() below.
 */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

/* TCG views of CPUState.halted and CPUState.exception_index. */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/*
 * Backing store for the register names passed to the TCG core:
 * "Dn"/"An" take 3 bytes each (2 * 8 * 3), "ACCn" takes 5 (5 * 4).
 */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];       /* data registers D0-D7 */
static TCGv cpu_aregs[8];       /* address registers A0-A7 */
static TCGv_i64 cpu_macc[4];    /* MAC accumulators ACC0-ACC3 */

/* Extract a 3-bit register number from instruction bits [pos+2:pos]. */
#define REG(insn, pos)  (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* AREG goes through get_areg() so pending writebacks are honoured. */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         get_areg(s, 7)

/* Sentinel TCGv returned for invalid addressing modes. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "exec/gen-icount.h"
66 
/*
 * One-time TCG initialisation: create the global TCG temporaries that
 * mirror the fields of CPUM68KState (and two fields of CPUState).
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Re-expand qregs.h.inc, this time creating the globals declared above. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * halted/exception_index live in the containing CPUState, which sits
     * before env in M68kCPU, hence the negative offset adjustment.
     */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Carve the register-name strings out of cpu_reg_names. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Sentinels; the bogus negative offsets are never dereferenced. */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}
111 
112 /* internal defines */
/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;            /* address of the next insn word to fetch */
    target_ulong pc_prev;
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;           /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;            /* scratch temp for MAC insns */
    int done_mac;
    int writeback_mask;         /* bitmask of aregs with a pending store */
    TCGv writeback[8];          /* delayed new values for address regs */
    bool ss_active;             /* single-step (trace) mode active */
} DisasContext;
126 
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129     if (s->writeback_mask & (1 << regno)) {
130         return s->writeback[regno];
131     } else {
132         return cpu_aregs[regno];
133     }
134 }
135 
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137                            TCGv val, bool give_temp)
138 {
139     if (s->writeback_mask & (1 << regno)) {
140         if (give_temp) {
141             s->writeback[regno] = val;
142         } else {
143             tcg_gen_mov_i32(s->writeback[regno], val);
144         }
145     } else {
146         s->writeback_mask |= 1 << regno;
147         if (give_temp) {
148             s->writeback[regno] = val;
149         } else {
150             TCGv tmp = tcg_temp_new();
151             s->writeback[regno] = tmp;
152             tcg_gen_mov_i32(tmp, val);
153         }
154     }
155 }
156 
157 static void do_writebacks(DisasContext *s)
158 {
159     unsigned mask = s->writeback_mask;
160     if (mask) {
161         s->writeback_mask = 0;
162         do {
163             unsigned regno = ctz32(mask);
164             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165             mask &= mask - 1;
166         } while (mask);
167     }
168 }
169 
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
/* True when the TB runs without the supervisor bit set. */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
/* MMU indexes derived from the SFC/DFC function-code TB flags. */
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

/* Signature shared by all per-instruction disassembler routines. */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

/*
 * DISAS_INSN(foo) opens the definition of disas_foo(); with
 * DEBUG_DISPATCH it also interposes a wrapper that logs each dispatch.
 */
#ifdef DEBUG_DISPATCH
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif

/*
 * For each CC_OP, the set of flags (CCF_*) whose TCG globals hold live
 * data; set_cc_op() discards flags that drop out of this set.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
212 
/*
 * Switch the lazily-evaluated CC state to operation OP, discarding the
 * TCG globals of any flags that OP does not keep live.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;    /* env->cc_op is now stale; see update_cc_op() */

    /*
     * Discard CC computation that will no longer be used.
     * Note that X and N are never dead.
     */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
239 
240 /* Update the CPU env CC_OP state.  */
241 static void update_cc_op(DisasContext *s)
242 {
243     if (!s->cc_op_synced) {
244         s->cc_op_synced = 1;
245         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
246     }
247 }
248 
/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Flush cc_op first: the TB ends here and env must be consistent. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
256 
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Flush cc_op first: the TB ends here and env must be consistent. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
264 
/* Emit a call raising exception NR; does not update PC or cc_op. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
269 
/* Raise exception NR, recording THIS_PC for a Format $2 stack frame. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
284 
/* Raise exception NR with the PC set to DEST; ends the TB. */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
294 
/* Raise an address-error exception at the current instruction. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
299 
/*
 * Generate a load from the specified address.  Narrow values are
 *  sign extended to full register width.
 */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                            int sign, int index)
{
    TCGv tmp = tcg_temp_new_i32();

    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* OS_BYTE/WORD/LONG double as the MO_8/16/32 size bits. */
        tcg_gen_qemu_ld_tl(tmp, addr, index,
                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
    return tmp;
}
321 
/* Generate a store of VAL to ADDR using MMU index INDEX. */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                             int index)
{
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* OS_BYTE/WORD/LONG double as the MO_8/16/32 size bits. */
        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
}
336 
/* Direction/extension selector for gen_ldst() and the gen_ea helpers. */
typedef enum {
    EA_STORE,   /* write VAL to the effective address */
    EA_LOADU,   /* load, zero-extended */
    EA_LOADS    /* load, sign-extended */
} ea_what;
342 
343 /*
344  * Generate an unsigned load if VAL is 0 a signed load if val is -1,
345  * otherwise generate a store.
346  */
347 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
348                      ea_what what, int index)
349 {
350     if (what == EA_STORE) {
351         gen_store(s, opsize, addr, val, index);
352         return store_dummy;
353     } else {
354         return gen_load(s, opsize, addr, what == EA_LOADS, index);
355     }
356 }
357 
358 /* Read a 16-bit immediate constant */
359 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
360 {
361     uint16_t im;
362     im = translator_lduw(env, &s->base, s->pc);
363     s->pc += 2;
364     return im;
365 }
366 
367 /* Read an 8-bit immediate constant */
368 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
369 {
370     return read_im16(env, s);
371 }
372 
373 /* Read a 32-bit immediate constant.  */
374 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
375 {
376     uint32_t im;
377     im = read_im16(env, s) << 16;
378     im |= 0xffff & read_im16(env, s);
379     return im;
380 }
381 
382 /* Read a 64-bit immediate constant.  */
383 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
384 {
385     uint64_t im;
386     im = (uint64_t)read_im32(env, s) << 32;
387     im |= (uint64_t)read_im32(env, s);
388     return im;
389 }
390 
/*
 * Calculate an address index from extension word EXT: the selected
 * Dn/An register, optionally sign-extended from 16 bits and scaled.
 * TMP is a caller-provided scratch temp used when a new value is built.
 */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects address vs data register; bits 14-12 the number. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend the low 16 bits. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
409 
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 * Returns NULL_QREG if the extension word encodes a mode the current
 * CPU model does not support.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* PC value used for pc-relative modes: address of the ext word. */
    offset = s->pc;
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* Models without scaled indexing ignore the scale field. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold the displacement into the base now */
                base = tcg_constant_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = tcg_constant_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            /* low byte of the ext word is an 8-bit displacement */
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
516 
/* Sign or zero extend a value.  */

/* Extend the OPSIZE-wide value VAL into RES (sign extend if SIGN). */
static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
        if (sign) {
            tcg_gen_ext8s_i32(res, val);
        } else {
            tcg_gen_ext8u_i32(res, val);
        }
        break;
    case OS_WORD:
        if (sign) {
            tcg_gen_ext16s_i32(res, val);
        } else {
            tcg_gen_ext16u_i32(res, val);
        }
        break;
    case OS_LONG:
        /* Already full width: a plain copy. */
        tcg_gen_mov_i32(res, val);
        break;
    default:
        g_assert_not_reached();
    }
}
543 
/* Evaluate all the CC flags.  */

/*
 * Materialise C, V, Z, N, X from the lazily-tracked cc_op state into the
 * QREG_CC_* globals, leaving the context in CC_OP_FLAGS.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialised; nothing to do. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* For ADD: N holds the result, V holds the second operand. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* For SUB: N holds the result, V holds the subtrahend. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* For CMP: N holds the first operand, V the second (see
           gen_update_cc_cmp); the subtraction happens here. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops: Z mirrors N; C and V are always clear. */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime: defer to the helper. */
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
618 
619 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
620 {
621     TCGv tmp;
622 
623     if (opsize == OS_LONG) {
624         tmp = val;
625     } else {
626         tmp = tcg_temp_new();
627         gen_ext(tmp, val, opsize, sign);
628     }
629 
630     return tmp;
631 }
632 
/* Set the CC state for a logic-op result: N holds the sign-extended
   value; remaining flags are derived lazily via CC_OP_LOGIC. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
638 
/* Record the operands of a compare: N = dest, V = src; the actual
   flag computation is deferred to gen_flush_flags(). */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
645 
/* Record an add/sub result for lazy flags: N = sign-extended result,
   V = second operand.  Caller sets the matching cc_op. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
651 
652 static inline int opsize_bytes(int opsize)
653 {
654     switch (opsize) {
655     case OS_BYTE: return 1;
656     case OS_WORD: return 2;
657     case OS_LONG: return 4;
658     case OS_SINGLE: return 4;
659     case OS_DOUBLE: return 8;
660     case OS_EXTENDED: return 12;
661     case OS_PACKED: return 12;
662     default:
663         g_assert_not_reached();
664     }
665 }
666 
667 static inline int insn_opsize(int insn)
668 {
669     switch ((insn >> 6) & 3) {
670     case 0: return OS_BYTE;
671     case 1: return OS_WORD;
672     case 2: return OS_LONG;
673     default:
674         g_assert_not_reached();
675     }
676 }
677 
678 static inline int ext_opsize(int ext, int pos)
679 {
680     switch ((ext >> pos) & 7) {
681     case 0: return OS_LONG;
682     case 1: return OS_SINGLE;
683     case 2: return OS_EXTENDED;
684     case 3: return OS_PACKED;
685     case 4: return OS_WORD;
686     case 5: return OS_DOUBLE;
687     case 6: return OS_BYTE;
688     default:
689         g_assert_not_reached();
690     }
691 }
692 
693 /*
694  * Assign value to a register.  If the width is less than the register width
695  * only the low part of the register is set.
696  */
697 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
698 {
699     TCGv tmp;
700     switch (opsize) {
701     case OS_BYTE:
702         tcg_gen_andi_i32(reg, reg, 0xffffff00);
703         tmp = tcg_temp_new();
704         tcg_gen_ext8u_i32(tmp, val);
705         tcg_gen_or_i32(reg, reg, tmp);
706         break;
707     case OS_WORD:
708         tcg_gen_andi_i32(reg, reg, 0xffff0000);
709         tmp = tcg_temp_new();
710         tcg_gen_ext16u_i32(tmp, val);
711         tcg_gen_or_i32(reg, reg, tmp);
712         break;
713     case OS_LONG:
714     case OS_SINGLE:
715         tcg_gen_mov_i32(reg, val);
716         break;
717     default:
718         g_assert_not_reached();
719     }
720 }
721 
/*
 * Generate code for an "effective address".  Does not adjust the base
 * register for autoincrement addressing modes.
 * Returns NULL_QREG for modes that have no memory address (register
 * direct, immediate) or that are invalid for the request.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        /* Register direct modes have no address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrememnt.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* Byte pushes through SP still decrement by 2 to keep
               the stack pointer word-aligned on 680x0. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement  */
            /* Displacement is relative to the extension word. */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
789 
790 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
791                     int opsize)
792 {
793     int mode = extract32(insn, 3, 3);
794     int reg0 = REG(insn, 0);
795     return gen_lea_mode(env, s, mode, reg0, opsize);
796 }
797 
/*
 * Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
 * a write otherwise it is a read (0 == sign extend, -1 == zero extend).
 * ADDRP is non-null for readwrite operands.
 * Returns the loaded value, store_dummy for a store, or NULL_QREG for an
 * invalid addressing mode.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            /* Only the low OPSIZE part of the register is written. */
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* The increment is deferred until after a readwrite's store. */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                /* Keep SP word-aligned for byte accesses on 680x0. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt.  */
        if (addrp && what == EA_STORE) {
            /* Readwrite store: re-use the address from the earlier load. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
914 
915 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
916                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
917 {
918     int mode = extract32(insn, 3, 3);
919     int reg0 = REG(insn, 0);
920     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
921 }
922 
/* Return a pointer temp addressing FP register FREG inside env. */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
929 
/* Return a pointer temp addressing env->fp_result. */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
936 
/* Copy one FPReg to another: the 16-bit upper part and the 64-bit
   lower part are moved separately. */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
}
950 
/*
 * Load an OPSIZE-wide value from ADDR and convert it into the FP
 * register addressed by FP, via the ext* conversion helpers.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Integer operand: sign-extend, then convert to FP. */
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU has no 96-bit extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* First long word: sign/exponent in its upper 16 bits. */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* Following 8 bytes: the mantissa. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
997 
/*
 * Store the FP register pointed to by FP to memory at ADDR with size
 * OPSIZE, converting from the internal format via helpers.
 * OS_EXTENDED writes the 16-bit exponent in the upper half of the
 * first long, then the 64-bit mantissa at ADDR + 4.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no 96-bit extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        /* tmp is dead as data here; reuse it for the mantissa address. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1044 
1045 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1046                         TCGv_ptr fp, ea_what what, int index)
1047 {
1048     if (what == EA_STORE) {
1049         gen_store_fp(s, opsize, addr, fp, index);
1050     } else {
1051         gen_load_fp(s, opsize, addr, fp, index);
1052     }
1053 }
1054 
/*
 * Generate a floating-point access for the effective address given by
 * MODE and REG0, transferring between the EA and the FP register FP
 * according to WHAT.  Returns 0 on success, -1 when the addressing
 * mode is invalid for this access (the caller handles the fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Advance An by the operand size after the access. */
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        /* gen_lea_mode produces the already-decremented address. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address back to An. */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be read, never stored to. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1187 
1188 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1189                        int opsize, TCGv_ptr fp, ea_what what, int index)
1190 {
1191     int mode = extract32(insn, 3, 3);
1192     int reg0 = REG(insn, 0);
1193     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1194 }
1195 
/*
 * A comparison "v1 TCOND v2" whose truth value equals an m68k
 * condition code, as produced by gen_cc_cond.
 */
typedef struct {
    TCGCond tcond;  /* comparison to apply */
    TCGv v1;        /* left operand */
    TCGv v2;        /* right operand (often the constant 0) */
} DisasCompare;
1201 
/*
 * Fill *C with a TCG comparison equivalent to m68k condition COND
 * (0..15).  When the pending cc_op (CMP, ADD/SUB, LOGIC) already holds
 * the needed inputs, the comparison is built directly from them;
 * otherwise the flags are flushed and the comparison built from the
 * materialized C/V/Z/N/X registers.  Conditions come in even/odd
 * pairs where the even member is the negation of the odd one; that is
 * handled by the single inversion at "done".
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Compute the sign of the actual comparison result. */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the negations of the odd ones. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1374 
1375 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1376 {
1377   DisasCompare c;
1378 
1379   gen_cc_cond(&c, s, cond);
1380   update_cc_op(s);
1381   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1382 }
1383 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);                   /* materialize the pending cc_op */
    tcg_gen_movi_i32(QREG_PC, s->pc);  /* resume at the following insn */
    s->base.is_jmp = DISAS_EXIT;
}
1391 
/*
 * Evaluate the effective address in INSN for a read of size OPSIZE,
 * leaving the value (sign-extended when OP_SIGN) in RESULT.  On an
 * invalid EA this raises an address fault and returns from the
 * enclosing function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1400 
/*
 * Evaluate the effective address in INSN and store VAL of size OPSIZE
 * to it.  On an invalid EA this raises an address fault and returns
 * from the enclosing function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1409 
/*
 * Generate a jump to an immediate address.  N selects the goto_tb slot
 * used for direct TB chaining; SRC is the address passed to the
 * format-2 trace exception when single-step tracing is active.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Tracing: raise a trace exception at DEST instead of jumping. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Chain directly to the destination TB. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Exit to the main loop for a TB lookup. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1428 
1429 DISAS_INSN(scc)
1430 {
1431     DisasCompare c;
1432     int cond;
1433     TCGv tmp;
1434 
1435     cond = (insn >> 8) & 0xf;
1436     gen_cc_cond(&c, s, cond);
1437 
1438     tmp = tcg_temp_new();
1439     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1440 
1441     tcg_gen_neg_i32(tmp, tmp);
1442     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1443 }
1444 
/*
 * DBcc: if condition insn[11:8] holds, fall through.  Otherwise
 * decrement the low word of Dn and branch back to the 16-bit
 * displacement target unless the counter reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;  /* address of the displacement word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low word of Dn is written back. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1468 
/* Unimplemented MAC instruction: raise a line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1473 
/* Unimplemented FPU instruction: raise a line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1478 
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    /* Visible with "-d unimp"; then raise the illegal-insn exception. */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1490 
1491 DISAS_INSN(mulw)
1492 {
1493     TCGv reg;
1494     TCGv tmp;
1495     TCGv src;
1496     int sign;
1497 
1498     sign = (insn & 0x100) != 0;
1499     reg = DREG(insn, 9);
1500     tmp = tcg_temp_new();
1501     if (sign)
1502         tcg_gen_ext16s_i32(tmp, reg);
1503     else
1504         tcg_gen_ext16u_i32(tmp, reg);
1505     SRC_EA(env, src, OS_WORD, sign, NULL);
1506     tcg_gen_mul_i32(tmp, tmp, src);
1507     tcg_gen_mov_i32(reg, tmp);
1508     gen_logic_cc(s, tmp, OS_LONG);
1509 }
1510 
1511 DISAS_INSN(divw)
1512 {
1513     int sign;
1514     TCGv src;
1515     TCGv destr;
1516     TCGv ilen;
1517 
1518     /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1519 
1520     sign = (insn & 0x100) != 0;
1521 
1522     /* dest.l / src.w */
1523 
1524     SRC_EA(env, src, OS_WORD, sign, NULL);
1525     destr = tcg_constant_i32(REG(insn, 9));
1526     ilen = tcg_constant_i32(s->pc - s->base.pc_next);
1527     if (sign) {
1528         gen_helper_divsw(cpu_env, destr, src, ilen);
1529     } else {
1530         gen_helper_divuw(cpu_env, destr, src, ilen);
1531     }
1532 
1533     set_cc_op(s, CC_OP_FLAGS);
1534 }
1535 
/*
 * DIVU.L/DIVS.L family.  The extension word supplies: bit 11 = signed,
 * bit 10 = 64-bit dividend form (requires QUAD_MULDIV), ext[14:12] =
 * Dq (quotient), ext[2:0] = Dr (remainder).  Division and flag setting
 * are in the helpers; the instruction length is passed along,
 * presumably for the exception frame on a divide fault — helpers not
 * visible here.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(cpu_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(cpu_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(cpu_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(cpu_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1582 
/* Emit TCG computing dest = dest + src + X in packed BCD (two decimal
 * digits per byte; operands are zero-extended bytes from the callers,
 * so the carry out appears in bit 8).
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = t0 * 3, i.e. 0x6 per digit lacking a carry */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1647 
/* Emit TCG computing dest = dest - src - X in packed BCD, using the
 * identity shown below to reuse the bcd_add correction scheme.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1699 
/*
 * Set flags after a BCD operation whose 9-bit result is in VAL.
 * QREG_CC_Z accumulates by OR so a nonzero result byte is sticky
 * across a multi-byte BCD sequence (callers note "!Z is sticky");
 * C and X get the decimal carry out, bit 8 of VAL.
 */
static void bcd_flags(TCGv val)
{
    /* C used as scratch here: low byte of the result ORed into Z. */
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1709 
1710 DISAS_INSN(abcd_reg)
1711 {
1712     TCGv src;
1713     TCGv dest;
1714 
1715     gen_flush_flags(s); /* !Z is sticky */
1716 
1717     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1718     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1719     bcd_add(dest, src);
1720     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1721 
1722     bcd_flags(dest);
1723 }
1724 
/*
 * ABCD -(Ay),-(Ax): memory form with predecrement addressing.
 * The source byte is fetched first, then the destination byte; the
 * destination address is saved in ADDR and reused for the store.
 */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Store back through the saved destination address. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1745 
1746 DISAS_INSN(sbcd_reg)
1747 {
1748     TCGv src, dest;
1749 
1750     gen_flush_flags(s); /* !Z is sticky */
1751 
1752     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1753     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1754 
1755     bcd_sub(dest, src);
1756 
1757     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1758 
1759     bcd_flags(dest);
1760 }
1761 
/*
 * SBCD -(Ay),-(Ax): memory form with predecrement addressing.
 * The source byte is fetched first, then the destination byte; the
 * destination address is saved in ADDR and reused for the store.
 */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Store back through the saved destination address. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1782 
1783 DISAS_INSN(nbcd)
1784 {
1785     TCGv src, dest;
1786     TCGv addr;
1787 
1788     gen_flush_flags(s); /* !Z is sticky */
1789 
1790     SRC_EA(env, src, OS_BYTE, 0, &addr);
1791 
1792     dest = tcg_temp_new();
1793     tcg_gen_movi_i32(dest, 0);
1794     bcd_sub(dest, src);
1795 
1796     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1797 
1798     bcd_flags(dest);
1799 }
1800 
/*
 * ADD/SUB between Dn and an <ea> operand.  Bit 14 of INSN selects add
 * vs sub, bit 8 selects the <ea> as destination (register as source).
 * X gets the unsigned carry/borrow; the remaining flags are deferred
 * through CC_OP_ADDx / CC_OP_SUBx.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination; keep its address for the writeback. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* carry out: unsigned sum wrapped below an addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* borrow must be computed before tmp is overwritten */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize)
;
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1838 
1839 /* Reverse the order of the bits in REG.  */
1840 DISAS_INSN(bitrev)
1841 {
1842     TCGv reg;
1843     reg = DREG(insn, 0);
1844     gen_helper_bitrev(reg, reg);
1845 }
1846 
/*
 * Dynamic bit instructions BTST/BCHG/BCLR/BSET with the bit number in
 * a data register (insn[7:6] selects the op, 0 = btst).  Memory
 * operands are byte-sized with the bit number taken mod 8; register
 * operands are long with the bit number mod 32.  The state of the
 * tested bit (before modification) is left in QREG_CC_Z.
 */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    /* A nonzero EA mode field means a memory operand. */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    /* tmp = bit mask for the selected bit */
    tmp = tcg_temp_new();
    tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);

    /* Capture the bit under test before modifying anything. */
    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    /* btst only tests; the other three write the result back. */
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
}
1894 
/*
 * SATS: saturate Dn based on the overflow flag, then set logic flags.
 * The clamping itself is done by the sats helper (passed QREG_CC_V;
 * helper not visible here).
 */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);  /* helper needs V materialized in QREG_CC_V */
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1903 
1904 static void gen_push(DisasContext *s, TCGv val)
1905 {
1906     TCGv tmp;
1907 
1908     tmp = tcg_temp_new();
1909     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1910     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1911     tcg_gen_mov_i32(QREG_SP, tmp);
1912 }
1913 
1914 static TCGv mreg(int reg)
1915 {
1916     if (reg < 8) {
1917         /* Dx */
1918         return cpu_dregs[reg];
1919     }
1920     /* Ax */
1921     return cpu_aregs[reg & 7];
1922 }
1923 
/*
 * MOVEM: move multiple registers to or from memory.  The mask word
 * selects registers (D0..D7 then A0..A7; the predecrement store form
 * scans the mask in reverse).  Loads go into temporaries first and are
 * copied to the architectural registers only after all loads are
 * emitted.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a copy so An itself is only updated where required. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        /* Commit to the registers only after all loads are emitted. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) -- mask scanned in reverse */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2029 
2030 DISAS_INSN(movep)
2031 {
2032     uint8_t i;
2033     int16_t displ;
2034     TCGv reg;
2035     TCGv addr;
2036     TCGv abuf;
2037     TCGv dbuf;
2038 
2039     displ = read_im16(env, s);
2040 
2041     addr = AREG(insn, 0);
2042     reg = DREG(insn, 9);
2043 
2044     abuf = tcg_temp_new();
2045     tcg_gen_addi_i32(abuf, addr, displ);
2046     dbuf = tcg_temp_new();
2047 
2048     if (insn & 0x40) {
2049         i = 4;
2050     } else {
2051         i = 2;
2052     }
2053 
2054     if (insn & 0x80) {
2055         for ( ; i > 0 ; i--) {
2056             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2057             tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
2058             if (i > 1) {
2059                 tcg_gen_addi_i32(abuf, abuf, 2);
2060             }
2061         }
2062     } else {
2063         for ( ; i > 0 ; i--) {
2064             tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
2065             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2066             if (i > 1) {
2067                 tcg_gen_addi_i32(abuf, abuf, 2);
2068             }
2069         }
2070     }
2071 }
2072 
2073 DISAS_INSN(bitop_im)
2074 {
2075     int opsize;
2076     int op;
2077     TCGv src1;
2078     uint32_t mask;
2079     int bitnum;
2080     TCGv tmp;
2081     TCGv addr;
2082 
2083     if ((insn & 0x38) != 0)
2084         opsize = OS_BYTE;
2085     else
2086         opsize = OS_LONG;
2087     op = (insn >> 6) & 3;
2088 
2089     bitnum = read_im16(env, s);
2090     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2091         if (bitnum & 0xfe00) {
2092             disas_undef(env, s, insn);
2093             return;
2094         }
2095     } else {
2096         if (bitnum & 0xff00) {
2097             disas_undef(env, s, insn);
2098             return;
2099         }
2100     }
2101 
2102     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2103 
2104     gen_flush_flags(s);
2105     if (opsize == OS_BYTE)
2106         bitnum &= 7;
2107     else
2108         bitnum &= 31;
2109     mask = 1 << bitnum;
2110 
2111    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2112 
2113     if (op) {
2114         tmp = tcg_temp_new();
2115         switch (op) {
2116         case 1: /* bchg */
2117             tcg_gen_xori_i32(tmp, src1, mask);
2118             break;
2119         case 2: /* bclr */
2120             tcg_gen_andi_i32(tmp, src1, ~mask);
2121             break;
2122         case 3: /* bset */
2123             tcg_gen_ori_i32(tmp, src1, mask);
2124             break;
2125         default: /* btst */
2126             break;
2127         }
2128         DEST_EA(env, insn, opsize, tmp, &addr);
2129     }
2130 }
2131 
2132 static TCGv gen_get_ccr(DisasContext *s)
2133 {
2134     TCGv dest;
2135 
2136     update_cc_op(s);
2137     dest = tcg_temp_new();
2138     gen_helper_get_ccr(dest, cpu_env);
2139     return dest;
2140 }
2141 
2142 static TCGv gen_get_sr(DisasContext *s)
2143 {
2144     TCGv ccr;
2145     TCGv sr;
2146 
2147     ccr = gen_get_ccr(s);
2148     sr = tcg_temp_new();
2149     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2150     tcg_gen_or_i32(sr, sr, ccr);
2151     return sr;
2152 }
2153 
2154 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2155 {
2156     if (ccr_only) {
2157         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2158         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2159         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2160         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2161         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2162     } else {
2163         /* Must writeback before changing security state. */
2164         do_writebacks(s);
2165         gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
2166     }
2167     set_cc_op(s, CC_OP_FLAGS);
2168 }
2169 
2170 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2171 {
2172     if (ccr_only) {
2173         gen_helper_set_ccr(cpu_env, val);
2174     } else {
2175         /* Must writeback before changing security state. */
2176         do_writebacks(s);
2177         gen_helper_set_sr(cpu_env, val);
2178     }
2179     set_cc_op(s, CC_OP_FLAGS);
2180 }
2181 
2182 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2183                            bool ccr_only)
2184 {
2185     if ((insn & 0x3f) == 0x3c) {
2186         uint16_t val;
2187         val = read_im16(env, s);
2188         gen_set_sr_im(s, val, ccr_only);
2189     } else {
2190         TCGv src;
2191         SRC_EA(env, src, OS_WORD, 0, NULL);
2192         gen_set_sr(s, src, ccr_only);
2193     }
2194 }
2195 
/*
 * Immediate arithmetic/logic group: ori, andi, subi, addi, eori, cmpi.
 * EA field 0x3c selects CCR (byte size) or SR (word size) as the
 * destination; that form is only valid for ori/andi/eori, and writing
 * SR is privileged.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Fetch the sign-extended immediate of the operation size. */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_constant_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_constant_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_constant_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi (op 6) has no writeback, so it needs no address. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            /* End the TB so the updated SR/CCR state takes effect. */
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* X is the borrow: set when src1 < immediate (unsigned). */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X is the carry: set when the sum wrapped below the addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
}
2300 
/*
 * CAS: single compare-and-swap, implemented with the TCG atomic
 * cmpxchg primitive.  Flags are set as for a compare of the memory
 * value against Dc.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Dc, sign-extended to the operation size for the comparison. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Commit any pending post-increment/pre-decrement of the base. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2363 
/*
 * CAS2 (word): double compare-and-swap on two independent addresses.
 * There is no parallel-safe word helper, so under CF_PARALLEL we call
 * exit_atomic to have the instruction re-executed serially.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
    } else {
        /* Pack the four register numbers into one immediate. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2413 
/*
 * CAS2 (long): double compare-and-swap on two independent addresses.
 * Unlike cas2w, a parallel-safe helper exists for the long variant.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one immediate. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2463 
2464 DISAS_INSN(byterev)
2465 {
2466     TCGv reg;
2467 
2468     reg = DREG(insn, 0);
2469     tcg_gen_bswap32_i32(reg, reg);
2470 }
2471 
/*
 * MOVE/MOVEA, all sizes.  Bits 12-13 encode the size (1=byte, 3=word,
 * 2=long); the destination EA has its mode and register fields swapped
 * relative to the source encoding.
 */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended.  */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Re-assemble the destination EA in standard mode:reg order. */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}
2508 
/*
 * NEGX: negate with extend.  result = 0 - (src + X).  Z is sticky:
 * this instruction can clear Z but never set it.
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Keep only the borrow bit of the wide subtraction. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2551 
2552 DISAS_INSN(lea)
2553 {
2554     TCGv reg;
2555     TCGv tmp;
2556 
2557     reg = AREG(insn, 9);
2558     tmp = gen_lea(env, s, insn, OS_LONG);
2559     if (IS_NULL_QREG(tmp)) {
2560         gen_addr_fault(s);
2561         return;
2562     }
2563     tcg_gen_mov_i32(reg, tmp);
2564 }
2565 
2566 DISAS_INSN(clr)
2567 {
2568     int opsize;
2569     TCGv zero;
2570 
2571     zero = tcg_constant_i32(0);
2572     opsize = insn_opsize(insn);
2573     DEST_EA(env, insn, opsize, zero, NULL);
2574     gen_logic_cc(s, zero, opsize);
2575 }
2576 
2577 DISAS_INSN(move_from_ccr)
2578 {
2579     TCGv ccr;
2580 
2581     ccr = gen_get_ccr(s);
2582     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2583 }
2584 
2585 DISAS_INSN(neg)
2586 {
2587     TCGv src1;
2588     TCGv dest;
2589     TCGv addr;
2590     int opsize;
2591 
2592     opsize = insn_opsize(insn);
2593     SRC_EA(env, src1, opsize, 1, &addr);
2594     dest = tcg_temp_new();
2595     tcg_gen_neg_i32(dest, src1);
2596     set_cc_op(s, CC_OP_SUBB + opsize);
2597     gen_update_cc_add(dest, src1, opsize);
2598     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2599     DEST_EA(env, insn, opsize, dest, &addr);
2600 }
2601 
2602 DISAS_INSN(move_to_ccr)
2603 {
2604     gen_move_to_sr(env, s, insn, true);
2605 }
2606 
2607 DISAS_INSN(not)
2608 {
2609     TCGv src1;
2610     TCGv dest;
2611     TCGv addr;
2612     int opsize;
2613 
2614     opsize = insn_opsize(insn);
2615     SRC_EA(env, src1, opsize, 1, &addr);
2616     dest = tcg_temp_new();
2617     tcg_gen_not_i32(dest, src1);
2618     DEST_EA(env, insn, opsize, dest, &addr);
2619     gen_logic_cc(s, dest, opsize);
2620 }
2621 
2622 DISAS_INSN(swap)
2623 {
2624     TCGv src1;
2625     TCGv src2;
2626     TCGv reg;
2627 
2628     src1 = tcg_temp_new();
2629     src2 = tcg_temp_new();
2630     reg = DREG(insn, 0);
2631     tcg_gen_shli_i32(src1, reg, 16);
2632     tcg_gen_shri_i32(src2, reg, 16);
2633     tcg_gen_or_i32(reg, src1, src2);
2634     gen_logic_cc(s, reg, OS_LONG);
2635 }
2636 
/*
 * BKPT: in system emulation this raises an illegal-instruction trap
 * (the breakpoint acknowledge cycle is not emulated); in user-mode
 * emulation it raises a debug exception instead.
 */
DISAS_INSN(bkpt)
{
#if defined(CONFIG_SOFTMMU)
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#else
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#endif
}
2645 
2646 DISAS_INSN(pea)
2647 {
2648     TCGv tmp;
2649 
2650     tmp = gen_lea(env, s, insn, OS_LONG);
2651     if (IS_NULL_QREG(tmp)) {
2652         gen_addr_fault(s);
2653         return;
2654     }
2655     gen_push(s, tmp);
2656 }
2657 
2658 DISAS_INSN(ext)
2659 {
2660     int op;
2661     TCGv reg;
2662     TCGv tmp;
2663 
2664     reg = DREG(insn, 0);
2665     op = (insn >> 6) & 7;
2666     tmp = tcg_temp_new();
2667     if (op == 3)
2668         tcg_gen_ext16s_i32(tmp, reg);
2669     else
2670         tcg_gen_ext8s_i32(tmp, reg);
2671     if (op == 2)
2672         gen_partset_reg(OS_WORD, reg, tmp);
2673     else
2674         tcg_gen_mov_i32(reg, tmp);
2675     gen_logic_cc(s, tmp, OS_LONG);
2676 }
2677 
2678 DISAS_INSN(tst)
2679 {
2680     int opsize;
2681     TCGv tmp;
2682 
2683     opsize = insn_opsize(insn);
2684     SRC_EA(env, tmp, opsize, 1, NULL);
2685     gen_logic_cc(s, tmp, opsize);
2686 }
2687 
DISAS_INSN(pulse)
{
  /* PULSE drives an external debug signal; no architectural effect,
     so it is implemented as a NOP.  */
}
2692 
2693 DISAS_INSN(illegal)
2694 {
2695     gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
2696 }
2697 
/*
 * TAS: test a byte and set its most-significant bit.  The register
 * form needs no atomicity; the memory form uses an atomic fetch-or.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        /* Flags are from the value before the MSB is set. */
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Commit any pending increment/decrement of the base register. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2731 
/*
 * MULU.L/MULS.L: 32x32 multiply.  Bit 11 of the extension word selects
 * signed; bit 10 selects a 64-bit result in Dh:Dl (68020+/CPU32 only).
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is set only when the full 64-bit product is zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0: V is set when the result does not fit in 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2794 
/*
 * LINK An,#offset: push An, set An to the updated SP, then add the
 * (usually negative) offset to SP to reserve a stack frame.
 */
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 0);
    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
    /* When An is SP itself (A7), skip the redundant copy: SP is
       rewritten just below anyway. */
    if ((insn & 7) != 7) {
        tcg_gen_mov_i32(reg, tmp);
    }
    tcg_gen_addi_i32(QREG_SP, tmp, offset);
}
2809 
2810 DISAS_INSN(link)
2811 {
2812     int16_t offset;
2813 
2814     offset = read_im16(env, s);
2815     gen_link(s, insn, offset);
2816 }
2817 
2818 DISAS_INSN(linkl)
2819 {
2820     int32_t offset;
2821 
2822     offset = read_im32(env, s);
2823     gen_link(s, insn, offset);
2824 }
2825 
2826 DISAS_INSN(unlk)
2827 {
2828     TCGv src;
2829     TCGv reg;
2830     TCGv tmp;
2831 
2832     src = tcg_temp_new();
2833     reg = AREG(insn, 0);
2834     tcg_gen_mov_i32(src, reg);
2835     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2836     tcg_gen_mov_i32(reg, tmp);
2837     tcg_gen_addi_i32(QREG_SP, src, 4);
2838 }
2839 
#if defined(CONFIG_SOFTMMU)
/* RESET: privileged; delegates the device reset to a helper. */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(cpu_env);
}
#endif
2851 
DISAS_INSN(nop)
{
    /* NOP: nothing to generate. */
}
2855 
2856 DISAS_INSN(rtd)
2857 {
2858     TCGv tmp;
2859     int16_t offset = read_im16(env, s);
2860 
2861     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2862     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2863     gen_jmp(s, tmp);
2864 }
2865 
2866 DISAS_INSN(rtr)
2867 {
2868     TCGv tmp;
2869     TCGv ccr;
2870     TCGv sp;
2871 
2872     sp = tcg_temp_new();
2873     ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2874     tcg_gen_addi_i32(sp, QREG_SP, 2);
2875     tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2876     tcg_gen_addi_i32(QREG_SP, sp, 4);
2877 
2878     gen_set_sr(s, ccr, true);
2879 
2880     gen_jmp(s, tmp);
2881 }
2882 
2883 DISAS_INSN(rts)
2884 {
2885     TCGv tmp;
2886 
2887     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2888     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2889     gen_jmp(s, tmp);
2890 }
2891 
2892 DISAS_INSN(jump)
2893 {
2894     TCGv tmp;
2895 
2896     /*
2897      * Load the target address first to ensure correct exception
2898      * behavior.
2899      */
2900     tmp = gen_lea(env, s, insn, OS_LONG);
2901     if (IS_NULL_QREG(tmp)) {
2902         gen_addr_fault(s);
2903         return;
2904     }
2905     if ((insn & 0x40) == 0) {
2906         /* jsr */
2907         gen_push(s, tcg_constant_i32(s->pc));
2908     }
2909     gen_jmp(s, tmp);
2910 }
2911 
/*
 * ADDQ/SUBQ: add or subtract a quick immediate (1..8; a field value
 * of 0 encodes 8).  Bit 8 selects subtract.  An address-register
 * destination is always long-sized and does not affect the flags.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow: compare before the subtraction clobbers dest. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry: the sum wrapped if it is below the addend. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2959 
/*
 * BRA/BSR/Bcc.  An 8-bit displacement of 0 selects a 16-bit extension
 * word; 0xff (-1) selects a 32-bit one.  The displacement is relative
 * to the address of the extension word (base).
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        /* Push the address of the instruction after the displacement. */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        /* Branch over the taken path when the inverted condition holds. */
        TCGLabel *l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
2991 
2992 DISAS_INSN(moveq)
2993 {
2994     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2995     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2996 }
2997 
2998 DISAS_INSN(mvzs)
2999 {
3000     int opsize;
3001     TCGv src;
3002     TCGv reg;
3003 
3004     if (insn & 0x40)
3005         opsize = OS_WORD;
3006     else
3007         opsize = OS_BYTE;
3008     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3009     reg = DREG(insn, 9);
3010     tcg_gen_mov_i32(reg, src);
3011     gen_logic_cc(s, src, opsize);
3012 }
3013 
3014 DISAS_INSN(or)
3015 {
3016     TCGv reg;
3017     TCGv dest;
3018     TCGv src;
3019     TCGv addr;
3020     int opsize;
3021 
3022     opsize = insn_opsize(insn);
3023     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3024     dest = tcg_temp_new();
3025     if (insn & 0x100) {
3026         SRC_EA(env, src, opsize, 0, &addr);
3027         tcg_gen_or_i32(dest, src, reg);
3028         DEST_EA(env, insn, opsize, dest, &addr);
3029     } else {
3030         SRC_EA(env, src, opsize, 0, NULL);
3031         tcg_gen_or_i32(dest, src, reg);
3032         gen_partset_reg(opsize, DREG(insn, 9), dest);
3033     }
3034     gen_logic_cc(s, dest, opsize);
3035 }
3036 
3037 DISAS_INSN(suba)
3038 {
3039     TCGv src;
3040     TCGv reg;
3041 
3042     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3043     reg = AREG(insn, 9);
3044     tcg_gen_sub_i32(reg, reg, src);
3045 }
3046 
/*
 * Emit subtract-with-extend: QREG_CC_N = dest - (src + X), with flags
 * set for CC_OP_FLAGS.  The caller writes QREG_CC_N back as the
 * result.  Z is sticky: it can only be cleared here, never set.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Keep only the borrow bit of the wide subtraction. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3079 
3080 DISAS_INSN(subx_reg)
3081 {
3082     TCGv dest;
3083     TCGv src;
3084     int opsize;
3085 
3086     opsize = insn_opsize(insn);
3087 
3088     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3089     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3090 
3091     gen_subx(s, src, dest, opsize);
3092 
3093     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3094 }
3095 
/*
 * subx -(Ay),-(Ax): memory form with predecrement on both operands.
 * Both address registers are decremented in place before the loads,
 * source first, then destination.
 */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3118 
3119 DISAS_INSN(mov3q)
3120 {
3121     TCGv src;
3122     int val;
3123 
3124     val = (insn >> 9) & 7;
3125     if (val == 0) {
3126         val = -1;
3127     }
3128     src = tcg_constant_i32(val);
3129     gen_logic_cc(s, src, OS_LONG);
3130     DEST_EA(env, insn, OS_LONG, src, NULL);
3131 }
3132 
/*
 * CMP.sz <ea>,Dn: compare a data register against a source EA.
 * Both operands are sign-extended to 32 bits; flags are computed by
 * gen_update_cc_cmp() and no register is modified.
 */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3144 
3145 DISAS_INSN(cmpa)
3146 {
3147     int opsize;
3148     TCGv src;
3149     TCGv reg;
3150 
3151     if (insn & 0x100) {
3152         opsize = OS_LONG;
3153     } else {
3154         opsize = OS_WORD;
3155     }
3156     SRC_EA(env, src, opsize, 1, NULL);
3157     reg = AREG(insn, 9);
3158     gen_update_cc_cmp(s, reg, src, OS_LONG);
3159 }
3160 
/*
 * CMPM.sz (Ay)+,(Ax)+: compare two memory operands with
 * post-increment addressing.  The source (Ay) is read first, then the
 * destination (Ax); both loads sign-extend.  Only flags are affected.
 */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3175 
/*
 * EOR.sz Dn,<ea>: exclusive-OR a data register into the destination
 * EA.  N and Z are set from the result via gen_logic_cc(); the EA is
 * read and written through the same address (read-modify-write).
 */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
}
3191 
3192 static void do_exg(TCGv reg1, TCGv reg2)
3193 {
3194     TCGv temp = tcg_temp_new();
3195     tcg_gen_mov_i32(temp, reg1);
3196     tcg_gen_mov_i32(reg1, reg2);
3197     tcg_gen_mov_i32(reg2, temp);
3198 }
3199 
/* EXG Dx,Dy: exchange two data registers.  Flags are unaffected.  */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}

/* EXG Ax,Ay: exchange two address registers.  Flags are unaffected.  */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}

/* EXG Dx,Ay: exchange a data and an address register.  Flags unaffected.  */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3217 
/*
 * AND.sz: bit 8 of the opcode selects the direction:
 *   set:   Dn & <ea> -> <ea>  (memory destination, read-modify-write)
 *   clear: <ea> & Dn -> Dn    (register destination, sized partial write)
 * N and Z are set from the result via gen_logic_cc().
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* AND Dn,<ea>: write the result back to the EA.  */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* AND <ea>,Dn: write only the sized portion of Dn.  */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3241 
3242 DISAS_INSN(adda)
3243 {
3244     TCGv src;
3245     TCGv reg;
3246 
3247     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3248     reg = AREG(insn, 9);
3249     tcg_gen_add_i32(reg, reg, src);
3250 }
3251 
/*
 * Generate code for ADDX: dest + src + X at the given operand size.
 * On return the sign-extended result is in QREG_CC_N and X, N,
 * Z (sticky: cleared only if already clear), V and C are all updated;
 * cc_op is set to CC_OP_FLAGS.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    tmp = tcg_temp_new();
    /* V = (result ^ src) & ~(dest ^ src): overflow iff both inputs have
       the same sign and the result's sign differs.  */
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3283 
3284 DISAS_INSN(addx_reg)
3285 {
3286     TCGv dest;
3287     TCGv src;
3288     int opsize;
3289 
3290     opsize = insn_opsize(insn);
3291 
3292     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3293     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3294 
3295     gen_addx(s, src, dest, opsize);
3296 
3297     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3298 }
3299 
/*
 * ADDX.sz -(Ay),-(Ax): memory form of add with extend.  Both address
 * registers are predecremented, the operands are loaded sign-extended,
 * and the result (left in QREG_CC_N by gen_addx) is stored back to the
 * destination address.
 */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement the source address register, then load the operand. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Likewise for the destination operand. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3322 
/*
 * Immediate-count shift: asl/asr/lsl/lsr #count,Dn.  The 3-bit count
 * field encodes 1..8 (0 means 8).  Bit 3 selects logical vs arithmetic,
 * bit 8 selects left vs right.  Computes the result and all of XNZVC
 * directly into the CC registers and writes back the sized result.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    /* An encoded count of 0 means a shift by 8.  */
    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X = last bit shifted out of the top of the sized operand.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
            /* Turn the 0/1 setcond result into the 0/-1 flag value.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* C/X = last bit shifted out of the bottom.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3374 
/*
 * Register-count shift: asl/asr/lsl/lsr Dy,Dx.  The shift count is
 * taken from Dy modulo 64.  The operand is widened to 64 bits so the
 * carry ("last bit shifted out") falls out of the extraction; X is
 * only updated when the count is non-zero.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* For 32-bit operands the carry is simply the high half.  */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* For 8/16-bit operands, the carry is bit 'bits' of the low
               half; force C=0 when the count is zero.  */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            /* Turn the 0/1 setcond result into the 0/-1 flag value.  */
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: place the operand in the high half so the last
           bit shifted out lands in bit 31 of the low half.  */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3466 
/* Immediate-count shifts, dispatched by operand size.  */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}

DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}

/* Register-count shifts, dispatched by operand size.  */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}

DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3496 
/*
 * Memory shift: asl/asr/lsl/lsr <ea>.  Memory shifts always operate
 * on a word and always shift by exactly one, so the carry and result
 * can be computed with immediate operations.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X = old bit 15; result = src << 1.  */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C/X = old bit 0; result = src >> 1 (logical or arithmetic).  */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3536 
/*
 * Plain rotate (rol/ror) of 'reg' by 'shift' bits at the given size,
 * updating NZVC in place.  8/16-bit operands are replicated across
 * the 32-bit word so a single 32-bit rotate produces the right result.
 * X is not affected by plain rotates.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    /* Sign-extend the sized result back down for the flag computation.  */
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C = the bit rotated around the end: lsb for left, msb for right.  */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3584 
3585 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3586 {
3587     switch (size) {
3588     case 8:
3589         tcg_gen_ext8s_i32(reg, reg);
3590         break;
3591     case 16:
3592         tcg_gen_ext16s_i32(reg, reg);
3593         break;
3594     default:
3595         break;
3596     }
3597     tcg_gen_mov_i32(QREG_CC_N, reg);
3598     tcg_gen_mov_i32(QREG_CC_Z, reg);
3599     tcg_gen_mov_i32(QREG_CC_X, X);
3600     tcg_gen_mov_i32(QREG_CC_C, X);
3601     tcg_gen_movi_i32(QREG_CC_V, 0);
3602 }
3603 
/*
 * Rotate through X (roxl/roxr) for 8/16-bit operands: treat the value
 * as a (size+1)-bit quantity including the X bit and rotate it by
 * composing two shifts of the value with one shift of X.
 * Result of rotate_x() is valid if 0 <= shift <= size.
 * Returns the new X bit in a fresh temporary.
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3644 
/*
 * Rotate through X for 32-bit operands: build a 64-bit value holding
 * the register and the X bit, rotate it at 64 bits, and extract the
 * new register value and X bit.  If the shift count is zero neither
 * the register nor X is modified.
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 * Returns the new X bit in a fresh temporary.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3705 
3706 DISAS_INSN(rotate_im)
3707 {
3708     TCGv shift;
3709     int tmp;
3710     int left = (insn & 0x100);
3711 
3712     tmp = (insn >> 9) & 7;
3713     if (tmp == 0) {
3714         tmp = 8;
3715     }
3716 
3717     shift = tcg_constant_i32(tmp);
3718     if (insn & 8) {
3719         rotate(DREG(insn, 0), shift, left, 32);
3720     } else {
3721         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3722         rotate_x_flags(DREG(insn, 0), X, 32);
3723     }
3724 
3725     set_cc_op(s, CC_OP_FLAGS);
3726 }
3727 
3728 DISAS_INSN(rotate8_im)
3729 {
3730     int left = (insn & 0x100);
3731     TCGv reg;
3732     TCGv shift;
3733     int tmp;
3734 
3735     reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3736 
3737     tmp = (insn >> 9) & 7;
3738     if (tmp == 0) {
3739         tmp = 8;
3740     }
3741 
3742     shift = tcg_constant_i32(tmp);
3743     if (insn & 8) {
3744         rotate(reg, shift, left, 8);
3745     } else {
3746         TCGv X = rotate_x(reg, shift, left, 8);
3747         rotate_x_flags(reg, X, 8);
3748     }
3749     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3750     set_cc_op(s, CC_OP_FLAGS);
3751 }
3752 
3753 DISAS_INSN(rotate16_im)
3754 {
3755     int left = (insn & 0x100);
3756     TCGv reg;
3757     TCGv shift;
3758     int tmp;
3759 
3760     reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3761     tmp = (insn >> 9) & 7;
3762     if (tmp == 0) {
3763         tmp = 8;
3764     }
3765 
3766     shift = tcg_constant_i32(tmp);
3767     if (insn & 8) {
3768         rotate(reg, shift, left, 16);
3769     } else {
3770         TCGv X = rotate_x(reg, shift, left, 16);
3771         rotate_x_flags(reg, X, 16);
3772     }
3773     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3774     set_cc_op(s, CC_OP_FLAGS);
3775 }
3776 
/*
 * ROL/ROR/ROXL/ROXR Dy,Dx for long operands.  The architectural count
 * is Dy modulo 64.  Plain rotates reduce it modulo 32 for the rotate
 * itself but must clear C when the full count is zero; rotates through
 * X reduce it modulo 33 (the X bit makes the value 33 bits wide).
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3807 
/*
 * ROL/ROR/ROXL/ROXR Dy,Dx for byte operands.  The count is Dy modulo
 * 64; plain rotates use it modulo 8 (clearing C when the full count is
 * zero) and rotates through X use it modulo 9.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3839 
/*
 * ROL/ROR/ROXL/ROXR Dy,Dx for word operands.  The count is Dy modulo
 * 64; plain rotates use it modulo 16 (clearing C when the full count
 * is zero) and rotates through X use it modulo 17.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3871 
/*
 * Memory rotate: rol/ror/roxl/roxr <ea>.  Memory rotates always
 * operate on a word and always rotate by exactly one bit.  Bit 9
 * selects plain rotate vs rotate-through-X.
 */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory rotates have an implicit count of 1.  */
    shift = tcg_constant_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3891 
/*
 * BFEXTU/BFEXTS Dn{offset:width},Dm: extract a bit field from a data
 * register.  Offset and width may each be immediate or come from a
 * register (ext bits 11 and 5).  The sign-extended field always lands
 * in QREG_CC_N for the flags; the destination gets the signed or
 * unsigned extraction depending on the opcode.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    /* A width field of 0 encodes a width of 32.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32): right-shift count to extend.  */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3958 
/*
 * BFEXTU/BFEXTS <ea>{offset:width},Dn: extract a bit field from
 * memory, delegating to the bfexts/bfextu helpers.  The unsigned
 * helper returns a 64-bit pair: the extracted value plus the
 * sign-extended field for the flags.
 */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5 set) or 5-bit immediate.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11 set) or 5-bit immediate.  */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        /* Low half is the unsigned field, high half feeds the flags.  */
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
3993 
/*
 * BFCHG/BFCLR/BFFFO/BFSET/BFTST Dn{offset:width}: bit field operations
 * on a data register.  The field is rotated to the top of the word
 * into QREG_CC_N for the flags, and an inverted mask (rotated into
 * field position) is built to apply the operation.  For BFFFO, tofs
 * and tlen carry the offset and length to the helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    /* A width field of 0 encodes a width of 32.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0; rotate instead of shift.  */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        /* mask has 0s in the field, 1s elsewhere, in field position.  */
        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Apply the operation; 'mask' is inverted (0s inside the field).  */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4080 
/*
 * BFCHG/BFCLR/BFFFO/BFSET/BFTST <ea>{offset:width}: bit field
 * operations on memory, delegating to per-operation helpers.  Each
 * helper returns the field value used to set the flags; bfffo
 * additionally returns the found bit offset in the high half of a
 * 64-bit pair.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width: register (ext bit 5 set) or 5-bit immediate.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Offset: register (ext bit 11 set) or 5-bit immediate.  */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4127 
/*
 * BFINS with a data register destination: insert the low LEN bits of
 * the source register at big-endian bit offset OFS in Dn.  The flags
 * are set from the inserted value left-justified in CC_N.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* Width encoding: an immediate value of 0 means 32 bits. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* Compute the flags: shift the field value to the top of CC_N. */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps past bit 0: mask, rotate into place, merge. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        /* Variable width and/or offset: build the mask and rotation
           amount at runtime, then merge the same way. */
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): the bits outside the field. */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4197 
4198 DISAS_INSN(bfins_mem)
4199 {
4200     int ext = read_im16(env, s);
4201     TCGv src = DREG(ext, 12);
4202     TCGv addr, len, ofs;
4203 
4204     addr = gen_lea(env, s, insn, OS_UNSIZED);
4205     if (IS_NULL_QREG(addr)) {
4206         gen_addr_fault(s);
4207         return;
4208     }
4209 
4210     if (ext & 0x20) {
4211         len = DREG(ext, 0);
4212     } else {
4213         len = tcg_constant_i32(extract32(ext, 0, 5));
4214     }
4215     if (ext & 0x800) {
4216         ofs = DREG(ext, 6);
4217     } else {
4218         ofs = tcg_constant_i32(extract32(ext, 6, 5));
4219     }
4220 
4221     gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4222     set_cc_op(s, CC_OP_LOGIC);
4223 }
4224 
4225 DISAS_INSN(ff1)
4226 {
4227     TCGv reg;
4228     reg = DREG(insn, 0);
4229     gen_logic_cc(s, reg, OS_LONG);
4230     gen_helper_ff1(reg, reg);
4231 }
4232 
4233 DISAS_INSN(chk)
4234 {
4235     TCGv src, reg;
4236     int opsize;
4237 
4238     switch ((insn >> 7) & 3) {
4239     case 3:
4240         opsize = OS_WORD;
4241         break;
4242     case 2:
4243         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4244             opsize = OS_LONG;
4245             break;
4246         }
4247         /* fallthru */
4248     default:
4249         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4250         return;
4251     }
4252     SRC_EA(env, src, opsize, 1, NULL);
4253     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4254 
4255     gen_flush_flags(s);
4256     gen_helper_chk(cpu_env, reg, src);
4257 }
4258 
4259 DISAS_INSN(chk2)
4260 {
4261     uint16_t ext;
4262     TCGv addr1, addr2, bound1, bound2, reg;
4263     int opsize;
4264 
4265     switch ((insn >> 9) & 3) {
4266     case 0:
4267         opsize = OS_BYTE;
4268         break;
4269     case 1:
4270         opsize = OS_WORD;
4271         break;
4272     case 2:
4273         opsize = OS_LONG;
4274         break;
4275     default:
4276         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4277         return;
4278     }
4279 
4280     ext = read_im16(env, s);
4281     if ((ext & 0x0800) == 0) {
4282         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4283         return;
4284     }
4285 
4286     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4287     addr2 = tcg_temp_new();
4288     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4289 
4290     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4291     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4292 
4293     reg = tcg_temp_new();
4294     if (ext & 0x8000) {
4295         tcg_gen_mov_i32(reg, AREG(ext, 12));
4296     } else {
4297         gen_ext(reg, DREG(ext, 12), opsize, 1);
4298     }
4299 
4300     gen_flush_flags(s);
4301     gen_helper_chk2(cpu_env, reg, bound1, bound2);
4302 }
4303 
4304 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4305 {
4306     TCGv addr;
4307     TCGv_i64 t0, t1;
4308 
4309     addr = tcg_temp_new();
4310 
4311     t0 = tcg_temp_new_i64();
4312     t1 = tcg_temp_new_i64();
4313 
4314     tcg_gen_andi_i32(addr, src, ~15);
4315     tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4316     tcg_gen_addi_i32(addr, addr, 8);
4317     tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4318 
4319     tcg_gen_andi_i32(addr, dst, ~15);
4320     tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4321     tcg_gen_addi_i32(addr, addr, 8);
4322     tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4323 }
4324 
4325 DISAS_INSN(move16_reg)
4326 {
4327     int index = IS_USER(s);
4328     TCGv tmp;
4329     uint16_t ext;
4330 
4331     ext = read_im16(env, s);
4332     if ((ext & (1 << 15)) == 0) {
4333         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4334     }
4335 
4336     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4337 
4338     /* Ax can be Ay, so save Ay before incrementing Ax */
4339     tmp = tcg_temp_new();
4340     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4341     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4342     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4343 }
4344 
4345 DISAS_INSN(move16_mem)
4346 {
4347     int index = IS_USER(s);
4348     TCGv reg, addr;
4349 
4350     reg = AREG(insn, 0);
4351     addr = tcg_constant_i32(read_im32(env, s));
4352 
4353     if ((insn >> 3) & 1) {
4354         /* MOVE16 (xxx).L, (Ay) */
4355         m68k_copy_line(reg, addr, index);
4356     } else {
4357         /* MOVE16 (Ay), (xxx).L */
4358         m68k_copy_line(addr, reg, index);
4359     }
4360 
4361     if (((insn >> 3) & 2) == 0) {
4362         /* (Ay)+ */
4363         tcg_gen_addi_i32(reg, reg, 16);
4364     }
4365 }
4366 
4367 DISAS_INSN(strldsr)
4368 {
4369     uint16_t ext;
4370     uint32_t addr;
4371 
4372     addr = s->pc - 2;
4373     ext = read_im16(env, s);
4374     if (ext != 0x46FC) {
4375         gen_exception(s, addr, EXCP_ILLEGAL);
4376         return;
4377     }
4378     ext = read_im16(env, s);
4379     if (IS_USER(s) || (ext & SR_S) == 0) {
4380         gen_exception(s, addr, EXCP_PRIVILEGE);
4381         return;
4382     }
4383     gen_push(s, gen_get_sr(s));
4384     gen_set_sr_im(s, ext, 0);
4385     gen_exit_tb(s);
4386 }
4387 
4388 DISAS_INSN(move_from_sr)
4389 {
4390     TCGv sr;
4391 
4392     if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
4393         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4394         return;
4395     }
4396     sr = gen_get_sr(s);
4397     DEST_EA(env, insn, OS_WORD, sr, NULL);
4398 }
4399 
4400 #if defined(CONFIG_SOFTMMU)
/*
 * MOVES (privileged): move to/from the effective address through the
 * alternate address space selected by SFC (reads) or DFC (writes).
 * The extension word selects the general register and the direction.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Address register destination: sign-extend the value. */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            /* Data register destination: only the low bits change. */
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Apply the post-increment / pre-decrement writeback explicitly. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* A byte access through A7 keeps the stack pointer even. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4458 
4459 DISAS_INSN(move_to_sr)
4460 {
4461     if (IS_USER(s)) {
4462         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4463         return;
4464     }
4465     gen_move_to_sr(env, s, insn, false);
4466     gen_exit_tb(s);
4467 }
4468 
4469 DISAS_INSN(move_from_usp)
4470 {
4471     if (IS_USER(s)) {
4472         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4473         return;
4474     }
4475     tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4476                    offsetof(CPUM68KState, sp[M68K_USP]));
4477 }
4478 
4479 DISAS_INSN(move_to_usp)
4480 {
4481     if (IS_USER(s)) {
4482         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4483         return;
4484     }
4485     tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4486                    offsetof(CPUM68KState, sp[M68K_USP]));
4487 }
4488 
4489 DISAS_INSN(halt)
4490 {
4491     if (IS_USER(s)) {
4492         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4493         return;
4494     }
4495 
4496     gen_exception(s, s->pc, EXCP_HALT_INSN);
4497 }
4498 
4499 DISAS_INSN(stop)
4500 {
4501     uint16_t ext;
4502 
4503     if (IS_USER(s)) {
4504         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4505         return;
4506     }
4507 
4508     ext = read_im16(env, s);
4509 
4510     gen_set_sr_im(s, ext, 0);
4511     tcg_gen_movi_i32(cpu_halted, 1);
4512     gen_exception(s, s->pc, EXCP_HLT);
4513 }
4514 
4515 DISAS_INSN(rte)
4516 {
4517     if (IS_USER(s)) {
4518         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4519         return;
4520     }
4521     gen_exception(s, s->base.pc_next, EXCP_RTE);
4522 }
4523 
4524 DISAS_INSN(cf_movec)
4525 {
4526     uint16_t ext;
4527     TCGv reg;
4528 
4529     if (IS_USER(s)) {
4530         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4531         return;
4532     }
4533 
4534     ext = read_im16(env, s);
4535 
4536     if (ext & 0x8000) {
4537         reg = AREG(ext, 12);
4538     } else {
4539         reg = DREG(ext, 12);
4540     }
4541     gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
4542     gen_exit_tb(s);
4543 }
4544 
4545 DISAS_INSN(m68k_movec)
4546 {
4547     uint16_t ext;
4548     TCGv reg, creg;
4549 
4550     if (IS_USER(s)) {
4551         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4552         return;
4553     }
4554 
4555     ext = read_im16(env, s);
4556 
4557     if (ext & 0x8000) {
4558         reg = AREG(ext, 12);
4559     } else {
4560         reg = DREG(ext, 12);
4561     }
4562     creg = tcg_constant_i32(ext & 0xfff);
4563     if (insn & 1) {
4564         gen_helper_m68k_movec_to(cpu_env, creg, reg);
4565     } else {
4566         gen_helper_m68k_movec_from(reg, cpu_env, creg);
4567     }
4568     gen_exit_tb(s);
4569 }
4570 
4571 DISAS_INSN(intouch)
4572 {
4573     if (IS_USER(s)) {
4574         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4575         return;
4576     }
4577     /* ICache fetch.  Implement as no-op.  */
4578 }
4579 
4580 DISAS_INSN(cpushl)
4581 {
4582     if (IS_USER(s)) {
4583         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4584         return;
4585     }
4586     /* Cache push/invalidate.  Implement as no-op.  */
4587 }
4588 
4589 DISAS_INSN(cpush)
4590 {
4591     if (IS_USER(s)) {
4592         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4593         return;
4594     }
4595     /* Cache push/invalidate.  Implement as no-op.  */
4596 }
4597 
4598 DISAS_INSN(cinv)
4599 {
4600     if (IS_USER(s)) {
4601         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4602         return;
4603     }
4604     /* Invalidate cache line.  Implement as no-op.  */
4605 }
4606 
4607 #if defined(CONFIG_SOFTMMU)
4608 DISAS_INSN(pflush)
4609 {
4610     TCGv opmode;
4611 
4612     if (IS_USER(s)) {
4613         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4614         return;
4615     }
4616 
4617     opmode = tcg_constant_i32((insn >> 3) & 3);
4618     gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4619 }
4620 
4621 DISAS_INSN(ptest)
4622 {
4623     TCGv is_read;
4624 
4625     if (IS_USER(s)) {
4626         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4627         return;
4628     }
4629     is_read = tcg_constant_i32((insn >> 5) & 1);
4630     gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4631 }
4632 #endif
4633 
DISAS_INSN(wddata)
{
    /* WDDATA debug output is not supported: always raise a
       privilege violation, regardless of the current mode. */
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4638 
/* WDEBUG (privileged).  Not implemented; aborts if reached. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4648 #endif
4649 
DISAS_INSN(trap)
{
    /* TRAP #n: the vector number is the low 4 bits of the opcode. */
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4654 
/*
 * Emit a conditional trap on condition C: when it holds, set PC to
 * the start of the next instruction and raise a format-2 EXCP_TRAPCC.
 * TCG_COND_NEVER emits no code at all; TCG_COND_ALWAYS traps
 * unconditionally.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            /* Undo the end-of-TB state set by the exception so the
               not-taken path continues translating. */
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4677 
/*
 * TRAPcc / TRAPcc.W / TRAPcc.L: trap when the condition holds.  The
 * optional word/long immediate is not used by the translation and is
 * simply skipped.
 */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    /* Condition code is in bits 11:8 of the opcode. */
    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4700 
4701 DISAS_INSN(trapv)
4702 {
4703     DisasCompare c;
4704 
4705     gen_cc_cond(&c, s, 9); /* V set */
4706     do_trapcc(s, &c);
4707 }
4708 
4709 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4710 {
4711     switch (reg) {
4712     case M68K_FPIAR:
4713         tcg_gen_movi_i32(res, 0);
4714         break;
4715     case M68K_FPSR:
4716         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4717         break;
4718     case M68K_FPCR:
4719         tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4720         break;
4721     }
4722 }
4723 
4724 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4725 {
4726     switch (reg) {
4727     case M68K_FPIAR:
4728         break;
4729     case M68K_FPSR:
4730         tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4731         break;
4732     case M68K_FPCR:
4733         gen_helper_set_fpcr(cpu_env, val);
4734         break;
4735     }
4736 }
4737 
4738 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4739 {
4740     int index = IS_USER(s);
4741     TCGv tmp;
4742 
4743     tmp = tcg_temp_new();
4744     gen_load_fcr(s, tmp, reg);
4745     tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4746 }
4747 
4748 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4749 {
4750     int index = IS_USER(s);
4751     TCGv tmp;
4752 
4753     tmp = tcg_temp_new();
4754     tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4755     gen_store_fcr(s, tmp, reg);
4756 }
4757 
4758 
/*
 * FMOVE(M) to/from the floating-point control registers FPCR, FPSR
 * and FPIAR.  EXT bits 10-12 hold the register mask and bit 13 the
 * direction (1 = from control register to EA).  Dn, An and immediate
 * operands are special-cased; all other modes transfer through
 * memory, with -(An)/(An)+ writing the final address back.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* The mask must name exactly one of the three registers. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Only a store of a single register from an immediate. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory operand: compute the effective address. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store the selected registers at descending addresses. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* Ascending transfer for all other modes. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* (An)+ writeback. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4854 
/*
 * FMOVEM: move multiple floating-point data registers to or from
 * memory.  EXT bit 13 is the direction (0 = load), bits 11-12 the
 * mode (static vs dynamic register list, pre-dec vs post-inc), and
 * the low byte (or Dn for a dynamic list) the register mask.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* The helpers return the final address in TMP; write it back for
       the -(An) and (An)+ addressing modes (octal 040 and 030). */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4914 
4915 /*
4916  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4917  * immediately before the next FP instruction is executed.
4918  */
4919 DISAS_INSN(fpu)
4920 {
4921     uint16_t ext;
4922     int opmode;
4923     int opsize;
4924     TCGv_ptr cpu_src, cpu_dest;
4925 
4926     ext = read_im16(env, s);
4927     opmode = ext & 0x7f;
4928     switch ((ext >> 13) & 7) {
4929     case 0:
4930         break;
4931     case 1:
4932         goto undef;
4933     case 2:
4934         if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
4935             /* fmovecr */
4936             TCGv rom_offset = tcg_constant_i32(opmode);
4937             cpu_dest = gen_fp_ptr(REG(ext, 7));
4938             gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
4939             return;
4940         }
4941         break;
4942     case 3: /* fmove out */
4943         cpu_src = gen_fp_ptr(REG(ext, 7));
4944         opsize = ext_opsize(ext, 10);
4945         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4946                       EA_STORE, IS_USER(s)) == -1) {
4947             gen_addr_fault(s);
4948         }
4949         gen_helper_ftst(cpu_env, cpu_src);
4950         return;
4951     case 4: /* fmove to control register.  */
4952     case 5: /* fmove from control register.  */
4953         gen_op_fmove_fcr(env, s, insn, ext);
4954         return;
4955     case 6: /* fmovem */
4956     case 7:
4957         if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
4958             goto undef;
4959         }
4960         gen_op_fmovem(env, s, insn, ext);
4961         return;
4962     }
4963     if (ext & (1 << 14)) {
4964         /* Source effective address.  */
4965         opsize = ext_opsize(ext, 10);
4966         cpu_src = gen_fp_result_ptr();
4967         if (gen_ea_fp(env, s, insn, opsize, cpu_src,
4968                       EA_LOADS, IS_USER(s)) == -1) {
4969             gen_addr_fault(s);
4970             return;
4971         }
4972     } else {
4973         /* Source register.  */
4974         opsize = OS_EXTENDED;
4975         cpu_src = gen_fp_ptr(REG(ext, 10));
4976     }
4977     cpu_dest = gen_fp_ptr(REG(ext, 7));
4978     switch (opmode) {
4979     case 0: /* fmove */
4980         gen_fp_move(cpu_dest, cpu_src);
4981         break;
4982     case 0x40: /* fsmove */
4983         gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
4984         break;
4985     case 0x44: /* fdmove */
4986         gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
4987         break;
4988     case 1: /* fint */
4989         gen_helper_firound(cpu_env, cpu_dest, cpu_src);
4990         break;
4991     case 2: /* fsinh */
4992         gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
4993         break;
4994     case 3: /* fintrz */
4995         gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
4996         break;
4997     case 4: /* fsqrt */
4998         gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
4999         break;
5000     case 0x41: /* fssqrt */
5001         gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
5002         break;
5003     case 0x45: /* fdsqrt */
5004         gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
5005         break;
5006     case 0x06: /* flognp1 */
5007         gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
5008         break;
5009     case 0x08: /* fetoxm1 */
5010         gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
5011         break;
5012     case 0x09: /* ftanh */
5013         gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5014         break;
5015     case 0x0a: /* fatan */
5016         gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5017         break;
5018     case 0x0c: /* fasin */
5019         gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5020         break;
5021     case 0x0d: /* fatanh */
5022         gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5023         break;
5024     case 0x0e: /* fsin */
5025         gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5026         break;
5027     case 0x0f: /* ftan */
5028         gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5029         break;
5030     case 0x10: /* fetox */
5031         gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5032         break;
5033     case 0x11: /* ftwotox */
5034         gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5035         break;
5036     case 0x12: /* ftentox */
5037         gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5038         break;
5039     case 0x14: /* flogn */
5040         gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5041         break;
5042     case 0x15: /* flog10 */
5043         gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5044         break;
5045     case 0x16: /* flog2 */
5046         gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5047         break;
5048     case 0x18: /* fabs */
5049         gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5050         break;
5051     case 0x58: /* fsabs */
5052         gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5053         break;
5054     case 0x5c: /* fdabs */
5055         gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5056         break;
5057     case 0x19: /* fcosh */
5058         gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5059         break;
5060     case 0x1a: /* fneg */
5061         gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5062         break;
5063     case 0x5a: /* fsneg */
5064         gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5065         break;
5066     case 0x5e: /* fdneg */
5067         gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5068         break;
5069     case 0x1c: /* facos */
5070         gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5071         break;
5072     case 0x1d: /* fcos */
5073         gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5074         break;
5075     case 0x1e: /* fgetexp */
5076         gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5077         break;
5078     case 0x1f: /* fgetman */
5079         gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5080         break;
5081     case 0x20: /* fdiv */
5082         gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5083         break;
5084     case 0x60: /* fsdiv */
5085         gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5086         break;
5087     case 0x64: /* fddiv */
5088         gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5089         break;
5090     case 0x21: /* fmod */
5091         gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5092         break;
5093     case 0x22: /* fadd */
5094         gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5095         break;
5096     case 0x62: /* fsadd */
5097         gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5098         break;
5099     case 0x66: /* fdadd */
5100         gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5101         break;
5102     case 0x23: /* fmul */
5103         gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5104         break;
5105     case 0x63: /* fsmul */
5106         gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5107         break;
5108     case 0x67: /* fdmul */
5109         gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5110         break;
5111     case 0x24: /* fsgldiv */
5112         gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5113         break;
5114     case 0x25: /* frem */
5115         gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5116         break;
5117     case 0x26: /* fscale */
5118         gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5119         break;
5120     case 0x27: /* fsglmul */
5121         gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5122         break;
5123     case 0x28: /* fsub */
5124         gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5125         break;
5126     case 0x68: /* fssub */
5127         gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5128         break;
5129     case 0x6c: /* fdsub */
5130         gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5131         break;
5132     case 0x30: case 0x31: case 0x32:
5133     case 0x33: case 0x34: case 0x35:
5134     case 0x36: case 0x37: {
5135             TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5136             gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5137         }
5138         break;
5139     case 0x38: /* fcmp */
5140         gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5141         return;
5142     case 0x3a: /* ftst */
5143         gen_helper_ftst(cpu_env, cpu_src);
5144         return;
5145     default:
5146         goto undef;
5147     }
5148     gen_helper_ftst(cpu_env, cpu_dest);
5149     return;
5150 undef:
5151     /* FIXME: Is this right for offset addressing modes?  */
5152     s->pc -= 2;
5153     disas_undef_fpu(env, s, insn);
5154 }
5155 
/*
 * Fill in @c so that "tcond(v1, v2)" tests FPU conditional predicate
 * @cond (0..31) against the FPSR condition code bits (A = unordered,
 * Z = zero, N = negative, per the per-case comments below).
 * Predicates 16-31 are the signaling variants of 0-15 and currently
 * generate identical tests; the BSUN exception they should raise is
 * still a TODO.
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->v2 = tcg_constant_i32(0);
    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->v1 = c->v2;
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr,
                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_EQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        /* Shift A into the N bit position so it can be OR-folded with N. */
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* Invert the folded (A || N) term; NE then tests Z || !(A || N). */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        /* Shift A into the N bit position, then mask N off when A is set. */
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_EQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        c->tcond = TCG_COND_NE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
        c->tcond = TCG_COND_NE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        /* Shift Z into the N bit position so it can be OR-folded with N. */
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z))
;
        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* Invert the folded (N || Z) term; NE then tests A || !(N || Z). */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        /* Shift Z into the N bit position, then mask N off when Z is set. */
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
        c->tcond = TCG_COND_NE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        c->tcond = TCG_COND_EQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->v1 = c->v2;
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
}
5278 
5279 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5280 {
5281     DisasCompare c;
5282 
5283     gen_fcc_cond(&c, s, cond);
5284     update_cc_op(s);
5285     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5286 }
5287 
5288 DISAS_INSN(fbcc)
5289 {
5290     uint32_t offset;
5291     uint32_t base;
5292     TCGLabel *l1;
5293 
5294     base = s->pc;
5295     offset = (int16_t)read_im16(env, s);
5296     if (insn & (1 << 6)) {
5297         offset = (offset << 16) | read_im16(env, s);
5298     }
5299 
5300     l1 = gen_new_label();
5301     update_cc_op(s);
5302     gen_fjmpcc(s, insn & 0x3f, l1);
5303     gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5304     gen_set_label(l1);
5305     gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5306 }
5307 
5308 DISAS_INSN(fscc)
5309 {
5310     DisasCompare c;
5311     int cond;
5312     TCGv tmp;
5313     uint16_t ext;
5314 
5315     ext = read_im16(env, s);
5316     cond = ext & 0x3f;
5317     gen_fcc_cond(&c, s, cond);
5318 
5319     tmp = tcg_temp_new();
5320     tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5321 
5322     tcg_gen_neg_i32(tmp, tmp);
5323     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5324 }
5325 
5326 DISAS_INSN(ftrapcc)
5327 {
5328     DisasCompare c;
5329     uint16_t ext;
5330     int cond;
5331 
5332     ext = read_im16(env, s);
5333     cond = ext & 0x3f;
5334 
5335     /* Consume and discard the immediate operand. */
5336     switch (extract32(insn, 0, 3)) {
5337     case 2: /* ftrapcc.w */
5338         (void)read_im16(env, s);
5339         break;
5340     case 3: /* ftrapcc.l */
5341         (void)read_im32(env, s);
5342         break;
5343     case 4: /* ftrapcc (no operand) */
5344         break;
5345     default:
5346         /* ftrapcc registered with only valid opmodes */
5347         g_assert_not_reached();
5348     }
5349 
5350     gen_fcc_cond(&c, s, cond);
5351     do_trapcc(s, &c);
5352 }
5353 
5354 #if defined(CONFIG_SOFTMMU)
5355 DISAS_INSN(frestore)
5356 {
5357     TCGv addr;
5358 
5359     if (IS_USER(s)) {
5360         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5361         return;
5362     }
5363     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5364         SRC_EA(env, addr, OS_LONG, 0, NULL);
5365         /* FIXME: check the state frame */
5366     } else {
5367         disas_undef(env, s, insn);
5368     }
5369 }
5370 
5371 DISAS_INSN(fsave)
5372 {
5373     if (IS_USER(s)) {
5374         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5375         return;
5376     }
5377 
5378     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5379         /* always write IDLE */
5380         TCGv idle = tcg_constant_i32(0x41000000);
5381         DEST_EA(env, insn, OS_LONG, idle, NULL);
5382     } else {
5383         disas_undef(env, s, insn);
5384     }
5385 }
5386 #endif
5387 
5388 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5389 {
5390     TCGv tmp = tcg_temp_new();
5391     if (s->env->macsr & MACSR_FI) {
5392         if (upper)
5393             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5394         else
5395             tcg_gen_shli_i32(tmp, val, 16);
5396     } else if (s->env->macsr & MACSR_SU) {
5397         if (upper)
5398             tcg_gen_sari_i32(tmp, val, 16);
5399         else
5400             tcg_gen_ext16s_i32(tmp, val);
5401     } else {
5402         if (upper)
5403             tcg_gen_shri_i32(tmp, val, 16);
5404         else
5405             tcg_gen_ext16u_i32(tmp, val);
5406     }
5407     return tmp;
5408 }
5409 
5410 static void gen_mac_clear_flags(void)
5411 {
5412     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5413                      ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5414 }
5415 
/*
 * ColdFire (E)MAC multiply-accumulate.  Multiplies two register operands
 * (or 16-bit halves of them), optionally shifts the product, and
 * adds/subtracts it into an accumulator; supports a parallel memory load
 * ("MAC with load", insn bits 4-5) and a dual-accumulate form gated on
 * the CF_EMAC_B feature.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temporary, once per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is bit 0, ext bit 4 is bit 1. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        /* The load form inverts the low accumulator-index bit. */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Optional product scaling: << 1 or >> 1 from ext bits 9-10. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* First accumulate: insn bit 8 selects subtract vs add. */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator according to the operating mode. */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        /* Second accumulate: ext bit 1 selects subtract vs add. */
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Deferred writeback for the MAC-with-load form. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5583 
5584 DISAS_INSN(from_mac)
5585 {
5586     TCGv rx;
5587     TCGv_i64 acc;
5588     int accnum;
5589 
5590     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5591     accnum = (insn >> 9) & 3;
5592     acc = MACREG(accnum);
5593     if (s->env->macsr & MACSR_FI) {
5594         gen_helper_get_macf(rx, cpu_env, acc);
5595     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5596         tcg_gen_extrl_i64_i32(rx, acc);
5597     } else if (s->env->macsr & MACSR_SU) {
5598         gen_helper_get_macs(rx, acc);
5599     } else {
5600         gen_helper_get_macu(rx, acc);
5601     }
5602     if (insn & 0x40) {
5603         tcg_gen_movi_i64(acc, 0);
5604         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5605     }
5606 }
5607 
5608 DISAS_INSN(move_mac)
5609 {
5610     /* FIXME: This can be done without a helper.  */
5611     int src;
5612     TCGv dest;
5613     src = insn & 3;
5614     dest = tcg_constant_i32((insn >> 9) & 3);
5615     gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
5616     gen_mac_clear_flags();
5617     gen_helper_mac_set_flags(cpu_env, dest);
5618 }
5619 
5620 DISAS_INSN(from_macsr)
5621 {
5622     TCGv reg;
5623 
5624     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5625     tcg_gen_mov_i32(reg, QREG_MACSR);
5626 }
5627 
5628 DISAS_INSN(from_mask)
5629 {
5630     TCGv reg;
5631     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5632     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5633 }
5634 
5635 DISAS_INSN(from_mext)
5636 {
5637     TCGv reg;
5638     TCGv acc;
5639     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5640     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5641     if (s->env->macsr & MACSR_FI)
5642         gen_helper_get_mac_extf(reg, cpu_env, acc);
5643     else
5644         gen_helper_get_mac_exti(reg, cpu_env, acc);
5645 }
5646 
5647 DISAS_INSN(macsr_to_ccr)
5648 {
5649     TCGv tmp = tcg_temp_new();
5650 
5651     /* Note that X and C are always cleared. */
5652     tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5653     gen_helper_set_ccr(cpu_env, tmp);
5654     set_cc_op(s, CC_OP_FLAGS);
5655 }
5656 
5657 DISAS_INSN(to_mac)
5658 {
5659     TCGv_i64 acc;
5660     TCGv val;
5661     int accnum;
5662     accnum = (insn >> 9) & 3;
5663     acc = MACREG(accnum);
5664     SRC_EA(env, val, OS_LONG, 0, NULL);
5665     if (s->env->macsr & MACSR_FI) {
5666         tcg_gen_ext_i32_i64(acc, val);
5667         tcg_gen_shli_i64(acc, acc, 8);
5668     } else if (s->env->macsr & MACSR_SU) {
5669         tcg_gen_ext_i32_i64(acc, val);
5670     } else {
5671         tcg_gen_extu_i32_i64(acc, val);
5672     }
5673     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5674     gen_mac_clear_flags();
5675     gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
5676 }
5677 
5678 DISAS_INSN(to_macsr)
5679 {
5680     TCGv val;
5681     SRC_EA(env, val, OS_LONG, 0, NULL);
5682     gen_helper_set_macsr(cpu_env, val);
5683     gen_exit_tb(s);
5684 }
5685 
5686 DISAS_INSN(to_mask)
5687 {
5688     TCGv val;
5689     SRC_EA(env, val, OS_LONG, 0, NULL);
5690     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5691 }
5692 
5693 DISAS_INSN(to_mext)
5694 {
5695     TCGv val;
5696     TCGv acc;
5697     SRC_EA(env, val, OS_LONG, 0, NULL);
5698     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5699     if (s->env->macsr & MACSR_FI)
5700         gen_helper_set_mac_extf(cpu_env, val, acc);
5701     else if (s->env->macsr & MACSR_SU)
5702         gen_helper_set_mac_exts(cpu_env, val, acc);
5703     else
5704         gen_helper_set_mac_extu(cpu_env, val, acc);
5705 }
5706 
5707 static disas_proc opcode_table[65536];
5708 
5709 static void
5710 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5711 {
5712   int i;
5713   int from;
5714   int to;
5715 
5716   /* Sanity check.  All set bits must be included in the mask.  */
5717   if (opcode & ~mask) {
5718       fprintf(stderr,
5719               "qemu internal error: bogus opcode definition %04x/%04x\n",
5720               opcode, mask);
5721       abort();
5722   }
5723   /*
5724    * This could probably be cleverer.  For now just optimize the case where
5725    * the top bits are known.
5726    */
5727   /* Find the first zero bit in the mask.  */
5728   i = 0x8000;
5729   while ((i & mask) != 0)
5730       i >>= 1;
5731   /* Iterate over all combinations of this and lower bits.  */
5732   if (i == 0)
5733       i = 1;
5734   else
5735       i <<= 1;
5736   from = opcode & ~(i - 1);
5737   to = from + i;
5738   for (i = from; i < to; i++) {
5739       if ((i & mask) == opcode)
5740           opcode_table[i] = proc;
5741   }
5742 }
5743 
5744 /*
5745  * Register m68k opcode handlers.  Order is important.
5746  * Later insn override earlier ones.
5747  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Mask 0000 matches every opcode; later registrations override it. */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if defined(CONFIG_SOFTMMU)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    /* ColdFire EMAC instructions (0xAxxx). */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);
    /* Shift, rotate and bitfield instructions (0xExxx). */
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* Coprocessor/FPU and system instructions (0xFxxx). */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6000 
6001 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6002 {
6003     DisasContext *dc = container_of(dcbase, DisasContext, base);
6004     CPUM68KState *env = cpu->env_ptr;
6005 
6006     dc->env = env;
6007     dc->pc = dc->base.pc_first;
6008     /* This value will always be filled in properly before m68k_tr_tb_stop. */
6009     dc->pc_prev = 0xdeadbeef;
6010     dc->cc_op = CC_OP_DYNAMIC;
6011     dc->cc_op_synced = 1;
6012     dc->done_mac = 0;
6013     dc->writeback_mask = 0;
6014 
6015     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6016     /* If architectural single step active, limit to 1 */
6017     if (dc->ss_active) {
6018         dc->base.max_insns = 1;
6019     }
6020 }
6021 
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* No per-TB setup needed beyond m68k_tr_init_disas_context. */
}
6025 
6026 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6027 {
6028     DisasContext *dc = container_of(dcbase, DisasContext, base);
6029     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6030 }
6031 
/* Fetch, dispatch and account for one guest instruction. */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu->env_ptr;
    /* read_im16 advances dc->pc past the opcode word. */
    uint16_t insn = read_im16(env, dc);

    /* Dispatch through the table built by register_m68k_insns(). */
    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);

    /* Record where this insn started and where the next one begins. */
    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6066 
6067 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6068 {
6069     DisasContext *dc = container_of(dcbase, DisasContext, base);
6070 
6071     switch (dc->base.is_jmp) {
6072     case DISAS_NORETURN:
6073         break;
6074     case DISAS_TOO_MANY:
6075         update_cc_op(dc);
6076         gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6077         break;
6078     case DISAS_JUMP:
6079         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6080         if (dc->ss_active) {
6081             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6082         } else {
6083             tcg_gen_lookup_and_goto_ptr();
6084         }
6085         break;
6086     case DISAS_EXIT:
6087         /*
6088          * We updated CC_OP and PC in gen_exit_tb, but also modified
6089          * other state that may require returning to the main loop.
6090          */
6091         if (dc->ss_active) {
6092             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6093         } else {
6094             tcg_gen_exit_tb(NULL, 0);
6095         }
6096         break;
6097     default:
6098         g_assert_not_reached();
6099     }
6100 }
6101 
6102 static void m68k_tr_disas_log(const DisasContextBase *dcbase,
6103                               CPUState *cpu, FILE *logfile)
6104 {
6105     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
6106     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
6107 }
6108 
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6117 
6118 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
6119                            target_ulong pc, void *host_pc)
6120 {
6121     DisasContext dc;
6122     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6123 }
6124 
6125 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6126 {
6127     floatx80 a = { .high = high, .low = low };
6128     union {
6129         float64 f64;
6130         double d;
6131     } u;
6132 
6133     u.f64 = floatx80_to_float64(a, &env->fp_status);
6134     return u.d;
6135 }
6136 
/*
 * Dump the architectural CPU state for the monitor / -d cpu logging.
 * Prints data/address/FP registers, PC, SR+CCR flags, FPSR/FPCR decode,
 * and (system emulation only) the alternate stack pointers and MMU state.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    for (i = 0; i < 8; i++) {
        /* One line per register index: Dn, An, and FPn (raw + as double). */
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* env->sr holds only the system byte; merge in the live CCR flags. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    /* FPSR condition codes: A(nan), I(nfinity), Z(ero), N(egative). */
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Decode FPCR rounding precision; unknown encodings print nothing. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode FPCR rounding mode (nearest/zero/minus/plus). */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    /* System emulation only: mark the active A7 bank with "->". */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif
}
6208