xref: /openbmc/qemu/target/m68k/translate.c (revision bf616ce4)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/translator.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "exec/log.h"
32 #include "fpu/softfloat.h"
33 #include "semihosting/semihost.h"
34 
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef  HELPER_H
38 
39 //#define DEBUG_DISPATCH 1
40 
41 #define DEFO32(name, offset) static TCGv QREG_##name;
42 #define DEFO64(name, offset) static TCGv_i64 QREG_##name;
43 #include "qregs.h.inc"
44 #undef DEFO32
45 #undef DEFO64
46 
47 static TCGv_i32 cpu_halted;
48 static TCGv_i32 cpu_exception_index;
49 
50 static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
51 static TCGv cpu_dregs[8];
52 static TCGv cpu_aregs[8];
53 static TCGv_i64 cpu_macc[4];
54 
55 #define REG(insn, pos)  (((insn) >> (pos)) & 7)
56 #define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
57 #define AREG(insn, pos) get_areg(s, REG(insn, pos))
58 #define MACREG(acc)     cpu_macc[acc]
59 #define QREG_SP         get_areg(s, 7)
60 
61 static TCGv NULL_QREG;
62 #define IS_NULL_QREG(t) (t == NULL_QREG)
63 /* Used to distinguish stores from bad addressing modes.  */
64 static TCGv store_dummy;
65 
/* Allocate the fixed TCG globals used throughout the translator.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Instantiate one TCG global per entry in qregs.h.inc.  */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(tcg_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(tcg_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /*
     * halted and exception_index live in the parent CPUState, which
     * precedes env inside M68kCPU -- hence the negative offset.
     */
    cpu_halted = tcg_global_mem_new_i32(tcg_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /*
     * TCG stores only a pointer to each register name, so the names are
     * laid out permanently in the static cpu_reg_names buffer:
     * "Dn\0"/"An\0" take 3 bytes each, "ACCn\0" takes 5 -- matching the
     * buffer's declared size of 2 * 8 * 3 + 5 * 4.
     */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(tcg_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Sentinel globals at impossible (negative) offsets: they are only
     * ever compared against (IS_NULL_QREG / store_dummy result marker),
     * never actually loaded or stored.
     */
    NULL_QREG = tcg_global_mem_new(tcg_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(tcg_env, -8, "NULL");
}
110 
111 /* internal defines */
/* Per-translation disassembly state for one translation block.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* next insn-stream word; advanced by read_im16() */
    target_ulong pc_prev;   /* NOTE(review): presumably pc of the previous insn -- confirm with users */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero when env's CC_OP matches cc_op */
    TCGv_i64 mactmp;        /* NOTE(review): MAC-unit scratch -- users not visible here */
    int done_mac;           /* NOTE(review): presumably "mactmp allocated" flag -- confirm */
    int writeback_mask;     /* bit N set: aregs[N] has a delayed writeback pending */
    TCGv writeback[8];      /* pending areg values; see delay_set_areg()/do_writebacks() */
    bool ss_active;         /* NOTE(review): single-step/trace enabled? -- confirm */
} DisasContext;
125 
126 static TCGv get_areg(DisasContext *s, unsigned regno)
127 {
128     if (s->writeback_mask & (1 << regno)) {
129         return s->writeback[regno];
130     } else {
131         return cpu_aregs[regno];
132     }
133 }
134 
135 static void delay_set_areg(DisasContext *s, unsigned regno,
136                            TCGv val, bool give_temp)
137 {
138     if (s->writeback_mask & (1 << regno)) {
139         if (give_temp) {
140             s->writeback[regno] = val;
141         } else {
142             tcg_gen_mov_i32(s->writeback[regno], val);
143         }
144     } else {
145         s->writeback_mask |= 1 << regno;
146         if (give_temp) {
147             s->writeback[regno] = val;
148         } else {
149             TCGv tmp = tcg_temp_new();
150             s->writeback[regno] = tmp;
151             tcg_gen_mov_i32(tmp, val);
152         }
153     }
154 }
155 
156 static void do_writebacks(DisasContext *s)
157 {
158     unsigned mask = s->writeback_mask;
159     if (mask) {
160         s->writeback_mask = 0;
161         do {
162             unsigned regno = ctz32(mask);
163             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
164             mask &= mask - 1;
165         } while (mask);
166     }
167 }
168 
169 /* is_jmp field values */
170 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
171 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
172 
173 #if defined(CONFIG_USER_ONLY)
174 #define IS_USER(s) 1
175 #else
176 #define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
177 #define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
178                       MMU_KERNEL_IDX : MMU_USER_IDX)
179 #define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
180                       MMU_KERNEL_IDX : MMU_USER_IDX)
181 #endif
182 
183 typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
184 
185 #ifdef DEBUG_DISPATCH
186 #define DISAS_INSN(name)                                                \
187     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
188                                   uint16_t insn);                       \
189     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
190                              uint16_t insn)                             \
191     {                                                                   \
192         qemu_log("Dispatch " #name "\n");                               \
193         real_disas_##name(env, s, insn);                                \
194     }                                                                   \
195     static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
196                                   uint16_t insn)
197 #else
198 #define DISAS_INSN(name)                                                \
199     static void disas_##name(CPUM68KState *env, DisasContext *s,        \
200                              uint16_t insn)
201 #endif
202 
/*
 * For each lazy cc_op, the set of flags (CCF_*) whose storage in the
 * QREG_CC_* variables is still meaningful.  set_cc_op() uses this to
 * discard flag values the new op makes dead.  X and N are never dead.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
211 
/*
 * Switch the lazy condition-code state to OP.  Flag storage that the
 * new op no longer needs is discarded (per cc_op_live); the env-side
 * CC_OP copy is marked stale until update_cc_op() syncs it.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /*
     * Discard CC computation that will no longer be used.
     * Note that X and N are never dead.
     */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
238 
239 /* Update the CPU env CC_OP state.  */
240 static void update_cc_op(DisasContext *s)
241 {
242     if (!s->cc_op_synced) {
243         s->cc_op_synced = 1;
244         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
245     }
246 }
247 
/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* cc_op must be in canonical env storage before leaving the TB.  */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
255 
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* cc_op must be in canonical env storage before leaving the TB.  */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
263 
/* Emit a call to the exception-raising helper for exception NR.  */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(nr));
}
268 
/*
 * Raise exception NR, recording THIS_PC for the Format $2 (6-word)
 * stack frame.  Terminates the translation block.
 */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), tcg_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
283 
/* Set PC to DEST and raise exception NR; terminates the TB.  */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    /* Sync lazy flag state first; the helper does not return here.  */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
293 
/* Raise an address-error exception at the current instruction.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
298 
299 /*
300  * Generate a load from the specified address.  Narrow values are
301  *  sign extended to full register width.
302  */
303 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
304                             int sign, int index)
305 {
306     TCGv tmp = tcg_temp_new_i32();
307 
308     switch (opsize) {
309     case OS_BYTE:
310     case OS_WORD:
311     case OS_LONG:
312         tcg_gen_qemu_ld_tl(tmp, addr, index,
313                            opsize | (sign ? MO_SIGN : 0) | MO_TE);
314         break;
315     default:
316         g_assert_not_reached();
317     }
318     return tmp;
319 }
320 
321 /* Generate a store.  */
322 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
323                              int index)
324 {
325     switch (opsize) {
326     case OS_BYTE:
327     case OS_WORD:
328     case OS_LONG:
329         tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
330         break;
331     default:
332         g_assert_not_reached();
333     }
334 }
335 
/* Direction/extension selector for gen_ldst() and the gen_ea* family.  */
typedef enum {
    EA_STORE,   /* write VAL to memory */
    EA_LOADU,   /* load, zero-extended */
    EA_LOADS    /* load, sign-extended */
} ea_what;
341 
342 /*
343  * Generate an unsigned load if VAL is 0 a signed load if val is -1,
344  * otherwise generate a store.
345  */
346 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
347                      ea_what what, int index)
348 {
349     if (what == EA_STORE) {
350         gen_store(s, opsize, addr, val, index);
351         return store_dummy;
352     } else {
353         return gen_load(s, opsize, addr, what == EA_LOADS, index);
354     }
355 }
356 
357 /* Read a 16-bit immediate constant */
358 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
359 {
360     uint16_t im;
361     im = translator_lduw(env, &s->base, s->pc);
362     s->pc += 2;
363     return im;
364 }
365 
366 /* Read an 8-bit immediate constant */
367 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
368 {
369     return read_im16(env, s);
370 }
371 
372 /* Read a 32-bit immediate constant.  */
373 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
374 {
375     uint32_t im;
376     im = read_im16(env, s) << 16;
377     im |= 0xffff & read_im16(env, s);
378     return im;
379 }
380 
381 /* Read a 64-bit immediate constant.  */
382 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
383 {
384     uint64_t im;
385     im = (uint64_t)read_im32(env, s) << 32;
386     im |= (uint64_t)read_im32(env, s);
387     return im;
388 }
389 
390 /* Calculate and address index.  */
391 static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
392 {
393     TCGv add;
394     int scale;
395 
396     add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
397     if ((ext & 0x800) == 0) {
398         tcg_gen_ext16s_i32(tmp, add);
399         add = tmp;
400     }
401     scale = (ext >> 9) & 3;
402     if (scale != 0) {
403         tcg_gen_shli_i32(tmp, add, scale);
404         add = tmp;
405     }
406     return add;
407 }
408 
409 /*
410  * Handle a base + index + displacement effective address.
411  * A NULL_QREG base means pc-relative.
412  */
413 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
414 {
415     uint32_t offset;
416     uint16_t ext;
417     TCGv add;
418     TCGv tmp;
419     uint32_t bd, od;
420 
421     offset = s->pc;
422     ext = read_im16(env, s);
423 
424     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
425         return NULL_QREG;
426 
427     if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
428         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
429         ext &= ~(3 << 9);
430     }
431 
432     if (ext & 0x100) {
433         /* full extension word format */
434         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
435             return NULL_QREG;
436 
437         if ((ext & 0x30) > 0x10) {
438             /* base displacement */
439             if ((ext & 0x30) == 0x20) {
440                 bd = (int16_t)read_im16(env, s);
441             } else {
442                 bd = read_im32(env, s);
443             }
444         } else {
445             bd = 0;
446         }
447         tmp = tcg_temp_new();
448         if ((ext & 0x44) == 0) {
449             /* pre-index */
450             add = gen_addr_index(s, ext, tmp);
451         } else {
452             add = NULL_QREG;
453         }
454         if ((ext & 0x80) == 0) {
455             /* base not suppressed */
456             if (IS_NULL_QREG(base)) {
457                 base = tcg_constant_i32(offset + bd);
458                 bd = 0;
459             }
460             if (!IS_NULL_QREG(add)) {
461                 tcg_gen_add_i32(tmp, add, base);
462                 add = tmp;
463             } else {
464                 add = base;
465             }
466         }
467         if (!IS_NULL_QREG(add)) {
468             if (bd != 0) {
469                 tcg_gen_addi_i32(tmp, add, bd);
470                 add = tmp;
471             }
472         } else {
473             add = tcg_constant_i32(bd);
474         }
475         if ((ext & 3) != 0) {
476             /* memory indirect */
477             base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
478             if ((ext & 0x44) == 4) {
479                 add = gen_addr_index(s, ext, tmp);
480                 tcg_gen_add_i32(tmp, add, base);
481                 add = tmp;
482             } else {
483                 add = base;
484             }
485             if ((ext & 3) > 1) {
486                 /* outer displacement */
487                 if ((ext & 3) == 2) {
488                     od = (int16_t)read_im16(env, s);
489                 } else {
490                     od = read_im32(env, s);
491                 }
492             } else {
493                 od = 0;
494             }
495             if (od != 0) {
496                 tcg_gen_addi_i32(tmp, add, od);
497                 add = tmp;
498             }
499         }
500     } else {
501         /* brief extension word format */
502         tmp = tcg_temp_new();
503         add = gen_addr_index(s, ext, tmp);
504         if (!IS_NULL_QREG(base)) {
505             tcg_gen_add_i32(tmp, add, base);
506             if ((int8_t)ext)
507                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
508         } else {
509             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
510         }
511         add = tmp;
512     }
513     return add;
514 }
515 
516 /* Sign or zero extend a value.  */
517 
518 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
519 {
520     switch (opsize) {
521     case OS_BYTE:
522     case OS_WORD:
523     case OS_LONG:
524         tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0));
525         break;
526     default:
527         g_assert_not_reached();
528     }
529 }
530 
/*
 * Evaluate all the CC flags into the canonical CC_OP_FLAGS form:
 * each of QREG_CC_C/V/Z/N holds its final value afterwards.
 */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already canonical.  */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* Here N holds the result, V the src operand, X the carry out.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the first operand: dest = result - src.  */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        /* Overflow iff operands agree in sign but result differs.  */
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* Here N holds the result, V the src operand, X the borrow out.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the first operand: dest = result + src.  */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* Here N holds dest and V holds src (see gen_update_cc_cmp).  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* N already holds the sign-extended result.  */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op only known at runtime: dispatch in the helper.  */
        gen_helper_flush_flags(tcg_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(tcg_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
605 
606 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
607 {
608     TCGv tmp;
609 
610     if (opsize == OS_LONG) {
611         tmp = val;
612     } else {
613         tmp = tcg_temp_new();
614         gen_ext(tmp, val, opsize, sign);
615     }
616 
617     return tmp;
618 }
619 
/* Set flags for a logical result VAL of width OPSIZE.  */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    /* N caches the sign-extended result; Z/C/V are derived lazily.  */
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
625 
/*
 * Record the operands of a compare so gen_flush_flags() can compute
 * the flags lazily: N = dest, V = src.
 */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    /* CC_OP_CMPB/W/L are consecutive, indexed by opsize.  */
    set_cc_op(s, CC_OP_CMPB + opsize);
}
632 
/*
 * Record the result and src operand of an add/sub for lazy flag
 * computation: N = sign-extended result, V = src.
 */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
638 
639 static inline int opsize_bytes(int opsize)
640 {
641     switch (opsize) {
642     case OS_BYTE: return 1;
643     case OS_WORD: return 2;
644     case OS_LONG: return 4;
645     case OS_SINGLE: return 4;
646     case OS_DOUBLE: return 8;
647     case OS_EXTENDED: return 12;
648     case OS_PACKED: return 12;
649     default:
650         g_assert_not_reached();
651     }
652 }
653 
654 static inline int insn_opsize(int insn)
655 {
656     switch ((insn >> 6) & 3) {
657     case 0: return OS_BYTE;
658     case 1: return OS_WORD;
659     case 2: return OS_LONG;
660     default:
661         g_assert_not_reached();
662     }
663 }
664 
665 static inline int ext_opsize(int ext, int pos)
666 {
667     switch ((ext >> pos) & 7) {
668     case 0: return OS_LONG;
669     case 1: return OS_SINGLE;
670     case 2: return OS_EXTENDED;
671     case 3: return OS_PACKED;
672     case 4: return OS_WORD;
673     case 5: return OS_DOUBLE;
674     case 6: return OS_BYTE;
675     default:
676         g_assert_not_reached();
677     }
678 }
679 
680 /*
681  * Assign value to a register.  If the width is less than the register width
682  * only the low part of the register is set.
683  */
684 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
685 {
686     switch (opsize) {
687     case OS_BYTE:
688         tcg_gen_deposit_i32(reg, reg, val, 0, 8);
689         break;
690     case OS_WORD:
691         tcg_gen_deposit_i32(reg, reg, val, 0, 16);
692         break;
693     case OS_LONG:
694     case OS_SINGLE:
695         tcg_gen_mov_i32(reg, val);
696         break;
697     default:
698         g_assert_not_reached();
699     }
700 }
701 
/*
 * Generate code for an "effective address".  Does not adjust the base
 * register for autoincrement addressing modes.
 * Returns NULL_QREG for modes that have no memory address (register
 * direct, immediate) or that are otherwise invalid here.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        /* Register direct modes have no effective address.  */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* 680x0 moves the stack pointer by 2 for byte operands.  */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return tcg_constant_i32(offset);
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return tcg_constant_i32(offset);
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
769 
770 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
771                     int opsize)
772 {
773     int mode = extract32(insn, 3, 3);
774     int reg0 = REG(insn, 0);
775     return gen_lea_mode(env, s, mode, reg0, opsize);
776 }
777 
/*
 * Generate code to load/store a value from/into an EA.  WHAT selects
 * EA_STORE (write VAL), EA_LOADU (read, zero-extend) or EA_LOADS
 * (read, sign-extend).  ADDRP is non-null for read-modify-write
 * operands: the address computed on the read is stored there and
 * reused by the matching store, so side effects happen only once.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Increment only once: skipped on the load half of a RMW.  */
        if (what == EA_STORE || !addrp) {
            tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                /* 680x0 moves the stack pointer by 2 for byte operands.  */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed by the earlier load half.  */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        /* Decrement only once: skipped on the load half of a RMW.  */
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}
894 
895 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
896                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
897 {
898     int mode = extract32(insn, 3, 3);
899     int reg0 = REG(insn, 0);
900     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
901 }
902 
/* Return a host pointer to FP register FREG inside env.  */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
909 
/* Return a host pointer to the fp_result field inside env.  */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
916 
/*
 * Copy one FP register to another: the 16-bit sign/exponent part
 * (l.upper) plus the 64-bit mantissa (l.lower).
 */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    /* sign/exponent half-word */
    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));

    /* mantissa */
    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
}
930 
/*
 * Load a value of size OPSIZE from ADDR and convert it into the FP
 * register pointed to by FP.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Integer operand: sign-extend, then convert in the helper.  */
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(tcg_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(tcg_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(tcg_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPU has no extended precision.  */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Memory format: sign/exponent in the first long, then mantissa.  */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
977 
/*
 * Emit a store of the FP register pointed to by FP to memory at ADDR,
 * converting from the internal representation to the in-memory format
 * selected by OPSIZE via the red* helpers.  INDEX is the MMU index
 * used for the memory accesses.  Mirror of gen_load_fp.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Convert to integer, then store with the requested width. */
        gen_helper_reds32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, tcg_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs do not implement the extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Sign/exponent goes in the upper 16 bits of the first long word. */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        /* 64-bit mantissa at ADDR + 4; tmp is reused as the address here. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1024 
1025 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1026                         TCGv_ptr fp, ea_what what, int index)
1027 {
1028     if (what == EA_STORE) {
1029         gen_store_fp(s, opsize, addr, fp, index);
1030     } else {
1031         gen_load_fp(s, opsize, addr, fp, index);
1032     }
1033 }
1034 
/*
 * Generate code for an FPU operand given by addressing mode MODE and
 * register number REG0.  For EA_STORE the FP value pointed to by FP is
 * converted and written to the operand; otherwise the operand is loaded
 * and converted into FP.  INDEX is the MMU index for memory accesses.
 * Returns 0 on success, -1 for an invalid mode/direction combination
 * (the caller is expected to raise the fault).
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, tcg_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, tcg_env, fp);
                break;
            default:
                /* Double/extended do not fit in a data register.  */
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                tcg_gen_ext_i32(tmp, reg, opsize | MO_SIGN);
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                gen_helper_extf32(tcg_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Advance the address register by the operand size.  */
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address back to the register.  */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Immediates can only be sources.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(tcg_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(tcg_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* Upper word of the first immediate long, then mantissa. */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1162 
1163 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1164                        int opsize, TCGv_ptr fp, ea_what what, int index)
1165 {
1166     int mode = extract32(insn, 3, 3);
1167     int reg0 = REG(insn, 0);
1168     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1169 }
1170 
/* A condition materialized as a TCG comparison: true iff (v1 tcond v2). */
typedef struct {
    TCGCond tcond; /* comparison to apply */
    TCGv v1;       /* left operand */
    TCGv v2;       /* right operand */
} DisasCompare;
1176 
/*
 * Fill *C with a TCG comparison that holds exactly when the m68k
 * condition code COND (0..15) is true.  Known cc_op forms (CMP, LOGIC,
 * ADD/SUB) are handled without materializing all flags; otherwise the
 * flags are flushed to CC_OP_FLAGS and tested directly.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* QREG_CC_N/QREG_CC_V hold the CMP operands in this state.  */
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* N needs the actual subtraction result, sign-extended.  */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    /* Default second operand from here on: zero.  */
    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is kept in the sign bit of QREG_CC_V.  */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition numbers are the inverse of the following odd one.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1348 
1349 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1350 {
1351   DisasCompare c;
1352 
1353   gen_cc_cond(&c, s, cond);
1354   update_cc_op(s);
1355   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1356 }
1357 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    /* s->pc points past the current instruction.  */
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1365 
/*
 * Load the current instruction's source operand into RESULT.
 * NB: expands to uses of the local variables `s` and `insn`, so it is
 * only usable inside a DISAS_INSN body; on an invalid effective address
 * it generates an address fault and does an early `return` from the
 * enclosing function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1374 
/*
 * Store VAL to the current instruction's destination operand.
 * NB: expands to a use of the local variable `s`; on an invalid
 * effective address it generates an address fault and does an early
 * `return` from the enclosing DISAS_INSN function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1383 
/*
 * Generate a jump to an immediate address.  N is the goto_tb slot,
 * DEST the target address, and SRC the address recorded in the trace
 * exception frame when single-stepping is active.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Single-stepping: raise a trace exception instead of jumping. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct TB chaining. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: set PC and return to the main loop. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1402 
#ifndef CONFIG_USER_ONLY
/*
 * Return true if the current instruction is the QEMU semihosting
 * sequence (preceding aligned nop + following movec %sp,0 sentinel).
 * On success the 4-byte sentinel is consumed from the insn stream.
 */
static bool semihosting_test(DisasContext *s)
{
    uint32_t test;

    if (!semihosting_enabled(IS_USER(s))) {
        return false;
    }

    /*
     * "The semihosting instruction is immediately preceded by a
     * nop aligned to a 4-byte boundary..."
     * The preceding 2-byte (aligned) nop plus the 2-byte halt/bkpt
     * means that we have advanced 4 bytes from the required nop.
     */
    if (s->pc % 4 != 0) {
        return false;
    }
    /* 0x4e71 is the encoding of nop. */
    test = translator_lduw(s->env, &s->base, s->pc - 4);
    if (test != 0x4e71) {
        return false;
    }
    /* "... and followed by an invalid sentinel instruction movec %sp,0." */
    test = translator_ldl(s->env, &s->base, s->pc);
    if (test != 0x4e7bf000) {
        return false;
    }

    /* Consume the sentinel. */
    s->pc += 4;
    return true;
}
#endif /* !CONFIG_USER_ONLY */
1436 
/* Scc <ea>: set the destination byte to all ones if the condition
 * holds, else to zero. */
DISAS_INSN(scc)
{
    DisasCompare c;
    int cond;
    TCGv tmp;

    cond = (insn >> 8) & 0xf;
    gen_cc_cond(&c, s, cond);

    tmp = tcg_temp_new();
    /* negsetcond yields -1 (0xff as a byte) when true, 0 when false. */
    tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);

    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
}
1451 
/*
 * DBcc Dn,<label>: if the condition is false, decrement the low word
 * of Dn and branch back unless the counter has reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;  /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    /* Condition true: skip the decrement-and-loop below. */
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low word of Dn is written back. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1475 
/* Undefined MAC instruction: raise a line-A (0xAxxx) exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1480 
/* Undefined FPU instruction: raise a line-F (0xFxxx) exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1485 
/* Unrecognized opcode: log it and raise an illegal-instruction trap. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %" VADDR_PRIx "\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1497 
1498 DISAS_INSN(mulw)
1499 {
1500     TCGv reg;
1501     TCGv tmp;
1502     TCGv src;
1503     int sign;
1504 
1505     sign = (insn & 0x100) != 0;
1506     reg = DREG(insn, 9);
1507     tmp = tcg_temp_new();
1508     if (sign)
1509         tcg_gen_ext16s_i32(tmp, reg);
1510     else
1511         tcg_gen_ext16u_i32(tmp, reg);
1512     SRC_EA(env, src, OS_WORD, sign, NULL);
1513     tcg_gen_mul_i32(tmp, tmp, src);
1514     tcg_gen_mov_i32(reg, tmp);
1515     gen_logic_cc(s, tmp, OS_LONG);
1516 }
1517 
/* DIVU.W/DIVS.W <ea>,Dn: the division itself (and any trap) is
 * performed by a helper. */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    /* Byte length of this insn, passed to the helper — presumably so it
       can compute the next PC on a divide exception; confirm in helper. */
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(tcg_env, destr, src, ilen);
    } else {
        gen_helper_divuw(tcg_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1542 
/*
 * DIVU.L/DIVS.L and DIVUL/DIVSL: long division, decoded from the
 * extension word.  Ext bit 11 selects signed, bit 10 selects the
 * 64/32 quad-width form (M68K_FEATURE_QUAD_MULDIV only).
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        /* Insn length for the helper's exception handling. */
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(tcg_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(tcg_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(tcg_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(tcg_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1589 
/*
 * Emit code for a branch-free packed-BCD addition of two byte-wide
 * values zero-extended to 32 bits: dest10 = dest10 + src10 + X.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0, i.e. 0x22 scaled to 0x66 per uncarried digit */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1654 
/*
 * Emit code for a branch-free packed-BCD subtraction of two byte-wide
 * values zero-extended to 32 bits: dest10 = dest10 - src10 - X,
 * implemented as a nines'-complement addition.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1706 
/*
 * Update the condition codes after a BCD operation whose 9-bit result
 * is in VAL.  Z is sticky: it can only be cleared (made non-zero) by a
 * non-zero result byte, never set here.
 */
static void bcd_flags(TCGv val)
{
    /* Fold the result byte into Z (using C as scratch). */
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    /* Decimal carry is bit 8 of the 9-bit result. */
    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1716 
/* ABCD Dy,Dx: byte-wide BCD add of two data registers. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    /* Zero-extend the low byte of each operand. */
    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1731 
/* ABCD -(Ay),-(Ax): byte-wide BCD add with pre-decrement addressing. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Store back to the destination address captured above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1752 
/* SBCD Dy,Dx: byte-wide BCD subtract of two data registers. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    /* Zero-extend the low byte of each operand. */
    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    /* Only the low byte of the destination register is written. */
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1768 
/* SBCD -(Ay),-(Ax): byte-wide BCD subtract with pre-decrement addressing. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Store back to the destination address captured above. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1789 
/* NBCD <ea>: negate decimal with extend, i.e. dest = 0 - src - X. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    /* dest starts at zero; bcd_sub then computes 0 - src - X. */
    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1807 
/*
 * ADD/SUB between a data register and an <ea> operand.  Insn bit 14
 * selects add vs sub; bit 8 selects <ea> as the destination.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* <ea> is the destination: tmp op reg -> <ea>. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* Dn is the destination: reg op <ea> -> Dn. */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out: unsigned result < addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* Borrow: minuend < subtrahend, computed before the sub. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1845 
/* Reverse the order of the bits in REG.  */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    /* In place: reg is both source and destination of the helper. */
    gen_helper_bitrev(reg, reg);
}
1853 
1854 DISAS_INSN(bitop_reg)
1855 {
1856     int opsize;
1857     int op;
1858     TCGv src1;
1859     TCGv src2;
1860     TCGv tmp;
1861     TCGv addr;
1862     TCGv dest;
1863 
1864     if ((insn & 0x38) != 0)
1865         opsize = OS_BYTE;
1866     else
1867         opsize = OS_LONG;
1868     op = (insn >> 6) & 3;
1869     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1870 
1871     gen_flush_flags(s);
1872     src2 = tcg_temp_new();
1873     if (opsize == OS_BYTE)
1874         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1875     else
1876         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1877 
1878     tmp = tcg_temp_new();
1879     tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1880 
1881     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1882 
1883     dest = tcg_temp_new();
1884     switch (op) {
1885     case 1: /* bchg */
1886         tcg_gen_xor_i32(dest, src1, tmp);
1887         break;
1888     case 2: /* bclr */
1889         tcg_gen_andc_i32(dest, src1, tmp);
1890         break;
1891     case 3: /* bset */
1892         tcg_gen_or_i32(dest, src1, tmp);
1893         break;
1894     default: /* btst */
1895         break;
1896     }
1897     if (op) {
1898         DEST_EA(env, insn, opsize, dest, &addr);
1899     }
1900 }
1901 
/* SATS Dn: saturate the register based on the overflow flag —
 * exact clamping semantics live in the helper; confirm there. */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1910 
1911 static void gen_push(DisasContext *s, TCGv val)
1912 {
1913     TCGv tmp;
1914 
1915     tmp = tcg_temp_new();
1916     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1917     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1918     tcg_gen_mov_i32(QREG_SP, tmp);
1919 }
1920 
1921 static TCGv mreg(int reg)
1922 {
1923     if (reg < 8) {
1924         /* Dx */
1925         return cpu_dregs[reg];
1926     }
1927     /* Ax */
1928     return cpu_aregs[reg & 7];
1929 }
1930 
/*
 * MOVEM: move multiple registers to or from memory.  The mask word
 * read from the insn stream selects D0-D7 in bits 0-7 and A0-A7 in
 * bits 8-15; the pre-decrement store loop below walks the mask in
 * reverse (bit 15 first).
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a copy so the address register itself is not clobbered. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /*
         * Perform all loads first, then commit to the registers —
         * presumably so a fault mid-sequence leaves them unmodified.
         */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            /* Write the final decremented address back to An. */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2036 
2037 DISAS_INSN(movep)
2038 {
2039     uint8_t i;
2040     int16_t displ;
2041     TCGv reg;
2042     TCGv addr;
2043     TCGv abuf;
2044     TCGv dbuf;
2045 
2046     displ = read_im16(env, s);
2047 
2048     addr = AREG(insn, 0);
2049     reg = DREG(insn, 9);
2050 
2051     abuf = tcg_temp_new();
2052     tcg_gen_addi_i32(abuf, addr, displ);
2053     dbuf = tcg_temp_new();
2054 
2055     if (insn & 0x40) {
2056         i = 4;
2057     } else {
2058         i = 2;
2059     }
2060 
2061     if (insn & 0x80) {
2062         for ( ; i > 0 ; i--) {
2063             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2064             tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
2065             if (i > 1) {
2066                 tcg_gen_addi_i32(abuf, abuf, 2);
2067             }
2068         }
2069     } else {
2070         for ( ; i > 0 ; i--) {
2071             tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
2072             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2073             if (i > 1) {
2074                 tcg_gen_addi_i32(abuf, abuf, 2);
2075             }
2076         }
2077     }
2078 }
2079 
2080 DISAS_INSN(bitop_im)
2081 {
2082     int opsize;
2083     int op;
2084     TCGv src1;
2085     uint32_t mask;
2086     int bitnum;
2087     TCGv tmp;
2088     TCGv addr;
2089 
2090     if ((insn & 0x38) != 0)
2091         opsize = OS_BYTE;
2092     else
2093         opsize = OS_LONG;
2094     op = (insn >> 6) & 3;
2095 
2096     bitnum = read_im16(env, s);
2097     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2098         if (bitnum & 0xfe00) {
2099             disas_undef(env, s, insn);
2100             return;
2101         }
2102     } else {
2103         if (bitnum & 0xff00) {
2104             disas_undef(env, s, insn);
2105             return;
2106         }
2107     }
2108 
2109     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2110 
2111     gen_flush_flags(s);
2112     if (opsize == OS_BYTE)
2113         bitnum &= 7;
2114     else
2115         bitnum &= 31;
2116     mask = 1 << bitnum;
2117 
2118    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2119 
2120     if (op) {
2121         tmp = tcg_temp_new();
2122         switch (op) {
2123         case 1: /* bchg */
2124             tcg_gen_xori_i32(tmp, src1, mask);
2125             break;
2126         case 2: /* bclr */
2127             tcg_gen_andi_i32(tmp, src1, ~mask);
2128             break;
2129         case 3: /* bset */
2130             tcg_gen_ori_i32(tmp, src1, mask);
2131             break;
2132         default: /* btst */
2133             break;
2134         }
2135         DEST_EA(env, insn, opsize, tmp, &addr);
2136     }
2137 }
2138 
2139 static TCGv gen_get_ccr(DisasContext *s)
2140 {
2141     TCGv dest;
2142 
2143     update_cc_op(s);
2144     dest = tcg_temp_new();
2145     gen_helper_get_ccr(dest, tcg_env);
2146     return dest;
2147 }
2148 
2149 static TCGv gen_get_sr(DisasContext *s)
2150 {
2151     TCGv ccr;
2152     TCGv sr;
2153 
2154     ccr = gen_get_ccr(s);
2155     sr = tcg_temp_new();
2156     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2157     tcg_gen_or_i32(sr, sr, ccr);
2158     return sr;
2159 }
2160 
2161 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2162 {
2163     if (ccr_only) {
2164         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2165         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2166         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2167         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2168         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2169     } else {
2170         /* Must writeback before changing security state. */
2171         do_writebacks(s);
2172         gen_helper_set_sr(tcg_env, tcg_constant_i32(val));
2173     }
2174     set_cc_op(s, CC_OP_FLAGS);
2175 }
2176 
2177 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2178 {
2179     if (ccr_only) {
2180         gen_helper_set_ccr(tcg_env, val);
2181     } else {
2182         /* Must writeback before changing security state. */
2183         do_writebacks(s);
2184         gen_helper_set_sr(tcg_env, val);
2185     }
2186     set_cc_op(s, CC_OP_FLAGS);
2187 }
2188 
2189 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2190                            bool ccr_only)
2191 {
2192     if ((insn & 0x3f) == 0x3c) {
2193         uint16_t val;
2194         val = read_im16(env, s);
2195         gen_set_sr_im(s, val, ccr_only);
2196     } else {
2197         TCGv src;
2198         SRC_EA(env, src, OS_WORD, 0, NULL);
2199         gen_set_sr(s, src, ccr_only);
2200     }
2201 }
2202 
2203 DISAS_INSN(arith_im)
2204 {
2205     int op;
2206     TCGv im;
2207     TCGv src1;
2208     TCGv dest;
2209     TCGv addr;
2210     int opsize;
2211     bool with_SR = ((insn & 0x3f) == 0x3c);
2212 
2213     op = (insn >> 9) & 7;
2214     opsize = insn_opsize(insn);
2215     switch (opsize) {
2216     case OS_BYTE:
2217         im = tcg_constant_i32((int8_t)read_im8(env, s));
2218         break;
2219     case OS_WORD:
2220         im = tcg_constant_i32((int16_t)read_im16(env, s));
2221         break;
2222     case OS_LONG:
2223         im = tcg_constant_i32(read_im32(env, s));
2224         break;
2225     default:
2226         g_assert_not_reached();
2227     }
2228 
2229     if (with_SR) {
2230         /* SR/CCR can only be used with andi/eori/ori */
2231         if (op == 2 || op == 3 || op == 6) {
2232             disas_undef(env, s, insn);
2233             return;
2234         }
2235         switch (opsize) {
2236         case OS_BYTE:
2237             src1 = gen_get_ccr(s);
2238             break;
2239         case OS_WORD:
2240             if (IS_USER(s)) {
2241                 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2242                 return;
2243             }
2244             src1 = gen_get_sr(s);
2245             break;
2246         default:
2247             /* OS_LONG; others already g_assert_not_reached.  */
2248             disas_undef(env, s, insn);
2249             return;
2250         }
2251     } else {
2252         SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2253     }
2254     dest = tcg_temp_new();
2255     switch (op) {
2256     case 0: /* ori */
2257         tcg_gen_or_i32(dest, src1, im);
2258         if (with_SR) {
2259             gen_set_sr(s, dest, opsize == OS_BYTE);
2260             gen_exit_tb(s);
2261         } else {
2262             DEST_EA(env, insn, opsize, dest, &addr);
2263             gen_logic_cc(s, dest, opsize);
2264         }
2265         break;
2266     case 1: /* andi */
2267         tcg_gen_and_i32(dest, src1, im);
2268         if (with_SR) {
2269             gen_set_sr(s, dest, opsize == OS_BYTE);
2270             gen_exit_tb(s);
2271         } else {
2272             DEST_EA(env, insn, opsize, dest, &addr);
2273             gen_logic_cc(s, dest, opsize);
2274         }
2275         break;
2276     case 2: /* subi */
2277         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2278         tcg_gen_sub_i32(dest, src1, im);
2279         gen_update_cc_add(dest, im, opsize);
2280         set_cc_op(s, CC_OP_SUBB + opsize);
2281         DEST_EA(env, insn, opsize, dest, &addr);
2282         break;
2283     case 3: /* addi */
2284         tcg_gen_add_i32(dest, src1, im);
2285         gen_update_cc_add(dest, im, opsize);
2286         tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2287         set_cc_op(s, CC_OP_ADDB + opsize);
2288         DEST_EA(env, insn, opsize, dest, &addr);
2289         break;
2290     case 5: /* eori */
2291         tcg_gen_xor_i32(dest, src1, im);
2292         if (with_SR) {
2293             gen_set_sr(s, dest, opsize == OS_BYTE);
2294             gen_exit_tb(s);
2295         } else {
2296             DEST_EA(env, insn, opsize, dest, &addr);
2297             gen_logic_cc(s, dest, opsize);
2298         }
2299         break;
2300     case 6: /* cmpi */
2301         gen_update_cc_cmp(s, src1, im, opsize);
2302         break;
2303     default:
2304         abort();
2305     }
2306 }
2307 
/*
 * cas: compare-and-swap with a memory operand.  Dc (ext bits 2:0)
 * is the compare value, Du (ext bits 8:6) the update value.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    /* Bits 10:9 of the opcode encode the size; 0 is invalid. */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Sign-extend the compare value to full register width. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Complete the side effect of the addressing mode, if any. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2370 
/*
 * cas2w: two word-sized compare-and-swaps on two independent
 * addresses, taken from a pair of extension words.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* No parallel implementation: retranslate in serial context. */
        gen_helper_exit_atomic(tcg_env);
    } else {
        /* Pack the four register numbers into one helper argument. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(tcg_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2420 
/*
 * cas2l: two long-sized compare-and-swaps on two independent
 * addresses.  Unlike cas2w, a parallel helper is available.
 */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(tcg_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(tcg_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op.  */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2470 
2471 DISAS_INSN(byterev)
2472 {
2473     TCGv reg;
2474 
2475     reg = DREG(insn, 0);
2476     tcg_gen_bswap32_i32(reg, reg);
2477 }
2478 
2479 DISAS_INSN(move)
2480 {
2481     TCGv src;
2482     TCGv dest;
2483     int op;
2484     int opsize;
2485 
2486     switch (insn >> 12) {
2487     case 1: /* move.b */
2488         opsize = OS_BYTE;
2489         break;
2490     case 2: /* move.l */
2491         opsize = OS_LONG;
2492         break;
2493     case 3: /* move.w */
2494         opsize = OS_WORD;
2495         break;
2496     default:
2497         abort();
2498     }
2499     SRC_EA(env, src, opsize, 1, NULL);
2500     op = (insn >> 6) & 7;
2501     if (op == 1) {
2502         /* movea */
2503         /* The value will already have been sign extended.  */
2504         dest = AREG(insn, 9);
2505         tcg_gen_mov_i32(dest, src);
2506     } else {
2507         /* normal move */
2508         uint16_t dest_ea;
2509         dest_ea = ((insn >> 9) & 7) | (op << 3);
2510         DEST_EA(env, dest_ea, opsize, src, NULL);
2511         /* This will be correct because loads sign extend.  */
2512         gen_logic_cc(s, src, opsize);
2513     }
2514 }
2515 
/*
 * negx: negate with extend, i.e. 0 - (src + X), updating the full
 * flag set; Z accumulates across a multi-precision sequence.
 */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Only the low bit of the double-width borrow is meaningful. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2558 
2559 DISAS_INSN(lea)
2560 {
2561     TCGv reg;
2562     TCGv tmp;
2563 
2564     reg = AREG(insn, 9);
2565     tmp = gen_lea(env, s, insn, OS_LONG);
2566     if (IS_NULL_QREG(tmp)) {
2567         gen_addr_fault(s);
2568         return;
2569     }
2570     tcg_gen_mov_i32(reg, tmp);
2571 }
2572 
2573 DISAS_INSN(clr)
2574 {
2575     int opsize;
2576     TCGv zero;
2577 
2578     zero = tcg_constant_i32(0);
2579     opsize = insn_opsize(insn);
2580     DEST_EA(env, insn, opsize, zero, NULL);
2581     gen_logic_cc(s, zero, opsize);
2582 }
2583 
2584 DISAS_INSN(move_from_ccr)
2585 {
2586     TCGv ccr;
2587 
2588     ccr = gen_get_ccr(s);
2589     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2590 }
2591 
2592 DISAS_INSN(neg)
2593 {
2594     TCGv src1;
2595     TCGv dest;
2596     TCGv addr;
2597     int opsize;
2598 
2599     opsize = insn_opsize(insn);
2600     SRC_EA(env, src1, opsize, 1, &addr);
2601     dest = tcg_temp_new();
2602     tcg_gen_neg_i32(dest, src1);
2603     set_cc_op(s, CC_OP_SUBB + opsize);
2604     gen_update_cc_add(dest, src1, opsize);
2605     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2606     DEST_EA(env, insn, opsize, dest, &addr);
2607 }
2608 
DISAS_INSN(move_to_ccr)
{
    /* move <ea>,CCR: shares the move-to-SR path with ccr_only set. */
    gen_move_to_sr(env, s, insn, true);
}
2613 
2614 DISAS_INSN(not)
2615 {
2616     TCGv src1;
2617     TCGv dest;
2618     TCGv addr;
2619     int opsize;
2620 
2621     opsize = insn_opsize(insn);
2622     SRC_EA(env, src1, opsize, 1, &addr);
2623     dest = tcg_temp_new();
2624     tcg_gen_not_i32(dest, src1);
2625     DEST_EA(env, insn, opsize, dest, &addr);
2626     gen_logic_cc(s, dest, opsize);
2627 }
2628 
2629 DISAS_INSN(swap)
2630 {
2631     TCGv src1;
2632     TCGv src2;
2633     TCGv reg;
2634 
2635     src1 = tcg_temp_new();
2636     src2 = tcg_temp_new();
2637     reg = DREG(insn, 0);
2638     tcg_gen_shli_i32(src1, reg, 16);
2639     tcg_gen_shri_i32(src2, reg, 16);
2640     tcg_gen_or_i32(reg, src1, src2);
2641     gen_logic_cc(s, reg, OS_LONG);
2642 }
2643 
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    /* In user mode, report the breakpoint to the debugger. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    /* BKPT #0 is the alternate semihosting instruction. */
    if ((insn & 7) == 0 && semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    /* Otherwise bkpt raises an illegal-instruction exception. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2657 
2658 DISAS_INSN(pea)
2659 {
2660     TCGv tmp;
2661 
2662     tmp = gen_lea(env, s, insn, OS_LONG);
2663     if (IS_NULL_QREG(tmp)) {
2664         gen_addr_fault(s);
2665         return;
2666     }
2667     gen_push(s, tmp);
2668 }
2669 
2670 DISAS_INSN(ext)
2671 {
2672     int op;
2673     TCGv reg;
2674     TCGv tmp;
2675 
2676     reg = DREG(insn, 0);
2677     op = (insn >> 6) & 7;
2678     tmp = tcg_temp_new();
2679     if (op == 3)
2680         tcg_gen_ext16s_i32(tmp, reg);
2681     else
2682         tcg_gen_ext8s_i32(tmp, reg);
2683     if (op == 2)
2684         gen_partset_reg(OS_WORD, reg, tmp);
2685     else
2686         tcg_gen_mov_i32(reg, tmp);
2687     gen_logic_cc(s, tmp, OS_LONG);
2688 }
2689 
2690 DISAS_INSN(tst)
2691 {
2692     int opsize;
2693     TCGv tmp;
2694 
2695     opsize = insn_opsize(insn);
2696     SRC_EA(env, tmp, opsize, 1, NULL);
2697     gen_logic_cc(s, tmp, opsize);
2698 }
2699 
DISAS_INSN(pulse)
{
  /* PULSE has no architectural side effects; implemented as a NOP.  */
}
2704 
DISAS_INSN(illegal)
{
    /* Raise an illegal-instruction exception at the current insn. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2709 
/*
 * tas: test-and-set.  Sets N/Z from the byte operand, then sets
 * bit 7; the memory form uses an atomic fetch-or.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        /* Atomically set bit 7, returning the previous byte value. */
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Complete the side effect of the addressing mode, if any. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrememnt.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2743 
/*
 * mull: 32-bit multiply.  Extension-word bit 11 selects signed vs
 * unsigned; bit 10 selects the 64-bit (quad) result form.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 32x32 -> 64 form requires the QUAD_MULDIV feature. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half lands in CC_Z, high half in CC_N. */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is clear only when the full 64-bit result is zero. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0 path: V indicates overflow of the 32-bit result. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2807 
2808 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2809 {
2810     TCGv reg;
2811     TCGv tmp;
2812 
2813     reg = AREG(insn, 0);
2814     tmp = tcg_temp_new();
2815     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2816     gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2817     if ((insn & 7) != 7) {
2818         tcg_gen_mov_i32(reg, tmp);
2819     }
2820     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2821 }
2822 
2823 DISAS_INSN(link)
2824 {
2825     int16_t offset;
2826 
2827     offset = read_im16(env, s);
2828     gen_link(s, insn, offset);
2829 }
2830 
2831 DISAS_INSN(linkl)
2832 {
2833     int32_t offset;
2834 
2835     offset = read_im32(env, s);
2836     gen_link(s, insn, offset);
2837 }
2838 
2839 DISAS_INSN(unlk)
2840 {
2841     TCGv src;
2842     TCGv reg;
2843     TCGv tmp;
2844 
2845     src = tcg_temp_new();
2846     reg = AREG(insn, 0);
2847     tcg_gen_mov_i32(src, reg);
2848     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2849     tcg_gen_mov_i32(reg, tmp);
2850     tcg_gen_addi_i32(QREG_SP, src, 4);
2851 }
2852 
#if !defined(CONFIG_USER_ONLY)
DISAS_INSN(reset)
{
    /* reset is a privileged instruction. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    /* The actual reset work is performed by the helper. */
    gen_helper_reset(tcg_env);
}
#endif
2864 
DISAS_INSN(nop)
{
    /* No operation: nothing to generate. */
}
2868 
2869 DISAS_INSN(rtd)
2870 {
2871     TCGv tmp;
2872     int16_t offset = read_im16(env, s);
2873 
2874     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2875     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2876     gen_jmp(s, tmp);
2877 }
2878 
2879 DISAS_INSN(rtr)
2880 {
2881     TCGv tmp;
2882     TCGv ccr;
2883     TCGv sp;
2884 
2885     sp = tcg_temp_new();
2886     ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2887     tcg_gen_addi_i32(sp, QREG_SP, 2);
2888     tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2889     tcg_gen_addi_i32(QREG_SP, sp, 4);
2890 
2891     gen_set_sr(s, ccr, true);
2892 
2893     gen_jmp(s, tmp);
2894 }
2895 
2896 DISAS_INSN(rts)
2897 {
2898     TCGv tmp;
2899 
2900     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2901     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2902     gen_jmp(s, tmp);
2903 }
2904 
2905 DISAS_INSN(jump)
2906 {
2907     TCGv tmp;
2908 
2909     /*
2910      * Load the target address first to ensure correct exception
2911      * behavior.
2912      */
2913     tmp = gen_lea(env, s, insn, OS_LONG);
2914     if (IS_NULL_QREG(tmp)) {
2915         gen_addr_fault(s);
2916         return;
2917     }
2918     if ((insn & 0x40) == 0) {
2919         /* jsr */
2920         gen_push(s, tcg_constant_i32(s->pc));
2921     }
2922     gen_jmp(s, tmp);
2923 }
2924 
/*
 * addq / subq: add or subtract a quick immediate (1..8; the field
 * value 0 encodes 8).  Bit 8 set selects subtract.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X = borrow, computed from the operands before the sub. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X = carry: result < addend implies unsigned wraparound. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2972 
/*
 * bra / bsr / Bcc: PC-relative branches.  An 8-bit displacement of
 * 0 means a 16-bit extension follows; -1 (0xff) means 32-bit.
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* Displacements are relative to the end of the opcode word. */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        TCGLabel *l1 = gen_new_label();
        /* Branch to l1 (the not-taken path) on the inverted condition. */
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
3004 
3005 DISAS_INSN(moveq)
3006 {
3007     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3008     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3009 }
3010 
3011 DISAS_INSN(mvzs)
3012 {
3013     int opsize;
3014     TCGv src;
3015     TCGv reg;
3016 
3017     if (insn & 0x40)
3018         opsize = OS_WORD;
3019     else
3020         opsize = OS_BYTE;
3021     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3022     reg = DREG(insn, 9);
3023     tcg_gen_mov_i32(reg, src);
3024     gen_logic_cc(s, src, opsize);
3025 }
3026 
3027 DISAS_INSN(or)
3028 {
3029     TCGv reg;
3030     TCGv dest;
3031     TCGv src;
3032     TCGv addr;
3033     int opsize;
3034 
3035     opsize = insn_opsize(insn);
3036     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3037     dest = tcg_temp_new();
3038     if (insn & 0x100) {
3039         SRC_EA(env, src, opsize, 0, &addr);
3040         tcg_gen_or_i32(dest, src, reg);
3041         DEST_EA(env, insn, opsize, dest, &addr);
3042     } else {
3043         SRC_EA(env, src, opsize, 0, NULL);
3044         tcg_gen_or_i32(dest, src, reg);
3045         gen_partset_reg(opsize, DREG(insn, 9), dest);
3046     }
3047     gen_logic_cc(s, dest, opsize);
3048 }
3049 
3050 DISAS_INSN(suba)
3051 {
3052     TCGv src;
3053     TCGv reg;
3054 
3055     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3056     reg = AREG(insn, 9);
3057     tcg_gen_sub_i32(reg, reg, src);
3058 }
3059 
/*
 * Common code for subx: compute dest - (src + X), update the full
 * flag set (Z is sticky), and leave the result in QREG_CC_N.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Only the low bit of the double-width borrow is meaningful. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3092 
3093 DISAS_INSN(subx_reg)
3094 {
3095     TCGv dest;
3096     TCGv src;
3097     int opsize;
3098 
3099     opsize = insn_opsize(insn);
3100 
3101     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3102     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3103 
3104     gen_subx(s, src, dest, opsize);
3105 
3106     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3107 }
3108 
/* SUBX -(Ay),-(Ax): memory-to-memory subtract with extend. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Pre-decrement Ay, then load the (sign-extended) source. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Pre-decrement Ax, then load the (sign-extended) destination. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx left the result in QREG_CC_N; store it back to (Ax). */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3131 
3132 DISAS_INSN(mov3q)
3133 {
3134     TCGv src;
3135     int val;
3136 
3137     val = (insn >> 9) & 7;
3138     if (val == 0) {
3139         val = -1;
3140     }
3141     src = tcg_constant_i32(val);
3142     gen_logic_cc(s, src, OS_LONG);
3143     DEST_EA(env, insn, OS_LONG, src, NULL);
3144 }
3145 
/* CMP <ea>,Dn: compare and set flags only; nothing is written back. */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    /* Both sides are sign-extended so the subtraction is full-width. */
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}
3157 
3158 DISAS_INSN(cmpa)
3159 {
3160     int opsize;
3161     TCGv src;
3162     TCGv reg;
3163 
3164     if (insn & 0x100) {
3165         opsize = OS_LONG;
3166     } else {
3167         opsize = OS_WORD;
3168     }
3169     SRC_EA(env, src, opsize, 1, NULL);
3170     reg = AREG(insn, 9);
3171     gen_update_cc_cmp(s, reg, src, OS_LONG);
3172 }
3173 
/* CMPM (Ay)+,(Ax)+: compare memory with memory, post-incrementing both. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3188 
/* EOR Dn,<ea>: exclusive-OR a data register into <ea>, setting N/Z. */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    /* Read-modify-write: addr is reused by DEST_EA for the store. */
    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
}
3204 
/* Emit a swap of the 32-bit values held in two registers. */
static void do_exg(TCGv reg1, TCGv reg2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_mov_i32(temp, reg1);
    tcg_gen_mov_i32(reg1, reg2);
    tcg_gen_mov_i32(reg2, temp);
}
3212 
/* EXG Dx,Dy */
DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3218 
/* EXG Ax,Ay */
DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3224 
/* EXG Dx,Ay */
DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3230 
/*
 * AND: bit 8 of the insn selects the direction: Dn,<ea> (result stored
 * back through the effective address) versus <ea>,Dn (result stored in
 * the data register).  N/Z are set from the result either way.
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        /* AND Dn,<ea>: read-modify-write through the EA. */
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        /* AND <ea>,Dn: result replaces the low part of Dn. */
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3254 
/*
 * ADDA: add <ea> to an address register.  Bit 8 of the insn selects a
 * long versus word source; no condition-code update is emitted here
 * (address arithmetic leaves the CCR alone).
 */
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    /* Word sources are sign-extended to 32 bits (SRC_EA sign flag = 1). */
    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
    reg = AREG(insn, 9);
    tcg_gen_add_i32(reg, reg, src);
}
3264 
/*
 * Emit code for an ADDX-style add-with-extend of two sign-extended
 * operands: result = src + dest + X.  The result is left in QREG_CC_N
 * for the caller to write back; all flag registers are updated and the
 * cc_op is set to CC_OP_FLAGS.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    /* Two add2 steps accumulate dest + X + src with the carry in X. */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    /* V = (N ^ src) & ~(dest ^ src): sign flipped, operands same-signed. */
    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3296 
/* ADDX Dy,Dx: register-to-register add with extend. */
DISAS_INSN(addx_reg)
{
    TCGv dest;
    TCGv src;
    int opsize;

    opsize = insn_opsize(insn);

    /* Both operands are sign-extended before the flag computation. */
    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
    src = gen_extend(s, DREG(insn, 0), opsize, 1);

    gen_addx(s, src, dest, opsize);

    /* gen_addx left the result in QREG_CC_N; write the low part back. */
    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
}
3312 
/* ADDX -(Ay),-(Ax): memory-to-memory add with extend. */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Pre-decrement Ay, then load the (sign-extended) source. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Pre-decrement Ax, then load the (sign-extended) destination. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx left the result in QREG_CC_N; store it back to (Ax). */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3335 
/*
 * Immediate-count shifts (ASL/ASR/LSL/LSR #imm,Dn) for the given opsize.
 * A count field of 0 encodes a shift by 8.  The shifted value ends up
 * sign-extended in QREG_CC_N, is written back to Dn, and all flags are
 * computed inline with cc_op set to CC_OP_FLAGS.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    /* Logical shifts zero-extend the operand, arithmetic ones sign-extend. */
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C = last bit shifted out of the top of the operand. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V = (sign bit) != (any bit shifted through the sign). */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
        }
    } else {
        /* C = last bit shifted out of the bottom of the operand. */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    /* X tracks C for any non-zero shift count (always non-zero here). */
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3386 
/*
 * Register-count shifts (ASL/ASR/LSL/LSR Dx,Dy) for the given opsize.
 * The count comes from Dx, reduced modulo 64; the shift is performed
 * in 64 bits so the last bit shifted out (for C) is always available.
 * Result lands sign-extended in QREG_CC_N and is written back to Dy.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    /* Logical shifts zero-extend the operand, arithmetic ones sign-extend. */
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* The high half of the 64-bit result holds the carry-out. */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            /* For a zero count there is no carry-out; force C to 0. */
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
        }
    } else {
        /* Place the operand in the high half so the last bit shifted
           out ends up in bit 31 of the low half. */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3477 
/* Byte-sized ASx/LSx #imm,Dn. */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}
3482 
/* Word-sized ASx/LSx #imm,Dn. */
DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}
3487 
/* Long-sized ASx/LSx #imm,Dn. */
DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}
3492 
/* Byte-sized ASx/LSx Dx,Dy. */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}
3497 
/* Word-sized ASx/LSx Dx,Dy. */
DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}
3502 
/* Long-sized ASx/LSx Dx,Dy. */
DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3507 
/* Memory shifts (ASx/LSx <ea>): always word-sized with a count of 1. */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    /* Logical shifts zero-extend the operand, arithmetic ones sign-extend. */
    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C = the bit shifted out of the top (bit 15). */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* With a count of 1, V is simply (old sign != new sign). */
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C = the bit shifted out of the bottom (bit 0, masked below). */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3547 
/*
 * Emit a plain rotate (ROL/ROR, not through X) of 'reg' by 'shift' for
 * an 8/16/32-bit operand, updating N/Z/C/V in place.  Narrow operands
 * are replicated across all 32 bits first so the native 32-bit TCG
 * rotate produces the right result; X is left untouched.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    /* Sign-extend the replicated result back to the operand size. */
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C = the bit that was rotated around: bit 0 after ROL, bit 31 after ROR. */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3595 
/*
 * Set all flags after a rotate-through-X: sign-extend the result into
 * N and Z, copy the new extend bit into both X and C, and clear V.
 */
static void rotate_x_flags(TCGv reg, TCGv X, int size)
{
    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }
    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);
    tcg_gen_mov_i32(QREG_CC_X, X);
    tcg_gen_mov_i32(QREG_CC_C, X);
    tcg_gen_movi_i32(QREG_CC_V, 0);
}
3614 
/*
 * Rotate-through-X (ROXL/ROXR) for 8/16-bit operands, implemented as
 * reg = (reg << shl) | (reg >> shr) | (X << shx), treating [reg:X] as
 * a size+1 bit quantity.  Returns the new X bit.
 * Result of rotate_x() is valid if 0 <= shift <= size (callers reduce
 * the count modulo size+1 beforehand).
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3655 
/*
 * 32-bit rotate-through-X (ROXL/ROXR), done as a 64-bit rotate over a
 * concatenation of the register and the X bit.  Returns the new X bit;
 * if the shift count is zero, both the register and X are unchanged.
 * Result of rotate32_x() is valid if 0 <= shift < 33.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        /* Drop the X bit; the halves are recombined below. */
        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    /* Merge the two halves back into the 32-bit rotated value. */
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3716 
3717 DISAS_INSN(rotate_im)
3718 {
3719     TCGv shift;
3720     int tmp;
3721     int left = (insn & 0x100);
3722 
3723     tmp = (insn >> 9) & 7;
3724     if (tmp == 0) {
3725         tmp = 8;
3726     }
3727 
3728     shift = tcg_constant_i32(tmp);
3729     if (insn & 8) {
3730         rotate(DREG(insn, 0), shift, left, 32);
3731     } else {
3732         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3733         rotate_x_flags(DREG(insn, 0), X, 32);
3734     }
3735 
3736     set_cc_op(s, CC_OP_FLAGS);
3737 }
3738 
/* Byte-sized ROL/ROR/ROXL/ROXR with immediate count (0 encodes 8). */
DISAS_INSN(rotate8_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);

    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    /* Bit 3 selects plain rotate versus rotate-through-X. */
    if (insn & 8) {
        rotate(reg, shift, left, 8);
    } else {
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3763 
/* Word-sized ROL/ROR/ROXL/ROXR with immediate count (0 encodes 8). */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;
    }

    shift = tcg_constant_i32(tmp);
    /* Bit 3 selects plain rotate versus rotate-through-X. */
    if (insn & 8) {
        rotate(reg, shift, left, 16);
    } else {
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3787 
/*
 * Long-sized ROL/ROR/ROXL/ROXR with the count in Dy: count is taken
 * modulo 64, and ROXx further reduces it modulo 33.
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate: the TCG rotate only needs the count mod 32. */
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3818 
/*
 * Byte-sized ROL/ROR/ROXL/ROXR with the count in Dy: count is taken
 * modulo 64, and ROXx further reduces it modulo 9.
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate over the replicated byte only needs count mod 8. */
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3850 
/*
 * Word-sized ROL/ROR/ROXL/ROXR with the count in Dy: count is taken
 * modulo 64, and ROXx further reduces it modulo 17.
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate over the replicated word only needs count mod 16. */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3882 
/* Memory rotates (ROxx <ea>): always word-sized with a count of 1. */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    shift = tcg_constant_i32(1);
    /* Bit 9 selects plain rotate versus rotate-through-X. */
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
    }
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3902 
/*
 * BFEXTU/BFEXTS Dn{ofs:len},Dm: extract a (possibly wrapping) bit
 * field from a data register.  The sign-extended field always lands in
 * QREG_CC_N for the flags; dst receives the signed (BFEXTS) or
 * unsigned (BFEXTU) extraction.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    /* A width field of 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width (mod 32): arithmetic shift sign-extends. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3969 
/* BFEXTU/BFEXTS <ea>{ofs:len}: memory bit-field extract via helpers. */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset may each be immediate or come from a register. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, tcg_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* Helper returns a pair: low half = value, high half = N/Z source. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, tcg_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4004 
/*
 * BFCHG/BFCLR/BFSET/BFTST/BFFFO on a data register.  The (rotated)
 * field is computed into QREG_CC_N for the flags, and a mask with 0s
 * at the field position is built; the operation is then applied to the
 * register through that inverted mask.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    /* A width field of 0 encodes 32. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        /* maski has 0s in the top len bits, 1s below. */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0; rotate instead of shifting. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Apply the operation: mask has 0s in the field, 1s elsewhere. */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4091 
/* BFCHG/BFCLR/BFSET/BFTST/BFFFO on memory, via out-of-line helpers. */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset may each be immediate or come from a register. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    /* Each helper returns the field value for N/Z in QREG_CC_N. */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Pair result: low half = found offset, high half = N/Z source. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, tcg_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4138 
/*
 * BFINS with a data register destination: insert the low 'len' bits of
 * Dn (ext[12..14]) into Dd (insn[0..2]) at the given bitfield position.
 * ext bit 5 selects a register width, ext bit 11 a register offset.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* Map encoded width 0 to 32, per the bitfield instruction encoding.  */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* CC_N is set from the inserted field, left-justified in 32 bits.  */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps past bit 0: rotate source and merge by hand.  */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): ones outside the field.  */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate the mask and the field into position, then merge.  */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4208 
4209 DISAS_INSN(bfins_mem)
4210 {
4211     int ext = read_im16(env, s);
4212     TCGv src = DREG(ext, 12);
4213     TCGv addr, len, ofs;
4214 
4215     addr = gen_lea(env, s, insn, OS_UNSIZED);
4216     if (IS_NULL_QREG(addr)) {
4217         gen_addr_fault(s);
4218         return;
4219     }
4220 
4221     if (ext & 0x20) {
4222         len = DREG(ext, 0);
4223     } else {
4224         len = tcg_constant_i32(extract32(ext, 0, 5));
4225     }
4226     if (ext & 0x800) {
4227         ofs = DREG(ext, 6);
4228     } else {
4229         ofs = tcg_constant_i32(extract32(ext, 6, 5));
4230     }
4231 
4232     gen_helper_bfins_mem(QREG_CC_N, tcg_env, addr, src, ofs, len);
4233     set_cc_op(s, CC_OP_LOGIC);
4234 }
4235 
4236 DISAS_INSN(ff1)
4237 {
4238     TCGv reg;
4239     reg = DREG(insn, 0);
4240     gen_logic_cc(s, reg, OS_LONG);
4241     gen_helper_ff1(reg, reg);
4242 }
4243 
4244 DISAS_INSN(chk)
4245 {
4246     TCGv src, reg;
4247     int opsize;
4248 
4249     switch ((insn >> 7) & 3) {
4250     case 3:
4251         opsize = OS_WORD;
4252         break;
4253     case 2:
4254         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4255             opsize = OS_LONG;
4256             break;
4257         }
4258         /* fallthru */
4259     default:
4260         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4261         return;
4262     }
4263     SRC_EA(env, src, opsize, 1, NULL);
4264     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4265 
4266     gen_flush_flags(s);
4267     gen_helper_chk(tcg_env, reg, src);
4268 }
4269 
4270 DISAS_INSN(chk2)
4271 {
4272     uint16_t ext;
4273     TCGv addr1, addr2, bound1, bound2, reg;
4274     int opsize;
4275 
4276     switch ((insn >> 9) & 3) {
4277     case 0:
4278         opsize = OS_BYTE;
4279         break;
4280     case 1:
4281         opsize = OS_WORD;
4282         break;
4283     case 2:
4284         opsize = OS_LONG;
4285         break;
4286     default:
4287         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4288         return;
4289     }
4290 
4291     ext = read_im16(env, s);
4292     if ((ext & 0x0800) == 0) {
4293         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4294         return;
4295     }
4296 
4297     addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4298     addr2 = tcg_temp_new();
4299     tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4300 
4301     bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4302     bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4303 
4304     reg = tcg_temp_new();
4305     if (ext & 0x8000) {
4306         tcg_gen_mov_i32(reg, AREG(ext, 12));
4307     } else {
4308         gen_ext(reg, DREG(ext, 12), opsize, 1);
4309     }
4310 
4311     gen_flush_flags(s);
4312     gen_helper_chk2(tcg_env, reg, bound1, bound2);
4313 }
4314 
4315 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4316 {
4317     TCGv addr;
4318     TCGv_i64 t0, t1;
4319 
4320     addr = tcg_temp_new();
4321 
4322     t0 = tcg_temp_new_i64();
4323     t1 = tcg_temp_new_i64();
4324 
4325     tcg_gen_andi_i32(addr, src, ~15);
4326     tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4327     tcg_gen_addi_i32(addr, addr, 8);
4328     tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4329 
4330     tcg_gen_andi_i32(addr, dst, ~15);
4331     tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4332     tcg_gen_addi_i32(addr, addr, 8);
4333     tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4334 }
4335 
4336 DISAS_INSN(move16_reg)
4337 {
4338     int index = IS_USER(s);
4339     TCGv tmp;
4340     uint16_t ext;
4341 
4342     ext = read_im16(env, s);
4343     if ((ext & (1 << 15)) == 0) {
4344         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4345     }
4346 
4347     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4348 
4349     /* Ax can be Ay, so save Ay before incrementing Ax */
4350     tmp = tcg_temp_new();
4351     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4352     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4353     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4354 }
4355 
4356 DISAS_INSN(move16_mem)
4357 {
4358     int index = IS_USER(s);
4359     TCGv reg, addr;
4360 
4361     reg = AREG(insn, 0);
4362     addr = tcg_constant_i32(read_im32(env, s));
4363 
4364     if ((insn >> 3) & 1) {
4365         /* MOVE16 (xxx).L, (Ay) */
4366         m68k_copy_line(reg, addr, index);
4367     } else {
4368         /* MOVE16 (Ay), (xxx).L */
4369         m68k_copy_line(addr, reg, index);
4370     }
4371 
4372     if (((insn >> 3) & 2) == 0) {
4373         /* (Ay)+ */
4374         tcg_gen_addi_i32(reg, reg, 16);
4375     }
4376 }
4377 
4378 DISAS_INSN(strldsr)
4379 {
4380     uint16_t ext;
4381     uint32_t addr;
4382 
4383     addr = s->pc - 2;
4384     ext = read_im16(env, s);
4385     if (ext != 0x46FC) {
4386         gen_exception(s, addr, EXCP_ILLEGAL);
4387         return;
4388     }
4389     ext = read_im16(env, s);
4390     if (IS_USER(s) || (ext & SR_S) == 0) {
4391         gen_exception(s, addr, EXCP_PRIVILEGE);
4392         return;
4393     }
4394     gen_push(s, gen_get_sr(s));
4395     gen_set_sr_im(s, ext, 0);
4396     gen_exit_tb(s);
4397 }
4398 
/*
 * MOVE from SR to an EA.  Privileged only on CPUs with the
 * MOVEFROMSR_PRIV feature; unprivileged otherwise.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4410 
4411 #if !defined(CONFIG_USER_ONLY)
/*
 * MOVES: move to/from an alternate address space, using the SFC/DFC
 * MMU indexes.  Privileged.  Extension word bit 15 selects An vs Dn,
 * bit 11 the direction (register -> EA).
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Loads into An are sign-extended to 32 bits.  */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Update An for the (An)+ and -(An) addressing modes.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through SP still step by 2 to keep it aligned.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4469 
4470 DISAS_INSN(move_to_sr)
4471 {
4472     if (IS_USER(s)) {
4473         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4474         return;
4475     }
4476     gen_move_to_sr(env, s, insn, false);
4477     gen_exit_tb(s);
4478 }
4479 
4480 DISAS_INSN(move_from_usp)
4481 {
4482     if (IS_USER(s)) {
4483         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4484         return;
4485     }
4486     tcg_gen_ld_i32(AREG(insn, 0), tcg_env,
4487                    offsetof(CPUM68KState, sp[M68K_USP]));
4488 }
4489 
4490 DISAS_INSN(move_to_usp)
4491 {
4492     if (IS_USER(s)) {
4493         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4494         return;
4495     }
4496     tcg_gen_st_i32(AREG(insn, 0), tcg_env,
4497                    offsetof(CPUM68KState, sp[M68K_USP]));
4498 }
4499 
/*
 * HALT: stop the CPU.  Privileged; doubles as the semihosting entry
 * point when semihosting is enabled.
 */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    /* Mark the CPU as halted and leave the execution loop.  */
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4513 
4514 DISAS_INSN(stop)
4515 {
4516     uint16_t ext;
4517 
4518     if (IS_USER(s)) {
4519         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4520         return;
4521     }
4522 
4523     ext = read_im16(env, s);
4524 
4525     gen_set_sr_im(s, ext, 0);
4526     tcg_gen_movi_i32(cpu_halted, 1);
4527     gen_exception(s, s->pc, EXCP_HLT);
4528 }
4529 
4530 DISAS_INSN(rte)
4531 {
4532     if (IS_USER(s)) {
4533         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4534         return;
4535     }
4536     gen_exception(s, s->base.pc_next, EXCP_RTE);
4537 }
4538 
4539 DISAS_INSN(cf_movec)
4540 {
4541     uint16_t ext;
4542     TCGv reg;
4543 
4544     if (IS_USER(s)) {
4545         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4546         return;
4547     }
4548 
4549     ext = read_im16(env, s);
4550 
4551     if (ext & 0x8000) {
4552         reg = AREG(ext, 12);
4553     } else {
4554         reg = DREG(ext, 12);
4555     }
4556     gen_helper_cf_movec_to(tcg_env, tcg_constant_i32(ext & 0xfff), reg);
4557     gen_exit_tb(s);
4558 }
4559 
4560 DISAS_INSN(m68k_movec)
4561 {
4562     uint16_t ext;
4563     TCGv reg, creg;
4564 
4565     if (IS_USER(s)) {
4566         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4567         return;
4568     }
4569 
4570     ext = read_im16(env, s);
4571 
4572     if (ext & 0x8000) {
4573         reg = AREG(ext, 12);
4574     } else {
4575         reg = DREG(ext, 12);
4576     }
4577     creg = tcg_constant_i32(ext & 0xfff);
4578     if (insn & 1) {
4579         gen_helper_m68k_movec_to(tcg_env, creg, reg);
4580     } else {
4581         gen_helper_m68k_movec_from(reg, tcg_env, creg);
4582     }
4583     gen_exit_tb(s);
4584 }
4585 
4586 DISAS_INSN(intouch)
4587 {
4588     if (IS_USER(s)) {
4589         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4590         return;
4591     }
4592     /* ICache fetch.  Implement as no-op.  */
4593 }
4594 
4595 DISAS_INSN(cpushl)
4596 {
4597     if (IS_USER(s)) {
4598         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4599         return;
4600     }
4601     /* Cache push/invalidate.  Implement as no-op.  */
4602 }
4603 
4604 DISAS_INSN(cpush)
4605 {
4606     if (IS_USER(s)) {
4607         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4608         return;
4609     }
4610     /* Cache push/invalidate.  Implement as no-op.  */
4611 }
4612 
4613 DISAS_INSN(cinv)
4614 {
4615     if (IS_USER(s)) {
4616         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4617         return;
4618     }
4619     /* Invalidate cache line.  Implement as no-op.  */
4620 }
4621 
4622 #if !defined(CONFIG_USER_ONLY)
4623 DISAS_INSN(pflush)
4624 {
4625     TCGv opmode;
4626 
4627     if (IS_USER(s)) {
4628         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4629         return;
4630     }
4631 
4632     opmode = tcg_constant_i32((insn >> 3) & 3);
4633     gen_helper_pflush(tcg_env, AREG(insn, 0), opmode);
4634 }
4635 
4636 DISAS_INSN(ptest)
4637 {
4638     TCGv is_read;
4639 
4640     if (IS_USER(s)) {
4641         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4642         return;
4643     }
4644     is_read = tcg_constant_i32((insn >> 5) & 1);
4645     gen_helper_ptest(tcg_env, AREG(insn, 0), is_read);
4646 }
4647 #endif
4648 
/* WDDATA: always raises a privilege violation here.  */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4653 
4654 DISAS_INSN(wdebug)
4655 {
4656     if (IS_USER(s)) {
4657         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4658         return;
4659     }
4660     /* TODO: Implement wdebug.  */
4661     cpu_abort(env_cpu(env), "WDEBUG not implemented");
4662 }
4663 #endif
4664 
/* TRAP #n: raise one of the sixteen trap vectors.  */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4669 
/*
 * Emit the conditional trap for TRAPcc/TRAPV: if condition 'c' holds,
 * raise a format-2 EXCP_TRAPCC; otherwise fall through to the next insn.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            /* The exception path ended the TB; resume normal translation.  */
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4692 
4693 DISAS_INSN(trapcc)
4694 {
4695     DisasCompare c;
4696 
4697     /* Consume and discard the immediate operand. */
4698     switch (extract32(insn, 0, 3)) {
4699     case 2: /* trapcc.w */
4700         (void)read_im16(env, s);
4701         break;
4702     case 3: /* trapcc.l */
4703         (void)read_im32(env, s);
4704         break;
4705     case 4: /* trapcc (no operand) */
4706         break;
4707     default:
4708         /* trapcc registered with only valid opmodes */
4709         g_assert_not_reached();
4710     }
4711 
4712     gen_cc_cond(&c, s, extract32(insn, 8, 4));
4713     do_trapcc(s, &c);
4714 }
4715 
4716 DISAS_INSN(trapv)
4717 {
4718     DisasCompare c;
4719 
4720     gen_cc_cond(&c, s, 9); /* V set */
4721     do_trapcc(s, &c);
4722 }
4723 
4724 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4725 {
4726     switch (reg) {
4727     case M68K_FPIAR:
4728         tcg_gen_movi_i32(res, 0);
4729         break;
4730     case M68K_FPSR:
4731         gen_helper_get_fpsr(res, tcg_env);
4732         break;
4733     case M68K_FPCR:
4734         tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpcr));
4735         break;
4736     }
4737 }
4738 
4739 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4740 {
4741     switch (reg) {
4742     case M68K_FPIAR:
4743         break;
4744     case M68K_FPSR:
4745         gen_helper_set_fpsr(tcg_env, val);
4746         break;
4747     case M68K_FPCR:
4748         gen_helper_set_fpcr(tcg_env, val);
4749         break;
4750     }
4751 }
4752 
4753 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4754 {
4755     int index = IS_USER(s);
4756     TCGv tmp;
4757 
4758     tmp = tcg_temp_new();
4759     gen_load_fcr(s, tmp, reg);
4760     tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4761 }
4762 
4763 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4764 {
4765     int index = IS_USER(s);
4766     TCGv tmp;
4767 
4768     tmp = tcg_temp_new();
4769     tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4770     gen_store_fcr(s, tmp, reg);
4771 }
4772 
4773 
/*
 * FMOVE/FMOVEM to or from the FP control registers (FPCR/FPSR/FPIAR).
 * 'mask' selects which of the three registers take part; 'is_write'
 * is the register-to-EA direction.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Only a single control register may move to/from Dn.  */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediate source: single register, store direction only.  */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory operand: compute the effective address.  */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store the selected registers at descending addresses.  */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* (An)+: write back the final address.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4869 
/*
 * FMOVEM: move multiple FP data registers to/from memory.  ext bit 13
 * clear means load (memory -> registers); mode bits 11-12 select static
 * vs dynamic register list and the addressing direction.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, tcg_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, tcg_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, tcg_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, tcg_env, addr, tmp);
            }
        }
    }
    /* Write back the updated address for the (An)+ and -(An) modes.  */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4929 
4930 /*
4931  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4932  * immediately before the next FP instruction is executed.
4933  */
/*
 * Main FPU instruction decoder (F-line).  Handles fmovecr, fmove-out,
 * control-register moves, fmovem and all monadic/dyadic arithmetic via
 * the opmode field of the extension word.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_constant_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(tcg_env, cpu_dest, rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        /* FP condition codes are set from the stored value.  */
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    /* ext bit 14 set: the source operand comes from memory/EA.  */
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(tcg_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(tcg_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(tcg_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(tcg_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: the low opmode bits select the cosine register.  */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(tcg_env, cpu_dest, cpu_dest2, cpu_src);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(tcg_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    default:
        goto undef;
    }
    /* Set the FP condition codes from the arithmetic result.  */
    gen_helper_ftst(tcg_env, cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5170 
/*
 * Build a DisasCompare for FPU condition @cond (0..31).  Conditions
 * 16..31 are the "signaling" variants; they are translated the same
 * way here but should additionally raise BSUN on an unordered operand
 * (see TODO below).  The predicates are tests on FPSR condition-code
 * bits: N (negative), Z (zero), A (NaN / unordered, per the case
 * comments), expressed as TCG_COND_TSTNE/TSTEQ against the immediate
 * mask placed in c->v2.
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;
    int imm = 0;

    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    c->v1 = fpsr;

    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        /*
         * Shift A into the N bit position, OR it into fpsr and invert
         * the N bit: afterwards N is set iff !(A || N), so a single
         * TSTNE against (Z | N) yields Z || !(A || N).
         */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        /* Invert N so one TSTEQ checks "!N, A and Z all clear". */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        /* Clear N when A is set, so TSTNE(Z | N) = Z || (N && !A). */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        /* As case 3, but folding Z (not A) into the inverted N bit. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        /* Invert N so TSTNE(A | Z | N) = A || Z || !N. */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        /* Clear N when Z is set, so TSTNE(A | N) = A || (N && !Z). */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    /* Immediate mask operand for the TSTNE/TSTEQ comparison. */
    c->v2 = tcg_constant_i32(imm);
}
5285 
5286 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5287 {
5288     DisasCompare c;
5289 
5290     gen_fcc_cond(&c, s, cond);
5291     update_cc_op(s);
5292     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5293 }
5294 
5295 DISAS_INSN(fbcc)
5296 {
5297     uint32_t offset;
5298     uint32_t base;
5299     TCGLabel *l1;
5300 
5301     base = s->pc;
5302     offset = (int16_t)read_im16(env, s);
5303     if (insn & (1 << 6)) {
5304         offset = (offset << 16) | read_im16(env, s);
5305     }
5306 
5307     l1 = gen_new_label();
5308     update_cc_op(s);
5309     gen_fjmpcc(s, insn & 0x3f, l1);
5310     gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5311     gen_set_label(l1);
5312     gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5313 }
5314 
5315 DISAS_INSN(fscc)
5316 {
5317     DisasCompare c;
5318     int cond;
5319     TCGv tmp;
5320     uint16_t ext;
5321 
5322     ext = read_im16(env, s);
5323     cond = ext & 0x3f;
5324     gen_fcc_cond(&c, s, cond);
5325 
5326     tmp = tcg_temp_new();
5327     tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
5328 
5329     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5330 }
5331 
5332 DISAS_INSN(ftrapcc)
5333 {
5334     DisasCompare c;
5335     uint16_t ext;
5336     int cond;
5337 
5338     ext = read_im16(env, s);
5339     cond = ext & 0x3f;
5340 
5341     /* Consume and discard the immediate operand. */
5342     switch (extract32(insn, 0, 3)) {
5343     case 2: /* ftrapcc.w */
5344         (void)read_im16(env, s);
5345         break;
5346     case 3: /* ftrapcc.l */
5347         (void)read_im32(env, s);
5348         break;
5349     case 4: /* ftrapcc (no operand) */
5350         break;
5351     default:
5352         /* ftrapcc registered with only valid opmodes */
5353         g_assert_not_reached();
5354     }
5355 
5356     gen_fcc_cond(&c, s, cond);
5357     do_trapcc(s, &c);
5358 }
5359 
5360 #if !defined(CONFIG_USER_ONLY)
5361 DISAS_INSN(frestore)
5362 {
5363     TCGv addr;
5364 
5365     if (IS_USER(s)) {
5366         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5367         return;
5368     }
5369     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5370         SRC_EA(env, addr, OS_LONG, 0, NULL);
5371         /* FIXME: check the state frame */
5372     } else {
5373         disas_undef(env, s, insn);
5374     }
5375 }
5376 
5377 DISAS_INSN(fsave)
5378 {
5379     if (IS_USER(s)) {
5380         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5381         return;
5382     }
5383 
5384     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5385         /* always write IDLE */
5386         TCGv idle = tcg_constant_i32(0x41000000);
5387         DEST_EA(env, insn, OS_LONG, idle, NULL);
5388     } else {
5389         disas_undef(env, s, insn);
5390     }
5391 }
5392 #endif
5393 
5394 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5395 {
5396     TCGv tmp = tcg_temp_new();
5397     if (s->env->macsr & MACSR_FI) {
5398         if (upper)
5399             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5400         else
5401             tcg_gen_shli_i32(tmp, val, 16);
5402     } else if (s->env->macsr & MACSR_SU) {
5403         if (upper)
5404             tcg_gen_sari_i32(tmp, val, 16);
5405         else
5406             tcg_gen_ext16s_i32(tmp, val);
5407     } else {
5408         if (upper)
5409             tcg_gen_shri_i32(tmp, val, 16);
5410         else
5411             tcg_gen_ext16u_i32(tmp, val);
5412     }
5413     return tmp;
5414 }
5415 
/* Clear the per-operation MACSR flags (V, Z, N, EV) before a MAC op. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5421 
/*
 * ColdFire (E)MAC multiply-accumulate instruction.  Handles the plain
 * form, the "MAC with load" form (insn bits 4-5 select a parallel
 * memory operand), and, when the CPU has CF_EMAC_B, the dual
 * accumulate variant that adds the product to a second accumulator.
 */
DISAS_INSN(mac)
{
    TCGv rx;            /* first multiplier operand */
    TCGv ry;            /* second multiplier operand */
    uint16_t ext;
    int acc;            /* accumulator index, 0..3 */
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;           /* non-zero for the dual-accumulate form */
    TCGv saved_flags;

    /* Lazily allocate the 64-bit scratch that holds the product. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        /* The load form flips the low accumulator-select bit. */
        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  Select the 16-bit halves of each operand. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, tcg_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, tcg_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, tcg_env, rx, ry);
        /* Integer modes allow an extra shift of the product. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* Accumulate: insn bit 8 selects subtract (MSAC) vs add (MAC). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    /* Saturate the accumulator per the current MACSR mode. */
    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Complete the parallel load: write the loaded value to Rw. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5589 
/*
 * MOVE from ACC: read accumulator ((insn >> 9) & 3) into a data or
 * address register, converted per the current MACSR mode.  When insn
 * bit 6 is set, additionally clear the accumulator and its PAV
 * (overflow) flag.
 */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: conversion handled by helper. */
        gen_helper_get_macf(rx, tcg_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        /* No saturation requested: take the low 32 bits directly. */
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        /* MOVE-with-clear form. */
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}
5613 
5614 DISAS_INSN(move_mac)
5615 {
5616     /* FIXME: This can be done without a helper.  */
5617     int src;
5618     TCGv dest;
5619     src = insn & 3;
5620     dest = tcg_constant_i32((insn >> 9) & 3);
5621     gen_helper_mac_move(tcg_env, dest, tcg_constant_i32(src));
5622     gen_mac_clear_flags();
5623     gen_helper_mac_set_flags(tcg_env, dest);
5624 }
5625 
/* MOVE from MACSR: copy the MAC status register into Dx/Ax. */
DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}
5633 
/* MOVE from MASK: copy the MAC address mask register into Dx/Ax. */
DISAS_INSN(from_mask)
{
    TCGv reg;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}
5640 
5641 DISAS_INSN(from_mext)
5642 {
5643     TCGv reg;
5644     TCGv acc;
5645     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5646     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5647     if (s->env->macsr & MACSR_FI)
5648         gen_helper_get_mac_extf(reg, tcg_env, acc);
5649     else
5650         gen_helper_get_mac_exti(reg, tcg_env, acc);
5651 }
5652 
5653 DISAS_INSN(macsr_to_ccr)
5654 {
5655     TCGv tmp = tcg_temp_new();
5656 
5657     /* Note that X and C are always cleared. */
5658     tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5659     gen_helper_set_ccr(tcg_env, tmp);
5660     set_cc_op(s, CC_OP_FLAGS);
5661 }
5662 
/*
 * MOVE to ACC: load a 32-bit source into accumulator ((insn >> 9) & 3).
 * Fractional mode shifts the value left by 8; signed mode sign-extends;
 * unsigned mode zero-extends.  Clears the accumulator's PAV flag and
 * recomputes the MACSR flags.
 */
DISAS_INSN(to_mac)
{
    TCGv_i64 acc;
    TCGv val;
    int accnum;
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    SRC_EA(env, val, OS_LONG, 0, NULL);
    if (s->env->macsr & MACSR_FI) {
        tcg_gen_ext_i32_i64(acc, val);
        tcg_gen_shli_i64(acc, acc, 8);
    } else if (s->env->macsr & MACSR_SU) {
        tcg_gen_ext_i32_i64(acc, val);
    } else {
        tcg_gen_extu_i32_i64(acc, val);
    }
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(accnum));
}
5683 
/*
 * MOVE to MACSR.  The MACSR mode bits are consulted at translation
 * time (s->env->macsr), so end the TB to force retranslation with
 * the new value.
 */
DISAS_INSN(to_macsr)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    gen_helper_set_macsr(tcg_env, val);
    gen_exit_tb(s);
}
5691 
/* MOVE to MASK.  The upper 16 bits of MAC_MASK always read as ones. */
DISAS_INSN(to_mask)
{
    TCGv val;
    SRC_EA(env, val, OS_LONG, 0, NULL);
    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
}
5698 
5699 DISAS_INSN(to_mext)
5700 {
5701     TCGv val;
5702     TCGv acc;
5703     SRC_EA(env, val, OS_LONG, 0, NULL);
5704     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5705     if (s->env->macsr & MACSR_FI)
5706         gen_helper_set_mac_extf(tcg_env, val, acc);
5707     else if (s->env->macsr & MACSR_SU)
5708         gen_helper_set_mac_exts(tcg_env, val, acc);
5709     else
5710         gen_helper_set_mac_extu(tcg_env, val, acc);
5711 }
5712 
/* Dispatch table indexed by the full 16-bit opcode word. */
static disas_proc opcode_table[65536];
5714 
5715 static void
5716 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5717 {
5718   int i;
5719   int from;
5720   int to;
5721 
5722   /* Sanity check.  All set bits must be included in the mask.  */
5723   if (opcode & ~mask) {
5724       fprintf(stderr,
5725               "qemu internal error: bogus opcode definition %04x/%04x\n",
5726               opcode, mask);
5727       abort();
5728   }
5729   /*
5730    * This could probably be cleverer.  For now just optimize the case where
5731    * the top bits are known.
5732    */
5733   /* Find the first zero bit in the mask.  */
5734   i = 0x8000;
5735   while ((i & mask) != 0)
5736       i >>= 1;
5737   /* Iterate over all combinations of this and lower bits.  */
5738   if (i == 0)
5739       i = 1;
5740   else
5741       i <<= 1;
5742   from = opcode & ~(i - 1);
5743   to = from + i;
5744   for (i = from; i < to; i++) {
5745       if ((i & mask) == opcode)
5746           opcode_table[i] = proc;
5747   }
5748 }
5749 
5750 /*
5751  * Register m68k opcode handlers.  Order is important.
5752  * Later insn override earlier ones.
5753  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Default every opcode to "undefined"; real insns overlay it. */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if !defined(CONFIG_USER_ONLY)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    /* ColdFire EMAC instructions (line A).  */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* FPU and system (line F) instructions.  */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if !defined(CONFIG_USER_ONLY)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6006 
/* TranslatorOps hook: set up per-TB translation state from the CPU env. */
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu_env(cpu);

    dc->env = env;
    dc->pc = dc->base.pc_first;
    /* This value will always be filled in properly before m68k_tr_tb_stop. */
    dc->pc_prev = 0xdeadbeef;
    /* Condition codes start unknown; computed lazily per insn. */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_synced = 1;
    dc->done_mac = 0;
    dc->writeback_mask = 0;

    dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
    /* If architectural single step active, limit to 1 */
    if (dc->ss_active) {
        dc->base.max_insns = 1;
    }
}
6027 
/* TranslatorOps hook: nothing to emit at the start of a TB. */
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
}
6031 
/*
 * TranslatorOps hook: emit the per-insn start marker, recording the
 * insn's pc and the current cc_op state with the TCG op stream.
 */
static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6037 
/*
 * TranslatorOps hook: fetch one opcode word, dispatch to its handler
 * via opcode_table, flush pending register writebacks, and decide
 * whether the TB must end before the next page.
 */
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUM68KState *env = cpu_env(cpu);
    uint16_t insn = read_im16(env, dc);

    opcode_table[insn](env, dc, insn);
    do_writebacks(dc);

    /* The handler advanced dc->pc past any extension words it read. */
    dc->pc_prev = dc->base.pc_next;
    dc->base.pc_next = dc->pc;

    if (dc->base.is_jmp == DISAS_NEXT) {
        /*
         * Stop translation when the next insn might touch a new page.
         * This ensures that prefetch aborts at the right place.
         *
         * We cannot determine the size of the next insn without
         * completely decoding it.  However, the maximum insn size
         * is 32 bytes, so end if we do not have that much remaining.
         * This may produce several small TBs at the end of each page,
         * but they will all be linked with goto_tb.
         *
         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
         * smaller than MC68020's.
         */
        target_ulong start_page_offset
            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);

        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6072 
6073 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6074 {
6075     DisasContext *dc = container_of(dcbase, DisasContext, base);
6076 
6077     switch (dc->base.is_jmp) {
6078     case DISAS_NORETURN:
6079         break;
6080     case DISAS_TOO_MANY:
6081         update_cc_op(dc);
6082         gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6083         break;
6084     case DISAS_JUMP:
6085         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6086         if (dc->ss_active) {
6087             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6088         } else {
6089             tcg_gen_lookup_and_goto_ptr();
6090         }
6091         break;
6092     case DISAS_EXIT:
6093         /*
6094          * We updated CC_OP and PC in gen_exit_tb, but also modified
6095          * other state that may require returning to the main loop.
6096          */
6097         if (dc->ss_active) {
6098             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6099         } else {
6100             tcg_gen_exit_tb(NULL, 0);
6101         }
6102         break;
6103     default:
6104         g_assert_not_reached();
6105     }
6106 }
6107 
6108 static void m68k_tr_disas_log(const DisasContextBase *dcbase,
6109                               CPUState *cpu, FILE *logfile)
6110 {
6111     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
6112     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
6113 }
6114 
/* Hook table handed to translator_loop() by gen_intermediate_code.  */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
    .disas_log          = m68k_tr_disas_log,
};
6123 
6124 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
6125                            vaddr pc, void *host_pc)
6126 {
6127     DisasContext dc;
6128     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6129 }
6130 
6131 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6132 {
6133     floatx80 a = { .high = high, .low = low };
6134     union {
6135         float64 f64;
6136         double d;
6137     } u;
6138 
6139     u.f64 = floatx80_to_float64(a, &env->fp_status);
6140     return u.d;
6141 }
6142 
/*
 * Dump the architectural CPU state (data/address/FP registers, SR/CCR,
 * FPSR/FPCR, and on system emulation the stack pointers and MMU state)
 * to the given stream, for the monitor "info registers" command and
 * -d cpu logging.  @flags is currently unused here.
 */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUM68KState *env = cpu_env(cs);
    int i;
    uint16_t sr;
    /* One line per register index: Dn, An and FPn side by side. */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* Fold the lazily-computed condition codes back into SR for display. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Rounding precision field; no default: unknown encodings print nothing. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Rounding mode field (nearest / zero / minus / plus infinity). */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* System emulation only: banked A7 stack pointers, "->" marks current. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif /* !CONFIG_USER_ONLY */
}
6213