xref: /openbmc/qemu/target/hppa/translate.c (revision c5a49c63)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
33 typedef struct DisasCond {
34     TCGCond c;
35     TCGv a0, a1;
36     bool a0_is_n;
37     bool a1_is_0;
38 } DisasCond;
39 
40 typedef struct DisasContext {
41     DisasContextBase base;
42     CPUState *cs;
43 
44     target_ulong iaoq_f;
45     target_ulong iaoq_b;
46     target_ulong iaoq_n;
47     TCGv iaoq_n_var;
48 
49     int ntemps;
50     TCGv temps[8];
51 
52     DisasCond null_cond;
53     TCGLabel *null_lab;
54 
55     bool psw_n_nonzero;
56 } DisasContext;
57 
58 /* Target-specific return values from translate_one, indicating the
59    state of the TB.  Note that DISAS_NEXT indicates that we are not
60    exiting the TB.  */
61 
62 /* We are not using a goto_tb (for whatever reason), but have updated
63    the iaq (for whatever reason), so don't do it again on exit.  */
64 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
65 
66 /* We are exiting the TB, but have neither emitted a goto_tb, nor
67    updated the iaq for the next instruction to be executed.  */
68 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
69 
70 typedef struct DisasInsn {
71     uint32_t insn, mask;
72     DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
73                            const struct DisasInsn *f);
74     union {
75         void (*ttt)(TCGv, TCGv, TCGv);
76         void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
77         void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
78         void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
79         void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
80         void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
81         void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
82     } f;
83 } DisasInsn;
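/* An entry is taken to match when the fetched instruction word, ANDed with
   MASK, equals INSN; the decode loop (translate_one, later in this file)
   then invokes the TRANS callback with the matching entry.  */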
84 
85 /* global register indexes */
86 static TCGv_env cpu_env;
87 static TCGv cpu_gr[32];
88 static TCGv cpu_iaoq_f;
89 static TCGv cpu_iaoq_b;
90 static TCGv cpu_sar;
91 static TCGv cpu_psw_n;
92 static TCGv cpu_psw_v;
93 static TCGv cpu_psw_cb;
94 static TCGv cpu_psw_cb_msb;
95 static TCGv cpu_cr26;
96 static TCGv cpu_cr27;
97 
98 #include "exec/gen-icount.h"
99 
100 void hppa_translate_init(void)
101 {
102 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
103 
104     typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
105     static const GlobalVar vars[] = {
106         DEF_VAR(sar),
107         DEF_VAR(cr26),
108         DEF_VAR(cr27),
109         DEF_VAR(psw_n),
110         DEF_VAR(psw_v),
111         DEF_VAR(psw_cb),
112         DEF_VAR(psw_cb_msb),
113         DEF_VAR(iaoq_f),
114         DEF_VAR(iaoq_b),
115     };
116 
117 #undef DEF_VAR
118 
119     /* Use the symbolic register names that match the disassembler.  */
120     static const char gr_names[32][4] = {
121         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
122         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
123         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
124         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
125     };
126 
127     int i;
128 
129     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
130     tcg_ctx.tcg_env = cpu_env;
131 
132     TCGV_UNUSED(cpu_gr[0]);
133     for (i = 1; i < 32; i++) {
134         cpu_gr[i] = tcg_global_mem_new(cpu_env,
135                                        offsetof(CPUHPPAState, gr[i]),
136                                        gr_names[i]);
137     }
138 
139     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
140         const GlobalVar *v = &vars[i];
141         *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
142     }
143 }
144 
145 static DisasCond cond_make_f(void)
146 {
147     DisasCond r = { .c = TCG_COND_NEVER };
148     TCGV_UNUSED(r.a0);
149     TCGV_UNUSED(r.a1);
150     return r;
151 }
152 
153 static DisasCond cond_make_n(void)
154 {
155     DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
156     r.a0 = cpu_psw_n;
157     TCGV_UNUSED(r.a1);
158     return r;
159 }
160 
161 static DisasCond cond_make_0(TCGCond c, TCGv a0)
162 {
163     DisasCond r = { .c = c, .a1_is_0 = true };
164 
165     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
166     r.a0 = tcg_temp_new();
167     tcg_gen_mov_tl(r.a0, a0);
168     TCGV_UNUSED(r.a1);
169 
170     return r;
171 }
172 
173 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
174 {
175     DisasCond r = { .c = c };
176 
177     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
178     r.a0 = tcg_temp_new();
179     tcg_gen_mov_tl(r.a0, a0);
180     r.a1 = tcg_temp_new();
181     tcg_gen_mov_tl(r.a1, a1);
182 
183     return r;
184 }
185 
186 static void cond_prep(DisasCond *cond)
187 {
188     if (cond->a1_is_0) {
189         cond->a1_is_0 = false;
190         cond->a1 = tcg_const_tl(0);
191     }
192 }
193 
194 static void cond_free(DisasCond *cond)
195 {
196     switch (cond->c) {
197     default:
198         if (!cond->a0_is_n) {
199             tcg_temp_free(cond->a0);
200         }
201         if (!cond->a1_is_0) {
202             tcg_temp_free(cond->a1);
203         }
204         cond->a0_is_n = false;
205         cond->a1_is_0 = false;
206         TCGV_UNUSED(cond->a0);
207         TCGV_UNUSED(cond->a1);
208         /* fallthru */
209     case TCG_COND_ALWAYS:
210         cond->c = TCG_COND_NEVER;
211         break;
212     case TCG_COND_NEVER:
213         break;
214     }
215 }
216 
217 static TCGv get_temp(DisasContext *ctx)
218 {
219     unsigned i = ctx->ntemps++;
220     g_assert(i < ARRAY_SIZE(ctx->temps));
221     return ctx->temps[i] = tcg_temp_new();
222 }
223 
224 static TCGv load_const(DisasContext *ctx, target_long v)
225 {
226     TCGv t = get_temp(ctx);
227     tcg_gen_movi_tl(t, v);
228     return t;
229 }
230 
231 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
232 {
233     if (reg == 0) {
234         TCGv t = get_temp(ctx);
235         tcg_gen_movi_tl(t, 0);
236         return t;
237     } else {
238         return cpu_gr[reg];
239     }
240 }
241 
242 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
243 {
244     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
245         return get_temp(ctx);
246     } else {
247         return cpu_gr[reg];
248     }
249 }
250 
251 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
252 {
253     if (ctx->null_cond.c != TCG_COND_NEVER) {
254         cond_prep(&ctx->null_cond);
255         tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
256                            ctx->null_cond.a1, dest, t);
257     } else {
258         tcg_gen_mov_tl(dest, t);
259     }
260 }
261 
262 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
263 {
264     if (reg != 0) {
265         save_or_nullify(ctx, cpu_gr[reg], t);
266     }
267 }
268 
269 #ifdef HOST_WORDS_BIGENDIAN
270 # define HI_OFS  0
271 # define LO_OFS  4
272 #else
273 # define HI_OFS  4
274 # define LO_OFS  0
275 #endif
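/* Each 64-bit fr[] element holds two 32-bit halves; bit 5 of the 6-bit FP
   register number (see assemble_rt64 and friends below) selects the
   low-order half, and HI_OFS/LO_OFS account for host endianness.  */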
276 
277 static TCGv_i32 load_frw_i32(unsigned rt)
278 {
279     TCGv_i32 ret = tcg_temp_new_i32();
280     tcg_gen_ld_i32(ret, cpu_env,
281                    offsetof(CPUHPPAState, fr[rt & 31])
282                    + (rt & 32 ? LO_OFS : HI_OFS));
283     return ret;
284 }
285 
286 static TCGv_i32 load_frw0_i32(unsigned rt)
287 {
288     if (rt == 0) {
289         return tcg_const_i32(0);
290     } else {
291         return load_frw_i32(rt);
292     }
293 }
294 
295 static TCGv_i64 load_frw0_i64(unsigned rt)
296 {
297     if (rt == 0) {
298         return tcg_const_i64(0);
299     } else {
300         TCGv_i64 ret = tcg_temp_new_i64();
301         tcg_gen_ld32u_i64(ret, cpu_env,
302                           offsetof(CPUHPPAState, fr[rt & 31])
303                           + (rt & 32 ? LO_OFS : HI_OFS));
304         return ret;
305     }
306 }
307 
308 static void save_frw_i32(unsigned rt, TCGv_i32 val)
309 {
310     tcg_gen_st_i32(val, cpu_env,
311                    offsetof(CPUHPPAState, fr[rt & 31])
312                    + (rt & 32 ? LO_OFS : HI_OFS));
313 }
314 
315 #undef HI_OFS
316 #undef LO_OFS
317 
318 static TCGv_i64 load_frd(unsigned rt)
319 {
320     TCGv_i64 ret = tcg_temp_new_i64();
321     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
322     return ret;
323 }
324 
325 static TCGv_i64 load_frd0(unsigned rt)
326 {
327     if (rt == 0) {
328         return tcg_const_i64(0);
329     } else {
330         return load_frd(rt);
331     }
332 }
333 
334 static void save_frd(unsigned rt, TCGv_i64 val)
335 {
336     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
337 }
338 
339 /* Skip over the implementation of an insn that has been nullified.
340    Use this when the insn is too complex for a conditional move.  */
341 static void nullify_over(DisasContext *ctx)
342 {
343     if (ctx->null_cond.c != TCG_COND_NEVER) {
344         /* The always condition should have been handled in the main loop.  */
345         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
346 
347         ctx->null_lab = gen_new_label();
348         cond_prep(&ctx->null_cond);
349 
350         /* If we're using PSW[N], copy it to a temp because... */
351         if (ctx->null_cond.a0_is_n) {
352             ctx->null_cond.a0_is_n = false;
353             ctx->null_cond.a0 = tcg_temp_new();
354             tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
355         }
356         /* ... we clear it before branching over the implementation,
357            so that (1) it's clear after nullifying this insn and
358            (2) if this insn nullifies the next, PSW[N] is valid.  */
359         if (ctx->psw_n_nonzero) {
360             ctx->psw_n_nonzero = false;
361             tcg_gen_movi_tl(cpu_psw_n, 0);
362         }
363 
364         tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
365                           ctx->null_cond.a1, ctx->null_lab);
366         cond_free(&ctx->null_cond);
367     }
368 }
369 
370 /* Save the current nullification state to PSW[N].  */
371 static void nullify_save(DisasContext *ctx)
372 {
373     if (ctx->null_cond.c == TCG_COND_NEVER) {
374         if (ctx->psw_n_nonzero) {
375             tcg_gen_movi_tl(cpu_psw_n, 0);
376         }
377         return;
378     }
379     if (!ctx->null_cond.a0_is_n) {
380         cond_prep(&ctx->null_cond);
381         tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
382                            ctx->null_cond.a0, ctx->null_cond.a1);
383         ctx->psw_n_nonzero = true;
384     }
385     cond_free(&ctx->null_cond);
386 }
387 
388 /* Set PSW[N] to X.  The intention is that this is used immediately
389    before a goto_tb/exit_tb, so that there is no fallthru path to other
390    code within the TB.  Therefore we do not update psw_n_nonzero.  */
391 static void nullify_set(DisasContext *ctx, bool x)
392 {
393     if (ctx->psw_n_nonzero || x) {
394         tcg_gen_movi_tl(cpu_psw_n, x);
395     }
396 }
397 
398 /* Mark the end of an instruction that may have been nullified.
399    This is the pair to nullify_over.  */
400 static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
401 {
402     TCGLabel *null_lab = ctx->null_lab;
403 
404     if (likely(null_lab == NULL)) {
405         /* The current insn wasn't conditional or handled the condition
406            applied to it without a branch, so the (new) setting of
407            NULL_COND can be applied directly to the next insn.  */
408         return status;
409     }
410     ctx->null_lab = NULL;
411 
412     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
413         /* The next instruction will be unconditional,
414            and NULL_COND already reflects that.  */
415         gen_set_label(null_lab);
416     } else {
417         /* The insn that we just executed is itself nullifying the next
418            instruction.  Store the condition in the PSW[N] global.
419            We asserted PSW[N] = 0 in nullify_over, so that after the
420            label we have the proper value in place.  */
421         nullify_save(ctx);
422         gen_set_label(null_lab);
423         ctx->null_cond = cond_make_n();
424     }
425 
426     assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
427     if (status == DISAS_NORETURN) {
428         status = DISAS_NEXT;
429     }
430     return status;
431 }
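/* Illustrative sketch of how the two halves are paired throughout this file:
 *
 *     nullify_over(ctx);                    // branch around the body
 *     ... emit the operation itself ...
 *     return nullify_end(ctx, DISAS_NEXT);  // close the skip, set new state
 *
 * See do_load and the other helpers below for concrete uses.  */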
432 
433 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
434 {
435     if (unlikely(ival == -1)) {
436         tcg_gen_mov_tl(dest, vval);
437     } else {
438         tcg_gen_movi_tl(dest, ival);
439     }
440 }
441 
442 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
443 {
444     return ctx->iaoq_f + disp + 8;
445 }
446 
447 static void gen_excp_1(int exception)
448 {
449     TCGv_i32 t = tcg_const_i32(exception);
450     gen_helper_excp(cpu_env, t);
451     tcg_temp_free_i32(t);
452 }
453 
454 static DisasJumpType gen_excp(DisasContext *ctx, int exception)
455 {
456     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
457     copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
458     nullify_save(ctx);
459     gen_excp_1(exception);
460     return DISAS_NORETURN;
461 }
462 
463 static DisasJumpType gen_illegal(DisasContext *ctx)
464 {
465     nullify_over(ctx);
466     return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
467 }
468 
469 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
470 {
471     /* Suppress goto_tb in the case of single-stepping and IO.  */
472     if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
473         return false;
474     }
475     return true;
476 }
477 
478 /* If the next insn is to be nullified, and it's on the same page,
479    and we're not attempting to set a breakpoint on it, then we can
480    totally skip the nullified insn.  This avoids creating and
481    executing a TB that merely branches to the next TB.  */
482 static bool use_nullify_skip(DisasContext *ctx)
483 {
484     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
485             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
486 }
487 
488 static void gen_goto_tb(DisasContext *ctx, int which,
489                         target_ulong f, target_ulong b)
490 {
491     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
492         tcg_gen_goto_tb(which);
493         tcg_gen_movi_tl(cpu_iaoq_f, f);
494         tcg_gen_movi_tl(cpu_iaoq_b, b);
495         tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
496     } else {
497         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
498         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
499         if (ctx->base.singlestep_enabled) {
500             gen_excp_1(EXCP_DEBUG);
501         } else {
502             tcg_gen_lookup_and_goto_ptr();
503         }
504     }
505 }
506 
507 /* PA has a habit of taking the LSB of a field and using that as the sign,
508    with the rest of the field becoming the least significant bits.  */
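/* For example, with a 5-bit field whose sign bit (at POS) is 1 and whose
   remaining four bits are 0b0011, low_sextract returns (-1 << 4) | 3 = -13;
   with a sign bit of 0 the same field yields 3.  */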
509 static target_long low_sextract(uint32_t val, int pos, int len)
510 {
511     target_ulong x = -(target_ulong)extract32(val, pos, 1);
512     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
513     return x;
514 }
515 
516 static unsigned assemble_rt64(uint32_t insn)
517 {
518     unsigned r1 = extract32(insn, 6, 1);
519     unsigned r0 = extract32(insn, 0, 5);
520     return r1 * 32 + r0;
521 }
522 
523 static unsigned assemble_ra64(uint32_t insn)
524 {
525     unsigned r1 = extract32(insn, 7, 1);
526     unsigned r0 = extract32(insn, 21, 5);
527     return r1 * 32 + r0;
528 }
529 
530 static unsigned assemble_rb64(uint32_t insn)
531 {
532     unsigned r1 = extract32(insn, 12, 1);
533     unsigned r0 = extract32(insn, 16, 5);
534     return r1 * 32 + r0;
535 }
536 
537 static unsigned assemble_rc64(uint32_t insn)
538 {
539     unsigned r2 = extract32(insn, 8, 1);
540     unsigned r1 = extract32(insn, 13, 3);
541     unsigned r0 = extract32(insn, 9, 2);
542     return r2 * 32 + r1 * 4 + r0;
543 }
544 
545 static target_long assemble_12(uint32_t insn)
546 {
547     target_ulong x = -(target_ulong)(insn & 1);
548     x = (x <<  1) | extract32(insn, 2, 1);
549     x = (x << 10) | extract32(insn, 3, 10);
550     return x;
551 }
552 
553 static target_long assemble_16(uint32_t insn)
554 {
555     /* Take the name from PA2.0, which produces a 16-bit number
556        only with wide mode; otherwise a 14-bit number.  Since we don't
557        implement wide mode, this is always the 14-bit number.  */
558     return low_sextract(insn, 0, 14);
559 }
560 
561 static target_long assemble_16a(uint32_t insn)
562 {
563     /* Take the name from PA2.0, which produces a 14-bit shifted number
564        only with wide mode; otherwise a 12-bit shifted number.  Since we
565        don't implement wide mode, this is always the 12-bit number.  */
566     target_ulong x = -(target_ulong)(insn & 1);
567     x = (x << 11) | extract32(insn, 2, 11);
568     return x << 2;
569 }
570 
571 static target_long assemble_17(uint32_t insn)
572 {
573     target_ulong x = -(target_ulong)(insn & 1);
574     x = (x <<  5) | extract32(insn, 16, 5);
575     x = (x <<  1) | extract32(insn, 2, 1);
576     x = (x << 10) | extract32(insn, 3, 10);
577     return x << 2;
578 }
579 
580 static target_long assemble_21(uint32_t insn)
581 {
582     target_ulong x = -(target_ulong)(insn & 1);
583     x = (x << 11) | extract32(insn, 1, 11);
584     x = (x <<  2) | extract32(insn, 14, 2);
585     x = (x <<  5) | extract32(insn, 16, 5);
586     x = (x <<  2) | extract32(insn, 12, 2);
587     return x << 11;
588 }
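/* I.e. bit 0 of the field supplies the sign, and bits 1-11, 14-15, 16-20
   and 12-13 supply successively less significant chunks; the reassembled
   value then forms the upper bits of the operand (hence the final << 11),
   matching the assemble_21 field scrambling described in the PA-RISC
   architecture manual.  */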
589 
590 static target_long assemble_22(uint32_t insn)
591 {
592     target_ulong x = -(target_ulong)(insn & 1);
593     x = (x << 10) | extract32(insn, 16, 10);
594     x = (x <<  1) | extract32(insn, 2, 1);
595     x = (x << 10) | extract32(insn, 3, 10);
596     return x << 2;
597 }
598 
599 /* The parisc documentation describes only the general interpretation of
600    the conditions, without describing their exact implementation.  The
601    interpretations do not stand up well when considering ADD,C and SUB,B.
602    However, considering the Addition, Subtraction and Logical conditions
603    as a whole it would appear that these relations are similar to what
604    a traditional NZCV set of flags would produce.  */
605 
606 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
607 {
608     DisasCond cond;
609     TCGv tmp;
610 
611     switch (cf >> 1) {
612     case 0: /* Never / TR */
613         cond = cond_make_f();
614         break;
615     case 1: /* = / <>        (Z / !Z) */
616         cond = cond_make_0(TCG_COND_EQ, res);
617         break;
618     case 2: /* < / >=        (N / !N) */
619         cond = cond_make_0(TCG_COND_LT, res);
620         break;
621     case 3: /* <= / >        (N | Z / !N & !Z) */
622         cond = cond_make_0(TCG_COND_LE, res);
623         break;
624     case 4: /* NUV / UV      (!C / C) */
625         cond = cond_make_0(TCG_COND_EQ, cb_msb);
626         break;
627     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
628         tmp = tcg_temp_new();
629         tcg_gen_neg_tl(tmp, cb_msb);
630         tcg_gen_and_tl(tmp, tmp, res);
631         cond = cond_make_0(TCG_COND_EQ, tmp);
632         tcg_temp_free(tmp);
633         break;
634     case 6: /* SV / NSV      (V / !V) */
635         cond = cond_make_0(TCG_COND_LT, sv);
636         break;
637     case 7: /* OD / EV */
638         tmp = tcg_temp_new();
639         tcg_gen_andi_tl(tmp, res, 1);
640         cond = cond_make_0(TCG_COND_NE, tmp);
641         tcg_temp_free(tmp);
642         break;
643     default:
644         g_assert_not_reached();
645     }
646     if (cf & 1) {
647         cond.c = tcg_invert_cond(cond.c);
648     }
649 
650     return cond;
651 }
652 
653 /* Similar, but for the special case of subtraction without borrow, we
654    can use the inputs directly.  This can allow other computation to be
655    deleted as unused.  */
656 
657 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
658 {
659     DisasCond cond;
660 
661     switch (cf >> 1) {
662     case 1: /* = / <> */
663         cond = cond_make(TCG_COND_EQ, in1, in2);
664         break;
665     case 2: /* < / >= */
666         cond = cond_make(TCG_COND_LT, in1, in2);
667         break;
668     case 3: /* <= / > */
669         cond = cond_make(TCG_COND_LE, in1, in2);
670         break;
671     case 4: /* << / >>= */
672         cond = cond_make(TCG_COND_LTU, in1, in2);
673         break;
674     case 5: /* <<= / >> */
675         cond = cond_make(TCG_COND_LEU, in1, in2);
676         break;
677     default:
678         return do_cond(cf, res, sv, sv);
679     }
680     if (cf & 1) {
681         cond.c = tcg_invert_cond(cond.c);
682     }
683 
684     return cond;
685 }
686 
687 /* Similar, but for logicals, where the carry and overflow bits are not
688    computed, and use of them is undefined.  */
689 
690 static DisasCond do_log_cond(unsigned cf, TCGv res)
691 {
692     switch (cf >> 1) {
693     case 4: case 5: case 6:
694         cf &= 1;
695         break;
696     }
697     return do_cond(cf, res, res, res);
698 }
699 
700 /* Similar, but for shift/extract/deposit conditions.  */
701 
702 static DisasCond do_sed_cond(unsigned orig, TCGv res)
703 {
704     unsigned c, f;
705 
706     /* Convert the compressed condition codes to standard.
707        0-2 are the same as logicals (never,=,<), while 3 is OD.
708        4-7 are the reverse of 0-3.  */
709     c = orig & 3;
710     if (c == 3) {
711         c = 7;
712     }
713     f = (orig & 4) / 4;
714 
715     return do_log_cond(c * 2 + f, res);
716 }
717 
718 /* Similar, but for unit conditions.  */
719 
720 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
721 {
722     DisasCond cond;
723     TCGv tmp, cb;
724 
725     TCGV_UNUSED(cb);
726     if (cf & 8) {
727         /* Since we want to test lots of carry-out bits all at once, do not
728          * do our normal thing and compute carry-in of bit B+1 since that
729          * leaves us with carry bits spread across two words.
730          */
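        /* Instead compute each per-bit carry-out directly:
         *     CB = (IN1 & IN2) | ((IN1 | IN2) & ~RES)
         * and let the SDC/SBC/SHC cases below mask it down to the
         * per-nibble, per-byte or per-halfword carry bits.  */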
731         cb = tcg_temp_new();
732         tmp = tcg_temp_new();
733         tcg_gen_or_tl(cb, in1, in2);
734         tcg_gen_and_tl(tmp, in1, in2);
735         tcg_gen_andc_tl(cb, cb, res);
736         tcg_gen_or_tl(cb, cb, tmp);
737         tcg_temp_free(tmp);
738     }
739 
740     switch (cf >> 1) {
741     case 0: /* never / TR */
742     case 1: /* undefined */
743     case 5: /* undefined */
744         cond = cond_make_f();
745         break;
746 
747     case 2: /* SBZ / NBZ */
748         /* See hasless(v,1) from
749          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
750          */
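        /* For example, res = 0x12003456 (zero byte in bits 16..23) gives
         * (res - 0x01010101) & ~res & 0x80808080 = 0x00800000 != 0,
         * satisfying the SBZ ("some byte zero") condition.  */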
751         tmp = tcg_temp_new();
752         tcg_gen_subi_tl(tmp, res, 0x01010101u);
753         tcg_gen_andc_tl(tmp, tmp, res);
754         tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
755         cond = cond_make_0(TCG_COND_NE, tmp);
756         tcg_temp_free(tmp);
757         break;
758 
759     case 3: /* SHZ / NHZ */
760         tmp = tcg_temp_new();
761         tcg_gen_subi_tl(tmp, res, 0x00010001u);
762         tcg_gen_andc_tl(tmp, tmp, res);
763         tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
764         cond = cond_make_0(TCG_COND_NE, tmp);
765         tcg_temp_free(tmp);
766         break;
767 
768     case 4: /* SDC / NDC */
769         tcg_gen_andi_tl(cb, cb, 0x88888888u);
770         cond = cond_make_0(TCG_COND_NE, cb);
771         break;
772 
773     case 6: /* SBC / NBC */
774         tcg_gen_andi_tl(cb, cb, 0x80808080u);
775         cond = cond_make_0(TCG_COND_NE, cb);
776         break;
777 
778     case 7: /* SHC / NHC */
779         tcg_gen_andi_tl(cb, cb, 0x80008000u);
780         cond = cond_make_0(TCG_COND_NE, cb);
781         break;
782 
783     default:
784         g_assert_not_reached();
785     }
786     if (cf & 8) {
787         tcg_temp_free(cb);
788     }
789     if (cf & 1) {
790         cond.c = tcg_invert_cond(cond.c);
791     }
792 
793     return cond;
794 }
795 
796 /* Compute signed overflow for addition.  */
797 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
798 {
799     TCGv sv = get_temp(ctx);
800     TCGv tmp = tcg_temp_new();
801 
802     tcg_gen_xor_tl(sv, res, in1);
803     tcg_gen_xor_tl(tmp, in1, in2);
804     tcg_gen_andc_tl(sv, sv, tmp);
805     tcg_temp_free(tmp);
806 
807     return sv;
808 }
809 
810 /* Compute signed overflow for subtraction.  */
811 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
812 {
813     TCGv sv = get_temp(ctx);
814     TCGv tmp = tcg_temp_new();
815 
816     tcg_gen_xor_tl(sv, res, in1);
817     tcg_gen_xor_tl(tmp, in1, in2);
818     tcg_gen_and_tl(sv, sv, tmp);
819     tcg_temp_free(tmp);
820 
821     return sv;
822 }
823 
824 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
825                             unsigned shift, bool is_l, bool is_tsv, bool is_tc,
826                             bool is_c, unsigned cf)
827 {
828     TCGv dest, cb, cb_msb, sv, tmp;
829     unsigned c = cf >> 1;
830     DisasCond cond;
831 
832     dest = tcg_temp_new();
833     TCGV_UNUSED(cb);
834     TCGV_UNUSED(cb_msb);
835 
836     if (shift) {
837         tmp = get_temp(ctx);
838         tcg_gen_shli_tl(tmp, in1, shift);
839         in1 = tmp;
840     }
841 
842     if (!is_l || c == 4 || c == 5) {
843         TCGv zero = tcg_const_tl(0);
844         cb_msb = get_temp(ctx);
845         tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
846         if (is_c) {
847             tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
848         }
849         tcg_temp_free(zero);
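        /* Per bit, DEST = IN1 ^ IN2 ^ carry-in, so IN1 ^ IN2 ^ DEST below
           recovers the carry into each bit (the carry out of the bit
           beneath it); this vector is what gets saved to PSW[CB].  */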
850         if (!is_l) {
851             cb = get_temp(ctx);
852             tcg_gen_xor_tl(cb, in1, in2);
853             tcg_gen_xor_tl(cb, cb, dest);
854         }
855     } else {
856         tcg_gen_add_tl(dest, in1, in2);
857         if (is_c) {
858             tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
859         }
860     }
861 
862     /* Compute signed overflow if required.  */
863     TCGV_UNUSED(sv);
864     if (is_tsv || c == 6) {
865         sv = do_add_sv(ctx, dest, in1, in2);
866         if (is_tsv) {
867             /* ??? Need to include overflow from shift.  */
868             gen_helper_tsv(cpu_env, sv);
869         }
870     }
871 
872     /* Emit any conditional trap before any writeback.  */
873     cond = do_cond(cf, dest, cb_msb, sv);
874     if (is_tc) {
875         cond_prep(&cond);
876         tmp = tcg_temp_new();
877         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
878         gen_helper_tcond(cpu_env, tmp);
879         tcg_temp_free(tmp);
880     }
881 
882     /* Write back the result.  */
883     if (!is_l) {
884         save_or_nullify(ctx, cpu_psw_cb, cb);
885         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
886     }
887     save_gpr(ctx, rt, dest);
888     tcg_temp_free(dest);
889 
890     /* Install the new nullification.  */
891     cond_free(&ctx->null_cond);
892     ctx->null_cond = cond;
893     return DISAS_NEXT;
894 }
895 
896 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
897                             bool is_tsv, bool is_b, bool is_tc, unsigned cf)
898 {
899     TCGv dest, sv, cb, cb_msb, zero, tmp;
900     unsigned c = cf >> 1;
901     DisasCond cond;
902 
903     dest = tcg_temp_new();
904     cb = tcg_temp_new();
905     cb_msb = tcg_temp_new();
906 
907     zero = tcg_const_tl(0);
908     if (is_b) {
909         /* DEST,C = IN1 + ~IN2 + C.  */
910         tcg_gen_not_tl(cb, in2);
911         tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
912         tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
913         tcg_gen_xor_tl(cb, cb, in1);
914         tcg_gen_xor_tl(cb, cb, dest);
915     } else {
916         /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
917            operations by seeding the high word with 1 and subtracting.  */
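        /* Concretely: the double-word subtraction (1:IN1) - (0:IN2) leaves
           IN1 - IN2 in the low word and 1 - borrow in the high word, which
           is exactly the carry out of IN1 + ~IN2 + 1.  */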
918         tcg_gen_movi_tl(cb_msb, 1);
919         tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
920         tcg_gen_eqv_tl(cb, in1, in2);
921         tcg_gen_xor_tl(cb, cb, dest);
922     }
923     tcg_temp_free(zero);
924 
925     /* Compute signed overflow if required.  */
926     TCGV_UNUSED(sv);
927     if (is_tsv || c == 6) {
928         sv = do_sub_sv(ctx, dest, in1, in2);
929         if (is_tsv) {
930             gen_helper_tsv(cpu_env, sv);
931         }
932     }
933 
934     /* Compute the condition.  We cannot use the special case for borrow.  */
935     if (!is_b) {
936         cond = do_sub_cond(cf, dest, in1, in2, sv);
937     } else {
938         cond = do_cond(cf, dest, cb_msb, sv);
939     }
940 
941     /* Emit any conditional trap before any writeback.  */
942     if (is_tc) {
943         cond_prep(&cond);
944         tmp = tcg_temp_new();
945         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
946         gen_helper_tcond(cpu_env, tmp);
947         tcg_temp_free(tmp);
948     }
949 
950     /* Write back the result.  */
951     save_or_nullify(ctx, cpu_psw_cb, cb);
952     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
953     save_gpr(ctx, rt, dest);
954     tcg_temp_free(dest);
955 
956     /* Install the new nullification.  */
957     cond_free(&ctx->null_cond);
958     ctx->null_cond = cond;
959     return DISAS_NEXT;
960 }
961 
962 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
963                                TCGv in2, unsigned cf)
964 {
965     TCGv dest, sv;
966     DisasCond cond;
967 
968     dest = tcg_temp_new();
969     tcg_gen_sub_tl(dest, in1, in2);
970 
971     /* Compute signed overflow if required.  */
972     TCGV_UNUSED(sv);
973     if ((cf >> 1) == 6) {
974         sv = do_sub_sv(ctx, dest, in1, in2);
975     }
976 
977     /* Form the condition for the compare.  */
978     cond = do_sub_cond(cf, dest, in1, in2, sv);
979 
980     /* Clear.  */
981     tcg_gen_movi_tl(dest, 0);
982     save_gpr(ctx, rt, dest);
983     tcg_temp_free(dest);
984 
985     /* Install the new nullification.  */
986     cond_free(&ctx->null_cond);
987     ctx->null_cond = cond;
988     return DISAS_NEXT;
989 }
990 
991 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
992                             unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
993 {
994     TCGv dest = dest_gpr(ctx, rt);
995 
996     /* Perform the operation, and writeback.  */
997     fn(dest, in1, in2);
998     save_gpr(ctx, rt, dest);
999 
1000     /* Install the new nullification.  */
1001     cond_free(&ctx->null_cond);
1002     if (cf) {
1003         ctx->null_cond = do_log_cond(cf, dest);
1004     }
1005     return DISAS_NEXT;
1006 }
1007 
1008 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1009                              TCGv in2, unsigned cf, bool is_tc,
1010                              void (*fn)(TCGv, TCGv, TCGv))
1011 {
1012     TCGv dest;
1013     DisasCond cond;
1014 
1015     if (cf == 0) {
1016         dest = dest_gpr(ctx, rt);
1017         fn(dest, in1, in2);
1018         save_gpr(ctx, rt, dest);
1019         cond_free(&ctx->null_cond);
1020     } else {
1021         dest = tcg_temp_new();
1022         fn(dest, in1, in2);
1023 
1024         cond = do_unit_cond(cf, dest, in1, in2);
1025 
1026         if (is_tc) {
1027             TCGv tmp = tcg_temp_new();
1028             cond_prep(&cond);
1029             tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1030             gen_helper_tcond(cpu_env, tmp);
1031             tcg_temp_free(tmp);
1032         }
1033         save_gpr(ctx, rt, dest);
1034 
1035         cond_free(&ctx->null_cond);
1036         ctx->null_cond = cond;
1037     }
1038     return DISAS_NEXT;
1039 }
1040 
1041 /* Emit a memory load.  The modify parameter should be
1042  * < 0 for pre-modify,
1043  * > 0 for post-modify,
1044  * = 0 for no base register update.
1045  */
1046 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1047                        unsigned rx, int scale, target_long disp,
1048                        int modify, TCGMemOp mop)
1049 {
1050     TCGv addr, base;
1051 
1052     /* Caller uses nullify_over/nullify_end.  */
1053     assert(ctx->null_cond.c == TCG_COND_NEVER);
1054 
1055     addr = tcg_temp_new();
1056     base = load_gpr(ctx, rb);
1057 
1058     /* Note that RX is mutually exclusive with DISP.  */
1059     if (rx) {
1060         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1061         tcg_gen_add_tl(addr, addr, base);
1062     } else {
1063         tcg_gen_addi_tl(addr, base, disp);
1064     }
1065 
1066     if (modify == 0) {
1067         tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1068     } else {
1069         tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1070                             MMU_USER_IDX, mop);
1071         save_gpr(ctx, rb, addr);
1072     }
1073     tcg_temp_free(addr);
1074 }
1075 
1076 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1077                        unsigned rx, int scale, target_long disp,
1078                        int modify, TCGMemOp mop)
1079 {
1080     TCGv addr, base;
1081 
1082     /* Caller uses nullify_over/nullify_end.  */
1083     assert(ctx->null_cond.c == TCG_COND_NEVER);
1084 
1085     addr = tcg_temp_new();
1086     base = load_gpr(ctx, rb);
1087 
1088     /* Note that RX is mutually exclusive with DISP.  */
1089     if (rx) {
1090         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1091         tcg_gen_add_tl(addr, addr, base);
1092     } else {
1093         tcg_gen_addi_tl(addr, base, disp);
1094     }
1095 
1096     if (modify == 0) {
1097         tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1098     } else {
1099         tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1100                             MMU_USER_IDX, mop);
1101         save_gpr(ctx, rb, addr);
1102     }
1103     tcg_temp_free(addr);
1104 }
1105 
1106 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1107                         unsigned rx, int scale, target_long disp,
1108                         int modify, TCGMemOp mop)
1109 {
1110     TCGv addr, base;
1111 
1112     /* Caller uses nullify_over/nullify_end.  */
1113     assert(ctx->null_cond.c == TCG_COND_NEVER);
1114 
1115     addr = tcg_temp_new();
1116     base = load_gpr(ctx, rb);
1117 
1118     /* Note that RX is mutually exclusive with DISP.  */
1119     if (rx) {
1120         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1121         tcg_gen_add_tl(addr, addr, base);
1122     } else {
1123         tcg_gen_addi_tl(addr, base, disp);
1124     }
1125 
1126     tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1127 
1128     if (modify != 0) {
1129         save_gpr(ctx, rb, addr);
1130     }
1131     tcg_temp_free(addr);
1132 }
1133 
1134 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1135                         unsigned rx, int scale, target_long disp,
1136                         int modify, TCGMemOp mop)
1137 {
1138     TCGv addr, base;
1139 
1140     /* Caller uses nullify_over/nullify_end.  */
1141     assert(ctx->null_cond.c == TCG_COND_NEVER);
1142 
1143     addr = tcg_temp_new();
1144     base = load_gpr(ctx, rb);
1145 
1146     /* Note that RX is mutually exclusive with DISP.  */
1147     if (rx) {
1148         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1149         tcg_gen_add_tl(addr, addr, base);
1150     } else {
1151         tcg_gen_addi_tl(addr, base, disp);
1152     }
1153 
1154     tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1155 
1156     if (modify != 0) {
1157         save_gpr(ctx, rb, addr);
1158     }
1159     tcg_temp_free(addr);
1160 }
1161 
1162 #if TARGET_LONG_BITS == 64
1163 #define do_load_tl  do_load_64
1164 #define do_store_tl do_store_64
1165 #else
1166 #define do_load_tl  do_load_32
1167 #define do_store_tl do_store_32
1168 #endif
1169 
1170 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1171                              unsigned rx, int scale, target_long disp,
1172                              int modify, TCGMemOp mop)
1173 {
1174     TCGv dest;
1175 
1176     nullify_over(ctx);
1177 
1178     if (modify == 0) {
1179         /* No base register update.  */
1180         dest = dest_gpr(ctx, rt);
1181     } else {
1182         /* Make sure that, if RT == RB, we still see the result of the load.  */
1183         dest = get_temp(ctx);
1184     }
1185     do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1186     save_gpr(ctx, rt, dest);
1187 
1188     return nullify_end(ctx, DISAS_NEXT);
1189 }
1190 
1191 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1192                                unsigned rx, int scale, target_long disp,
1193                                int modify)
1194 {
1195     TCGv_i32 tmp;
1196 
1197     nullify_over(ctx);
1198 
1199     tmp = tcg_temp_new_i32();
1200     do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1201     save_frw_i32(rt, tmp);
1202     tcg_temp_free_i32(tmp);
1203 
1204     if (rt == 0) {
1205         gen_helper_loaded_fr0(cpu_env);
1206     }
1207 
1208     return nullify_end(ctx, DISAS_NEXT);
1209 }
1210 
1211 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1212                                unsigned rx, int scale, target_long disp,
1213                                int modify)
1214 {
1215     TCGv_i64 tmp;
1216 
1217     nullify_over(ctx);
1218 
1219     tmp = tcg_temp_new_i64();
1220     do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1221     save_frd(rt, tmp);
1222     tcg_temp_free_i64(tmp);
1223 
1224     if (rt == 0) {
1225         gen_helper_loaded_fr0(cpu_env);
1226     }
1227 
1228     return nullify_end(ctx, DISAS_NEXT);
1229 }
1230 
1231 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1232                               target_long disp, int modify, TCGMemOp mop)
1233 {
1234     nullify_over(ctx);
1235     do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1236     return nullify_end(ctx, DISAS_NEXT);
1237 }
1238 
1239 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1240                                 unsigned rx, int scale, target_long disp,
1241                                 int modify)
1242 {
1243     TCGv_i32 tmp;
1244 
1245     nullify_over(ctx);
1246 
1247     tmp = load_frw_i32(rt);
1248     do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1249     tcg_temp_free_i32(tmp);
1250 
1251     return nullify_end(ctx, DISAS_NEXT);
1252 }
1253 
1254 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1255                                 unsigned rx, int scale, target_long disp,
1256                                 int modify)
1257 {
1258     TCGv_i64 tmp;
1259 
1260     nullify_over(ctx);
1261 
1262     tmp = load_frd(rt);
1263     do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1264     tcg_temp_free_i64(tmp);
1265 
1266     return nullify_end(ctx, DISAS_NEXT);
1267 }
1268 
1269 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1270                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1271 {
1272     TCGv_i32 tmp;
1273 
1274     nullify_over(ctx);
1275     tmp = load_frw0_i32(ra);
1276 
1277     func(tmp, cpu_env, tmp);
1278 
1279     save_frw_i32(rt, tmp);
1280     tcg_temp_free_i32(tmp);
1281     return nullify_end(ctx, DISAS_NEXT);
1282 }
1283 
1284 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1285                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1286 {
1287     TCGv_i32 dst;
1288     TCGv_i64 src;
1289 
1290     nullify_over(ctx);
1291     src = load_frd(ra);
1292     dst = tcg_temp_new_i32();
1293 
1294     func(dst, cpu_env, src);
1295 
1296     tcg_temp_free_i64(src);
1297     save_frw_i32(rt, dst);
1298     tcg_temp_free_i32(dst);
1299     return nullify_end(ctx, DISAS_NEXT);
1300 }
1301 
1302 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1303                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1304 {
1305     TCGv_i64 tmp;
1306 
1307     nullify_over(ctx);
1308     tmp = load_frd0(ra);
1309 
1310     func(tmp, cpu_env, tmp);
1311 
1312     save_frd(rt, tmp);
1313     tcg_temp_free_i64(tmp);
1314     return nullify_end(ctx, DISAS_NEXT);
1315 }
1316 
1317 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1318                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1319 {
1320     TCGv_i32 src;
1321     TCGv_i64 dst;
1322 
1323     nullify_over(ctx);
1324     src = load_frw0_i32(ra);
1325     dst = tcg_temp_new_i64();
1326 
1327     func(dst, cpu_env, src);
1328 
1329     tcg_temp_free_i32(src);
1330     save_frd(rt, dst);
1331     tcg_temp_free_i64(dst);
1332     return nullify_end(ctx, DISAS_NEXT);
1333 }
1334 
1335 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1336                                  unsigned ra, unsigned rb,
1337                                  void (*func)(TCGv_i32, TCGv_env,
1338                                               TCGv_i32, TCGv_i32))
1339 {
1340     TCGv_i32 a, b;
1341 
1342     nullify_over(ctx);
1343     a = load_frw0_i32(ra);
1344     b = load_frw0_i32(rb);
1345 
1346     func(a, cpu_env, a, b);
1347 
1348     tcg_temp_free_i32(b);
1349     save_frw_i32(rt, a);
1350     tcg_temp_free_i32(a);
1351     return nullify_end(ctx, DISAS_NEXT);
1352 }
1353 
1354 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1355                                  unsigned ra, unsigned rb,
1356                                  void (*func)(TCGv_i64, TCGv_env,
1357                                               TCGv_i64, TCGv_i64))
1358 {
1359     TCGv_i64 a, b;
1360 
1361     nullify_over(ctx);
1362     a = load_frd0(ra);
1363     b = load_frd0(rb);
1364 
1365     func(a, cpu_env, a, b);
1366 
1367     tcg_temp_free_i64(b);
1368     save_frd(rt, a);
1369     tcg_temp_free_i64(a);
1370     return nullify_end(ctx, DISAS_NEXT);
1371 }
1372 
1373 /* Emit an unconditional branch to a direct target, which may or may not
1374    have already had nullification handled.  */
1375 static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
1376                                 unsigned link, bool is_n)
1377 {
1378     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1379         if (link != 0) {
1380             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1381         }
1382         ctx->iaoq_n = dest;
1383         if (is_n) {
1384             ctx->null_cond.c = TCG_COND_ALWAYS;
1385         }
1386         return DISAS_NEXT;
1387     } else {
1388         nullify_over(ctx);
1389 
1390         if (link != 0) {
1391             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1392         }
1393 
1394         if (is_n && use_nullify_skip(ctx)) {
1395             nullify_set(ctx, 0);
1396             gen_goto_tb(ctx, 0, dest, dest + 4);
1397         } else {
1398             nullify_set(ctx, is_n);
1399             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1400         }
1401 
1402         nullify_end(ctx, DISAS_NEXT);
1403 
1404         nullify_set(ctx, 0);
1405         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1406         return DISAS_NORETURN;
1407     }
1408 }
1409 
1410 /* Emit a conditional branch to a direct target.  If the branch itself
1411    is nullified, we should have already used nullify_over.  */
1412 static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1413                                 DisasCond *cond)
1414 {
1415     target_ulong dest = iaoq_dest(ctx, disp);
1416     TCGLabel *taken = NULL;
1417     TCGCond c = cond->c;
1418     bool n;
1419 
1420     assert(ctx->null_cond.c == TCG_COND_NEVER);
1421 
1422     /* Handle TRUE and NEVER as direct branches.  */
1423     if (c == TCG_COND_ALWAYS) {
1424         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1425     }
1426     if (c == TCG_COND_NEVER) {
1427         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1428     }
1429 
1430     taken = gen_new_label();
1431     cond_prep(cond);
1432     tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1433     cond_free(cond);
1434 
1435     /* Not taken: Condition not satisfied; nullify on backward branches. */
1436     n = is_n && disp < 0;
1437     if (n && use_nullify_skip(ctx)) {
1438         nullify_set(ctx, 0);
1439         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1440     } else {
1441         if (!n && ctx->null_lab) {
1442             gen_set_label(ctx->null_lab);
1443             ctx->null_lab = NULL;
1444         }
1445         nullify_set(ctx, n);
1446         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1447     }
1448 
1449     gen_set_label(taken);
1450 
1451     /* Taken: Condition satisfied; nullify on forward branches.  */
1452     n = is_n && disp >= 0;
1453     if (n && use_nullify_skip(ctx)) {
1454         nullify_set(ctx, 0);
1455         gen_goto_tb(ctx, 1, dest, dest + 4);
1456     } else {
1457         nullify_set(ctx, n);
1458         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1459     }
1460 
1461     /* Not taken: the branch itself was nullified.  */
1462     if (ctx->null_lab) {
1463         gen_set_label(ctx->null_lab);
1464         ctx->null_lab = NULL;
1465         return DISAS_IAQ_N_STALE;
1466     } else {
1467         return DISAS_NORETURN;
1468     }
1469 }
1470 
1471 /* Emit an unconditional branch to an indirect target.  This handles
1472    nullification of the branch itself.  */
1473 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
1474                                 unsigned link, bool is_n)
1475 {
1476     TCGv a0, a1, next, tmp;
1477     TCGCond c;
1478 
1479     assert(ctx->null_lab == NULL);
1480 
1481     if (ctx->null_cond.c == TCG_COND_NEVER) {
1482         if (link != 0) {
1483             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1484         }
1485         next = get_temp(ctx);
1486         tcg_gen_mov_tl(next, dest);
1487         ctx->iaoq_n = -1;
1488         ctx->iaoq_n_var = next;
1489         if (is_n) {
1490             ctx->null_cond.c = TCG_COND_ALWAYS;
1491         }
1492     } else if (is_n && use_nullify_skip(ctx)) {
1493         /* The (conditional) branch, B, nullifies the next insn, N,
1494            and we're allowed to skip execution of N (no single-step or
1495            breakpoint in effect).  Since the goto_ptr that we must use
1496            for the indirect branch consumes no special resources, we
1497            can (conditionally) skip B and continue execution.  */
1498         /* The use_nullify_skip test implies we have a known control path.  */
1499         tcg_debug_assert(ctx->iaoq_b != -1);
1500         tcg_debug_assert(ctx->iaoq_n != -1);
1501 
1502         /* We do have to handle the non-local temporary, DEST, before
1503            branching.  Since IAOQ_F is not really live at this point, we
1504            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1505         tcg_gen_mov_tl(cpu_iaoq_f, dest);
1506         tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1507 
1508         nullify_over(ctx);
1509         if (link != 0) {
1510             tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1511         }
1512         tcg_gen_lookup_and_goto_ptr();
1513         return nullify_end(ctx, DISAS_NEXT);
1514     } else {
1515         cond_prep(&ctx->null_cond);
1516         c = ctx->null_cond.c;
1517         a0 = ctx->null_cond.a0;
1518         a1 = ctx->null_cond.a1;
1519 
1520         tmp = tcg_temp_new();
1521         next = get_temp(ctx);
1522 
1523         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1524         tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1525         ctx->iaoq_n = -1;
1526         ctx->iaoq_n_var = next;
1527 
1528         if (link != 0) {
1529             tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1530         }
1531 
1532         if (is_n) {
1533             /* The branch nullifies the next insn, which means the state of N
1534                after the branch is the inverse of the state of N that applied
1535                to the branch.  */
1536             tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1537             cond_free(&ctx->null_cond);
1538             ctx->null_cond = cond_make_n();
1539             ctx->psw_n_nonzero = true;
1540         } else {
1541             cond_free(&ctx->null_cond);
1542         }
1543     }
1544 
1545     return DISAS_NEXT;
1546 }
1547 
1548 /* On Linux, page zero is normally marked execute only + gateway.
1549    Therefore normal read or write is supposed to fail, but specific
1550    offsets have kernel code mapped to raise permissions to implement
1551    system calls.  Handling this via an explicit check here, rather
1552    in than the "be disp(sr2,r0)" instruction that probably sent us
1553    here, is the easiest way to handle the branch delay slot on the
1554    aforementioned BE.  */
1555 static DisasJumpType do_page_zero(DisasContext *ctx)
1556 {
1557     /* If by some means we get here with PSW[N]=1, that implies that
1558        the B,GATE instruction would be skipped, and we'd fault on the
1559        next insn within the privileged page.  */
1560     switch (ctx->null_cond.c) {
1561     case TCG_COND_NEVER:
1562         break;
1563     case TCG_COND_ALWAYS:
1564         tcg_gen_movi_tl(cpu_psw_n, 0);
1565         goto do_sigill;
1566     default:
1567         /* Since this is always the first (and only) insn within the
1568            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1569         g_assert_not_reached();
1570     }
1571 
1572     /* Check that we didn't arrive here via some means that allowed
1573        non-sequential instruction execution.  Normally the PSW[B] bit
1574        detects this by preventing the B,GATE instruction from executing
1575        under such conditions.  */
1576     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1577         goto do_sigill;
1578     }
1579 
1580     switch (ctx->iaoq_f) {
1581     case 0x00: /* Null pointer call */
1582         gen_excp_1(EXCP_SIGSEGV);
1583         return DISAS_NORETURN;
1584 
1585     case 0xb0: /* LWS */
1586         gen_excp_1(EXCP_SYSCALL_LWS);
1587         return DISAS_NORETURN;
1588 
1589     case 0xe0: /* SET_THREAD_POINTER */
1590         tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1591         tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1592         tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1593         return DISAS_IAQ_N_UPDATED;
1594 
1595     case 0x100: /* SYSCALL */
1596         gen_excp_1(EXCP_SYSCALL);
1597         return DISAS_NORETURN;
1598 
1599     default:
1600     do_sigill:
1601         gen_excp_1(EXCP_SIGILL);
1602         return DISAS_NORETURN;
1603     }
1604 }
1605 
1606 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1607                                const DisasInsn *di)
1608 {
1609     cond_free(&ctx->null_cond);
1610     return DISAS_NEXT;
1611 }
1612 
1613 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1614                                  const DisasInsn *di)
1615 {
1616     nullify_over(ctx);
1617     return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1618 }
1619 
1620 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1621                                 const DisasInsn *di)
1622 {
1623     /* No point in nullifying the memory barrier.  */
1624     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1625 
1626     cond_free(&ctx->null_cond);
1627     return DISAS_NEXT;
1628 }
1629 
1630 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1631                                 const DisasInsn *di)
1632 {
1633     unsigned rt = extract32(insn, 0, 5);
1634     TCGv tmp = dest_gpr(ctx, rt);
1635     tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1636     save_gpr(ctx, rt, tmp);
1637 
1638     cond_free(&ctx->null_cond);
1639     return DISAS_NEXT;
1640 }
1641 
1642 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1643                                 const DisasInsn *di)
1644 {
1645     unsigned rt = extract32(insn, 0, 5);
1646     TCGv tmp = dest_gpr(ctx, rt);
1647 
1648     /* ??? We don't implement space registers.  */
1649     tcg_gen_movi_tl(tmp, 0);
1650     save_gpr(ctx, rt, tmp);
1651 
1652     cond_free(&ctx->null_cond);
1653     return DISAS_NEXT;
1654 }
1655 
1656 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1657                                  const DisasInsn *di)
1658 {
1659     unsigned rt = extract32(insn, 0, 5);
1660     unsigned ctl = extract32(insn, 21, 5);
1661     TCGv tmp;
1662 
1663     switch (ctl) {
1664     case 11: /* SAR */
1665 #ifdef TARGET_HPPA64
1666         if (extract32(insn, 14, 1) == 0) {
1667             /* MFSAR without ,W masks low 5 bits.  */
1668             tmp = dest_gpr(ctx, rt);
1669             tcg_gen_andi_tl(tmp, cpu_sar, 31);
1670             save_gpr(ctx, rt, tmp);
1671             break;
1672         }
1673 #endif
1674         save_gpr(ctx, rt, cpu_sar);
1675         break;
1676     case 16: /* Interval Timer */
1677         tmp = dest_gpr(ctx, rt);
1678         tcg_gen_movi_tl(tmp, 0); /* FIXME */
1679         save_gpr(ctx, rt, tmp);
1680         break;
1681     case 26:
1682         save_gpr(ctx, rt, cpu_cr26);
1683         break;
1684     case 27:
1685         save_gpr(ctx, rt, cpu_cr27);
1686         break;
1687     default:
1688         /* All other control registers are privileged.  */
1689         return gen_illegal(ctx);
1690     }
1691 
1692     cond_free(&ctx->null_cond);
1693     return DISAS_NEXT;
1694 }
1695 
1696 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1697                                  const DisasInsn *di)
1698 {
1699     unsigned rin = extract32(insn, 16, 5);
1700     unsigned ctl = extract32(insn, 21, 5);
1701     TCGv tmp;
1702 
1703     if (ctl == 11) { /* SAR */
1704         tmp = tcg_temp_new();
1705         tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1706         save_or_nullify(ctx, cpu_sar, tmp);
1707         tcg_temp_free(tmp);
1708     } else {
1709         /* All other control registers are privileged or read-only.  */
1710         return gen_illegal(ctx);
1711     }
1712 
1713     cond_free(&ctx->null_cond);
1714     return DISAS_NEXT;
1715 }
1716 
1717 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1718                                    const DisasInsn *di)
1719 {
1720     unsigned rin = extract32(insn, 16, 5);
1721     TCGv tmp = tcg_temp_new();
1722 
1723     tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1724     tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1725     save_or_nullify(ctx, cpu_sar, tmp);
1726     tcg_temp_free(tmp);
1727 
1728     cond_free(&ctx->null_cond);
1729     return DISAS_NEXT;
1730 }
1731 
1732 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1733                                  const DisasInsn *di)
1734 {
1735     unsigned rt = extract32(insn, 0, 5);
1736     TCGv dest = dest_gpr(ctx, rt);
1737 
1738     /* Since we don't implement space registers, this returns zero.  */
1739     tcg_gen_movi_tl(dest, 0);
1740     save_gpr(ctx, rt, dest);
1741 
1742     cond_free(&ctx->null_cond);
1743     return DISAS_NEXT;
1744 }
1745 
1746 static const DisasInsn table_system[] = {
1747     { 0x00000000u, 0xfc001fe0u, trans_break },
1748     /* We don't implement space registers, so MTSP is a nop.  */
1749     { 0x00001820u, 0xffe01fffu, trans_nop },
1750     { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1751     { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1752     { 0x000014a0u, 0xffffffe0u, trans_mfia },
1753     { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1754     { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1755     { 0x00000400u, 0xffffffffu, trans_sync },
1756     { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
1757 };
1758 
1759 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1760                                         const DisasInsn *di)
1761 {
1762     unsigned rb = extract32(insn, 21, 5);
1763     unsigned rx = extract32(insn, 16, 5);
1764     TCGv dest = dest_gpr(ctx, rb);
1765     TCGv src1 = load_gpr(ctx, rb);
1766     TCGv src2 = load_gpr(ctx, rx);
1767 
1768     /* The only thing we need to do is the base register modification.  */
1769     tcg_gen_add_tl(dest, src1, src2);
1770     save_gpr(ctx, rb, dest);
1771 
1772     cond_free(&ctx->null_cond);
1773     return DISAS_NEXT;
1774 }
1775 
1776 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1777                                  const DisasInsn *di)
1778 {
1779     unsigned rt = extract32(insn, 0, 5);
1780     unsigned rb = extract32(insn, 21, 5);
1781     unsigned is_write = extract32(insn, 6, 1);
1782     TCGv dest;
1783 
1784     nullify_over(ctx);
1785 
1786     /* ??? Do something with priv level operand.  */
1787     dest = dest_gpr(ctx, rt);
1788     if (is_write) {
1789         gen_helper_probe_w(dest, load_gpr(ctx, rb));
1790     } else {
1791         gen_helper_probe_r(dest, load_gpr(ctx, rb));
1792     }
1793     save_gpr(ctx, rt, dest);
1794     return nullify_end(ctx, DISAS_NEXT);
1795 }
1796 
1797 static const DisasInsn table_mem_mgmt[] = {
1798     { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
1799     { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
1800     { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1801     { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
1802     { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1803     { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
1804     { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1805     { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
1806     { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1807     { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
1808     { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1809     { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
1810     { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1811     { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
1812     { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
1813 };
1814 
1815 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1816                                const DisasInsn *di)
1817 {
1818     unsigned r2 = extract32(insn, 21, 5);
1819     unsigned r1 = extract32(insn, 16, 5);
1820     unsigned cf = extract32(insn, 12, 4);
1821     unsigned ext = extract32(insn, 8, 4);
1822     unsigned shift = extract32(insn, 6, 2);
1823     unsigned rt = extract32(insn,  0, 5);
1824     TCGv tcg_r1, tcg_r2;
1825     bool is_c = false;
1826     bool is_l = false;
1827     bool is_tc = false;
1828     bool is_tsv = false;
1829     DisasJumpType ret;
1830 
1831     switch (ext) {
1832     case 0x6: /* ADD, SHLADD */
1833         break;
1834     case 0xa: /* ADD,L, SHLADD,L */
1835         is_l = true;
1836         break;
1837     case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1838         is_tsv = true;
1839         break;
1840     case 0x7: /* ADD,C */
1841         is_c = true;
1842         break;
1843     case 0xf: /* ADD,C,TSV */
1844         is_c = is_tsv = true;
1845         break;
1846     default:
1847         return gen_illegal(ctx);
1848     }
1849 
1850     if (cf) {
1851         nullify_over(ctx);
1852     }
1853     tcg_r1 = load_gpr(ctx, r1);
1854     tcg_r2 = load_gpr(ctx, r2);
1855     ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1856     return nullify_end(ctx, ret);
1857 }
1858 
1859 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1860                                const DisasInsn *di)
1861 {
1862     unsigned r2 = extract32(insn, 21, 5);
1863     unsigned r1 = extract32(insn, 16, 5);
1864     unsigned cf = extract32(insn, 12, 4);
1865     unsigned ext = extract32(insn, 6, 6);
1866     unsigned rt = extract32(insn,  0, 5);
1867     TCGv tcg_r1, tcg_r2;
1868     bool is_b = false;
1869     bool is_tc = false;
1870     bool is_tsv = false;
1871     DisasJumpType ret;
1872 
1873     switch (ext) {
1874     case 0x10: /* SUB */
1875         break;
1876     case 0x30: /* SUB,TSV */
1877         is_tsv = true;
1878         break;
1879     case 0x14: /* SUB,B */
1880         is_b = true;
1881         break;
1882     case 0x34: /* SUB,B,TSV */
1883         is_b = is_tsv = true;
1884         break;
1885     case 0x13: /* SUB,TC */
1886         is_tc = true;
1887         break;
1888     case 0x33: /* SUB,TSV,TC */
1889         is_tc = is_tsv = true;
1890         break;
1891     default:
1892         return gen_illegal(ctx);
1893     }
1894 
1895     if (cf) {
1896         nullify_over(ctx);
1897     }
1898     tcg_r1 = load_gpr(ctx, r1);
1899     tcg_r2 = load_gpr(ctx, r2);
1900     ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1901     return nullify_end(ctx, ret);
1902 }
1903 
1904 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1905                                const DisasInsn *di)
1906 {
1907     unsigned r2 = extract32(insn, 21, 5);
1908     unsigned r1 = extract32(insn, 16, 5);
1909     unsigned cf = extract32(insn, 12, 4);
1910     unsigned rt = extract32(insn,  0, 5);
1911     TCGv tcg_r1, tcg_r2;
1912     DisasJumpType ret;
1913 
1914     if (cf) {
1915         nullify_over(ctx);
1916     }
1917     tcg_r1 = load_gpr(ctx, r1);
1918     tcg_r2 = load_gpr(ctx, r2);
1919     ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1920     return nullify_end(ctx, ret);
1921 }
1922 
1923 /* OR r,0,t -> COPY (according to gas) */
1924 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1925                                 const DisasInsn *di)
1926 {
1927     unsigned r1 = extract32(insn, 16, 5);
1928     unsigned rt = extract32(insn,  0, 5);
1929 
1930     if (r1 == 0) {
1931         TCGv dest = dest_gpr(ctx, rt);
1932         tcg_gen_movi_tl(dest, 0);
1933         save_gpr(ctx, rt, dest);
1934     } else {
1935         save_gpr(ctx, rt, cpu_gr[r1]);
1936     }
1937     cond_free(&ctx->null_cond);
1938     return DISAS_NEXT;
1939 }
1940 
1941 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1942                                   const DisasInsn *di)
1943 {
1944     unsigned r2 = extract32(insn, 21, 5);
1945     unsigned r1 = extract32(insn, 16, 5);
1946     unsigned cf = extract32(insn, 12, 4);
1947     unsigned rt = extract32(insn,  0, 5);
1948     TCGv tcg_r1, tcg_r2;
1949     DisasJumpType ret;
1950 
1951     if (cf) {
1952         nullify_over(ctx);
1953     }
1954     tcg_r1 = load_gpr(ctx, r1);
1955     tcg_r2 = load_gpr(ctx, r2);
1956     ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1957     return nullify_end(ctx, ret);
1958 }
1959 
1960 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1961                                 const DisasInsn *di)
1962 {
1963     unsigned r2 = extract32(insn, 21, 5);
1964     unsigned r1 = extract32(insn, 16, 5);
1965     unsigned cf = extract32(insn, 12, 4);
1966     unsigned rt = extract32(insn,  0, 5);
1967     TCGv tcg_r1, tcg_r2;
1968     DisasJumpType ret;
1969 
1970     if (cf) {
1971         nullify_over(ctx);
1972     }
1973     tcg_r1 = load_gpr(ctx, r1);
1974     tcg_r2 = load_gpr(ctx, r2);
1975     ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1976     return nullify_end(ctx, ret);
1977 }
1978 
1979 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1980                                   const DisasInsn *di)
1981 {
1982     unsigned r2 = extract32(insn, 21, 5);
1983     unsigned r1 = extract32(insn, 16, 5);
1984     unsigned cf = extract32(insn, 12, 4);
1985     unsigned is_tc = extract32(insn, 6, 1);
1986     unsigned rt = extract32(insn,  0, 5);
1987     TCGv tcg_r1, tcg_r2, tmp;
1988     DisasJumpType ret;
1989 
1990     if (cf) {
1991         nullify_over(ctx);
1992     }
1993     tcg_r1 = load_gpr(ctx, r1);
1994     tcg_r2 = load_gpr(ctx, r2);
1995     tmp = get_temp(ctx);
1996     tcg_gen_not_tl(tmp, tcg_r2);
1997     ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1998     return nullify_end(ctx, ret);
1999 }
2000 
2001 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2002                                 const DisasInsn *di)
2003 {
2004     unsigned r2 = extract32(insn, 21, 5);
2005     unsigned cf = extract32(insn, 12, 4);
2006     unsigned is_i = extract32(insn, 6, 1);
2007     unsigned rt = extract32(insn,  0, 5);
2008     TCGv tmp;
2009     DisasJumpType ret;
2010 
2011     nullify_over(ctx);
2012 
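         /* Build the decimal correction constant: isolate one carry/borrow
            bit per 4-bit digit of PSW[CB] (inverted for plain DCOR) and
            multiply by 6, turning each selected digit into 6 without carries
            between digits, e.g. 0x00010001 * 6 == 0x00060006.  */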
2013     tmp = get_temp(ctx);
2014     tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2015     if (!is_i) {
2016         tcg_gen_not_tl(tmp, tmp);
2017     }
2018     tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2019     tcg_gen_muli_tl(tmp, tmp, 6);
2020     ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2021                   is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2022 
2023     return nullify_end(ctx, ret);
2024 }
2025 
2026 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2027                               const DisasInsn *di)
2028 {
2029     unsigned r2 = extract32(insn, 21, 5);
2030     unsigned r1 = extract32(insn, 16, 5);
2031     unsigned cf = extract32(insn, 12, 4);
2032     unsigned rt = extract32(insn,  0, 5);
2033     TCGv dest, add1, add2, addc, zero, in1, in2;
2034 
2035     nullify_over(ctx);
2036 
2037     in1 = load_gpr(ctx, r1);
2038     in2 = load_gpr(ctx, r2);
2039 
2040     add1 = tcg_temp_new();
2041     add2 = tcg_temp_new();
2042     addc = tcg_temp_new();
2043     dest = tcg_temp_new();
2044     zero = tcg_const_tl(0);
2045 
2046     /* Form R1 << 1 | PSW[CB]{8}.  */
2047     tcg_gen_add_tl(add1, in1, in1);
2048     tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2049 
2050     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2051        carry{8} requires that we subtract via + ~R2 + 1, as described in
2052        the manual.  By extracting and masking V, we can produce the
2053        proper inputs to the addition without movcond.  */
2054     tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2055     tcg_gen_xor_tl(add2, in2, addc);
2056     tcg_gen_andi_tl(addc, addc, 1);
2057     /* ??? This is only correct for 32-bit.  */
2058     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2059     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2060 
2061     tcg_temp_free(addc);
2062     tcg_temp_free(zero);
2063 
2064     /* Write back the result register.  */
2065     save_gpr(ctx, rt, dest);
2066 
2067     /* Write back PSW[CB].  */
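         /* add1 ^ add2 ^ dest recovers the carry into each bit of the sum.  */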
2068     tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2069     tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2070 
2071     /* Write back PSW[V] for the division step.  */
2072     tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2073     tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2074 
2075     /* Install the new nullification.  */
2076     if (cf) {
2077         TCGv sv;
2078         TCGV_UNUSED(sv);
2079         if (cf >> 1 == 6) {
2080             /* ??? The lshift is supposed to contribute to overflow.  */
2081             sv = do_add_sv(ctx, dest, add1, add2);
2082         }
2083         ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2084     }
2085 
2086     tcg_temp_free(add1);
2087     tcg_temp_free(add2);
2088     tcg_temp_free(dest);
2089 
2090     return nullify_end(ctx, DISAS_NEXT);
2091 }
2092 
2093 static const DisasInsn table_arith_log[] = {
2094     { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
2095     { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2096     { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
2097     { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
2098     { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
2099     { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
2100     { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2101     { 0x08000380u, 0xfc000fe0u, trans_uxor },
2102     { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2103     { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2104     { 0x08000440u, 0xfc000fe0u, trans_ds },
2105     { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2106     { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2107     { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2108     { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2109 };
2110 
2111 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2112 {
2113     target_long im = low_sextract(insn, 0, 11);
2114     unsigned e1 = extract32(insn, 11, 1);
2115     unsigned cf = extract32(insn, 12, 4);
2116     unsigned rt = extract32(insn, 16, 5);
2117     unsigned r2 = extract32(insn, 21, 5);
2118     unsigned o1 = extract32(insn, 26, 1);
2119     TCGv tcg_im, tcg_r2;
2120     DisasJumpType ret;
2121 
2122     if (cf) {
2123         nullify_over(ctx);
2124     }
2125 
2126     tcg_im = load_const(ctx, im);
2127     tcg_r2 = load_gpr(ctx, r2);
2128     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2129 
2130     return nullify_end(ctx, ret);
2131 }
2132 
2133 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2134 {
2135     target_long im = low_sextract(insn, 0, 11);
2136     unsigned e1 = extract32(insn, 11, 1);
2137     unsigned cf = extract32(insn, 12, 4);
2138     unsigned rt = extract32(insn, 16, 5);
2139     unsigned r2 = extract32(insn, 21, 5);
2140     TCGv tcg_im, tcg_r2;
2141     DisasJumpType ret;
2142 
2143     if (cf) {
2144         nullify_over(ctx);
2145     }
2146 
2147     tcg_im = load_const(ctx, im);
2148     tcg_r2 = load_gpr(ctx, r2);
2149     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2150 
2151     return nullify_end(ctx, ret);
2152 }
2153 
2154 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2155 {
2156     target_long im = low_sextract(insn, 0, 11);
2157     unsigned cf = extract32(insn, 12, 4);
2158     unsigned rt = extract32(insn, 16, 5);
2159     unsigned r2 = extract32(insn, 21, 5);
2160     TCGv tcg_im, tcg_r2;
2161     DisasJumpType ret;
2162 
2163     if (cf) {
2164         nullify_over(ctx);
2165     }
2166 
2167     tcg_im = load_const(ctx, im);
2168     tcg_r2 = load_gpr(ctx, r2);
2169     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2170 
2171     return nullify_end(ctx, ret);
2172 }
2173 
2174 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2175                                     const DisasInsn *di)
2176 {
2177     unsigned rt = extract32(insn, 0, 5);
2178     unsigned m = extract32(insn, 5, 1);
2179     unsigned sz = extract32(insn, 6, 2);
2180     unsigned a = extract32(insn, 13, 1);
2181     int disp = low_sextract(insn, 16, 5);
2182     unsigned rb = extract32(insn, 21, 5);
2183     int modify = (m ? (a ? -1 : 1) : 0);
2184     TCGMemOp mop = MO_TE | sz;
2185 
2186     return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2187 }
2188 
2189 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2190                                     const DisasInsn *di)
2191 {
2192     unsigned rt = extract32(insn, 0, 5);
2193     unsigned m = extract32(insn, 5, 1);
2194     unsigned sz = extract32(insn, 6, 2);
2195     unsigned u = extract32(insn, 13, 1);
2196     unsigned rx = extract32(insn, 16, 5);
2197     unsigned rb = extract32(insn, 21, 5);
2198     TCGMemOp mop = MO_TE | sz;
2199 
2200     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2201 }
2202 
2203 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2204                                     const DisasInsn *di)
2205 {
2206     int disp = low_sextract(insn, 0, 5);
2207     unsigned m = extract32(insn, 5, 1);
2208     unsigned sz = extract32(insn, 6, 2);
2209     unsigned a = extract32(insn, 13, 1);
2210     unsigned rr = extract32(insn, 16, 5);
2211     unsigned rb = extract32(insn, 21, 5);
2212     int modify = (m ? (a ? -1 : 1) : 0);
2213     TCGMemOp mop = MO_TE | sz;
2214 
2215     return do_store(ctx, rr, rb, disp, modify, mop);
2216 }
2217 
2218 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2219                                 const DisasInsn *di)
2220 {
2221     unsigned rt = extract32(insn, 0, 5);
2222     unsigned m = extract32(insn, 5, 1);
2223     unsigned i = extract32(insn, 12, 1);
2224     unsigned au = extract32(insn, 13, 1);
2225     unsigned rx = extract32(insn, 16, 5);
2226     unsigned rb = extract32(insn, 21, 5);
2227     TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2228     TCGv zero, addr, base, dest;
2229     int modify, disp = 0, scale = 0;
2230 
2231     nullify_over(ctx);
2232 
2233     /* ??? Share more code with do_load and do_load_{32,64}.  */
2234 
2235     if (i) {
2236         modify = (m ? (au ? -1 : 1) : 0);
2237         disp = low_sextract(rx, 0, 5);
2238         rx = 0;
2239     } else {
2240         modify = m;
2241         if (au) {
2242             scale = mop & MO_SIZE;
2243         }
2244     }
2245     if (modify) {
2246         /* Base register modification.  Make sure if RT == RB, we see
2247            the result of the load.  */
2248         dest = get_temp(ctx);
2249     } else {
2250         dest = dest_gpr(ctx, rt);
2251     }
2252 
2253     addr = tcg_temp_new();
2254     base = load_gpr(ctx, rb);
2255     if (rx) {
2256         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2257         tcg_gen_add_tl(addr, addr, base);
2258     } else {
2259         tcg_gen_addi_tl(addr, base, disp);
2260     }
2261 
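         /* LDCW loads the word and clears the memory location as a single
            atomic operation; model it as an atomic exchange with zero.  */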
2262     zero = tcg_const_tl(0);
2263     tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2264                            zero, MMU_USER_IDX, mop);
2265     if (modify) {
2266         save_gpr(ctx, rb, addr);
2267     }
2268     save_gpr(ctx, rt, dest);
2269 
2270     return nullify_end(ctx, DISAS_NEXT);
2271 }
2272 
2273 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
2274                                 const DisasInsn *di)
2275 {
2276     target_long disp = low_sextract(insn, 0, 5);
2277     unsigned m = extract32(insn, 5, 1);
2278     unsigned a = extract32(insn, 13, 1);
2279     unsigned rt = extract32(insn, 16, 5);
2280     unsigned rb = extract32(insn, 21, 5);
2281     TCGv addr, val;
2282 
2283     nullify_over(ctx);
2284 
2285     addr = tcg_temp_new();
2286     if (m || disp == 0) {
2287         tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2288     } else {
2289         tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2290     }
2291     val = load_gpr(ctx, rt);
2292 
2293     if (a) {
2294         gen_helper_stby_e(cpu_env, addr, val);
2295     } else {
2296         gen_helper_stby_b(cpu_env, addr, val);
2297     }
2298 
2299     if (m) {
2300         tcg_gen_addi_tl(addr, addr, disp);
2301         tcg_gen_andi_tl(addr, addr, ~3);
2302         save_gpr(ctx, rb, addr);
2303     }
2304     tcg_temp_free(addr);
2305 
2306     return nullify_end(ctx, DISAS_NEXT);
2307 }
2308 
2309 static const DisasInsn table_index_mem[] = {
2310     { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
2311     { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
2312     { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
2313     { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
2314     { 0x0c001300u, 0xfc0013c0, trans_stby },
2315 };
2316 
2317 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2318 {
2319     unsigned rt = extract32(insn, 21, 5);
2320     target_long i = assemble_21(insn);
2321     TCGv tcg_rt = dest_gpr(ctx, rt);
2322 
2323     tcg_gen_movi_tl(tcg_rt, i);
2324     save_gpr(ctx, rt, tcg_rt);
2325     cond_free(&ctx->null_cond);
2326 
2327     return DISAS_NEXT;
2328 }
2329 
2330 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2331 {
2332     unsigned rt = extract32(insn, 21, 5);
2333     target_long i = assemble_21(insn);
2334     TCGv tcg_rt = load_gpr(ctx, rt);
2335     TCGv tcg_r1 = dest_gpr(ctx, 1);
2336 
2337     tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2338     save_gpr(ctx, 1, tcg_r1);
2339     cond_free(&ctx->null_cond);
2340 
2341     return DISAS_NEXT;
2342 }
2343 
2344 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2345 {
2346     unsigned rb = extract32(insn, 21, 5);
2347     unsigned rt = extract32(insn, 16, 5);
2348     target_long i = assemble_16(insn);
2349     TCGv tcg_rt = dest_gpr(ctx, rt);
2350 
2351     /* Special case rb == 0, for the LDI pseudo-op.
2352        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2353     if (rb == 0) {
2354         tcg_gen_movi_tl(tcg_rt, i);
2355     } else {
2356         tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2357     }
2358     save_gpr(ctx, rt, tcg_rt);
2359     cond_free(&ctx->null_cond);
2360 
2361     return DISAS_NEXT;
2362 }
2363 
2364 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2365                                 bool is_mod, TCGMemOp mop)
2366 {
2367     unsigned rb = extract32(insn, 21, 5);
2368     unsigned rt = extract32(insn, 16, 5);
2369     target_long i = assemble_16(insn);
2370 
2371     return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2372 }
2373 
2374 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
2375 {
2376     unsigned rb = extract32(insn, 21, 5);
2377     unsigned rt = extract32(insn, 16, 5);
2378     target_long i = assemble_16a(insn);
2379     unsigned ext2 = extract32(insn, 1, 2);
2380 
2381     switch (ext2) {
2382     case 0:
2383     case 1:
2384         /* FLDW without modification.  */
2385         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2386     case 2:
2387         /* LDW with modification.  Note that the sign of I selects
2388            post-dec vs pre-inc.  */
2389         return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2390     default:
2391         return gen_illegal(ctx);
2392     }
2393 }
2394 
2395 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2396 {
2397     target_long i = assemble_16a(insn);
2398     unsigned t1 = extract32(insn, 1, 1);
2399     unsigned a = extract32(insn, 2, 1);
2400     unsigned t0 = extract32(insn, 16, 5);
2401     unsigned rb = extract32(insn, 21, 5);
2402 
2403     /* FLDW with modification.  */
2404     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2405 }
2406 
2407 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2408                                  bool is_mod, TCGMemOp mop)
2409 {
2410     unsigned rb = extract32(insn, 21, 5);
2411     unsigned rt = extract32(insn, 16, 5);
2412     target_long i = assemble_16(insn);
2413 
2414     return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2415 }
2416 
2417 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
2418 {
2419     unsigned rb = extract32(insn, 21, 5);
2420     unsigned rt = extract32(insn, 16, 5);
2421     target_long i = assemble_16a(insn);
2422     unsigned ext2 = extract32(insn, 1, 2);
2423 
2424     switch (ext2) {
2425     case 0:
2426     case 1:
2427         /* FSTW without modification.  */
2428         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2429     case 2:
2430         /* STW with modification.  */
2431         return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2432     default:
2433         return gen_illegal(ctx);
2434     }
2435 }
2436 
2437 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2438 {
2439     target_long i = assemble_16a(insn);
2440     unsigned t1 = extract32(insn, 1, 1);
2441     unsigned a = extract32(insn, 2, 1);
2442     unsigned t0 = extract32(insn, 16, 5);
2443     unsigned rb = extract32(insn, 21, 5);
2444 
2445     /* FSTW with modification.  */
2446     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2447 }
2448 
2449 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2450 {
2451     unsigned t0 = extract32(insn, 0, 5);
2452     unsigned m = extract32(insn, 5, 1);
2453     unsigned t1 = extract32(insn, 6, 1);
2454     unsigned ext3 = extract32(insn, 7, 3);
2455     /* unsigned cc = extract32(insn, 10, 2); */
2456     unsigned i = extract32(insn, 12, 1);
2457     unsigned ua = extract32(insn, 13, 1);
2458     unsigned rx = extract32(insn, 16, 5);
2459     unsigned rb = extract32(insn, 21, 5);
2460     unsigned rt = t1 * 32 + t0;
2461     int modify = (m ? (ua ? -1 : 1) : 0);
2462     int disp, scale;
2463 
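         /* In the indexed form (i == 0), ua requests scaling of the index by
            the access size.  In the short-displacement form (i == 1), the
            displacement is encoded in the rx field and ua selects modify
            before vs. after.  */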
2464     if (i == 0) {
2465         scale = (ua ? 2 : 0);
2466         disp = 0;
2467         modify = m;
2468     } else {
2469         disp = low_sextract(rx, 0, 5);
2470         scale = 0;
2471         rx = 0;
2472         modify = (m ? (ua ? -1 : 1) : 0);
2473     }
2474 
2475     switch (ext3) {
2476     case 0: /* FLDW */
2477         return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2478     case 4: /* FSTW */
2479         return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2480     }
2481     return gen_illegal(ctx);
2482 }
2483 
2484 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2485 {
2486     unsigned rt = extract32(insn, 0, 5);
2487     unsigned m = extract32(insn, 5, 1);
2488     unsigned ext4 = extract32(insn, 6, 4);
2489     /* unsigned cc = extract32(insn, 10, 2); */
2490     unsigned i = extract32(insn, 12, 1);
2491     unsigned ua = extract32(insn, 13, 1);
2492     unsigned rx = extract32(insn, 16, 5);
2493     unsigned rb = extract32(insn, 21, 5);
2494     int modify = (m ? (ua ? -1 : 1) : 0);
2495     int disp, scale;
2496 
2497     if (i == 0) {
2498         scale = (ua ? 3 : 0);
2499         disp = 0;
2500         modify = m;
2501     } else {
2502         disp = low_sextract(rx, 0, 5);
2503         scale = 0;
2504         rx = 0;
2505         modify = (m ? (ua ? -1 : 1) : 0);
2506     }
2507 
2508     switch (ext4) {
2509     case 0: /* FLDD */
2510         return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2511     case 8: /* FSTD */
2512         return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2513     default:
2514         return gen_illegal(ctx);
2515     }
2516 }
2517 
2518 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
2519                                 bool is_true, bool is_imm, bool is_dw)
2520 {
2521     target_long disp = assemble_12(insn) * 4;
2522     unsigned n = extract32(insn, 1, 1);
2523     unsigned c = extract32(insn, 13, 3);
2524     unsigned r = extract32(insn, 21, 5);
2525     unsigned cf = c * 2 + !is_true;
2526     TCGv dest, in1, in2, sv;
2527     DisasCond cond;
2528 
2529     nullify_over(ctx);
2530 
2531     if (is_imm) {
2532         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2533     } else {
2534         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2535     }
2536     in2 = load_gpr(ctx, r);
2537     dest = get_temp(ctx);
2538 
2539     tcg_gen_sub_tl(dest, in1, in2);
2540 
2541     TCGV_UNUSED(sv);
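         /* Only condition 6 (SV/NSV) needs the signed-overflow value.  */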
2542     if (c == 6) {
2543         sv = do_sub_sv(ctx, dest, in1, in2);
2544     }
2545 
2546     cond = do_sub_cond(cf, dest, in1, in2, sv);
2547     return do_cbranch(ctx, disp, n, &cond);
2548 }
2549 
2550 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
2551                                 bool is_true, bool is_imm)
2552 {
2553     target_long disp = assemble_12(insn) * 4;
2554     unsigned n = extract32(insn, 1, 1);
2555     unsigned c = extract32(insn, 13, 3);
2556     unsigned r = extract32(insn, 21, 5);
2557     unsigned cf = c * 2 + !is_true;
2558     TCGv dest, in1, in2, sv, cb_msb;
2559     DisasCond cond;
2560 
2561     nullify_over(ctx);
2562 
2563     if (is_imm) {
2564         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2565     } else {
2566         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2567     }
2568     in2 = load_gpr(ctx, r);
2569     dest = dest_gpr(ctx, r);
2570     TCGV_UNUSED(sv);
2571     TCGV_UNUSED(cb_msb);
2572 
2573     switch (c) {
2574     default:
2575         tcg_gen_add_tl(dest, in1, in2);
2576         break;
2577     case 4: case 5:
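             /* These conditions need the carry out of the addition; compute
                the sum with add2 so that the carry lands in cb_msb.  */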
2578         cb_msb = get_temp(ctx);
2579         tcg_gen_movi_tl(cb_msb, 0);
2580         tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2581         break;
2582     case 6:
2583         tcg_gen_add_tl(dest, in1, in2);
2584         sv = do_add_sv(ctx, dest, in1, in2);
2585         break;
2586     }
2587 
2588     cond = do_cond(cf, dest, cb_msb, sv);
2589     return do_cbranch(ctx, disp, n, &cond);
2590 }
2591 
2592 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
2593 {
2594     target_long disp = assemble_12(insn) * 4;
2595     unsigned n = extract32(insn, 1, 1);
2596     unsigned c = extract32(insn, 15, 1);
2597     unsigned r = extract32(insn, 16, 5);
2598     unsigned p = extract32(insn, 21, 5);
2599     unsigned i = extract32(insn, 26, 1);
2600     TCGv tmp, tcg_r;
2601     DisasCond cond;
2602 
2603     nullify_over(ctx);
2604 
2605     tmp = tcg_temp_new();
2606     tcg_r = load_gpr(ctx, r);
2607     if (i) {
2608         tcg_gen_shli_tl(tmp, tcg_r, p);
2609     } else {
2610         tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2611     }
2612 
2613     cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2614     tcg_temp_free(tmp);
2615     return do_cbranch(ctx, disp, n, &cond);
2616 }
2617 
2618 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2619 {
2620     target_long disp = assemble_12(insn) * 4;
2621     unsigned n = extract32(insn, 1, 1);
2622     unsigned c = extract32(insn, 13, 3);
2623     unsigned t = extract32(insn, 16, 5);
2624     unsigned r = extract32(insn, 21, 5);
2625     TCGv dest;
2626     DisasCond cond;
2627 
2628     nullify_over(ctx);
2629 
2630     dest = dest_gpr(ctx, r);
2631     if (is_imm) {
2632         tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2633     } else if (t == 0) {
2634         tcg_gen_movi_tl(dest, 0);
2635     } else {
2636         tcg_gen_mov_tl(dest, cpu_gr[t]);
2637     }
2638 
2639     cond = do_sed_cond(c, dest);
2640     return do_cbranch(ctx, disp, n, &cond);
2641 }
2642 
2643 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2644                                     const DisasInsn *di)
2645 {
2646     unsigned rt = extract32(insn, 0, 5);
2647     unsigned c = extract32(insn, 13, 3);
2648     unsigned r1 = extract32(insn, 16, 5);
2649     unsigned r2 = extract32(insn, 21, 5);
2650     TCGv dest;
2651 
2652     if (c) {
2653         nullify_over(ctx);
2654     }
2655 
2656     dest = dest_gpr(ctx, rt);
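         /* Shift the 64-bit pair R1:R2 right by SAR and keep the low word.
            R1 == 0 degenerates to a plain right shift of R2, and R1 == R2
            to a 32-bit rotate right.  */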
2657     if (r1 == 0) {
2658         tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2659         tcg_gen_shr_tl(dest, dest, cpu_sar);
2660     } else if (r1 == r2) {
2661         TCGv_i32 t32 = tcg_temp_new_i32();
2662         tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2663         tcg_gen_rotr_i32(t32, t32, cpu_sar);
2664         tcg_gen_extu_i32_tl(dest, t32);
2665         tcg_temp_free_i32(t32);
2666     } else {
2667         TCGv_i64 t = tcg_temp_new_i64();
2668         TCGv_i64 s = tcg_temp_new_i64();
2669 
2670         tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2671         tcg_gen_extu_tl_i64(s, cpu_sar);
2672         tcg_gen_shr_i64(t, t, s);
2673         tcg_gen_trunc_i64_tl(dest, t);
2674 
2675         tcg_temp_free_i64(t);
2676         tcg_temp_free_i64(s);
2677     }
2678     save_gpr(ctx, rt, dest);
2679 
2680     /* Install the new nullification.  */
2681     cond_free(&ctx->null_cond);
2682     if (c) {
2683         ctx->null_cond = do_sed_cond(c, dest);
2684     }
2685     return nullify_end(ctx, DISAS_NEXT);
2686 }
2687 
2688 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2689                                      const DisasInsn *di)
2690 {
2691     unsigned rt = extract32(insn, 0, 5);
2692     unsigned cpos = extract32(insn, 5, 5);
2693     unsigned c = extract32(insn, 13, 3);
2694     unsigned r1 = extract32(insn, 16, 5);
2695     unsigned r2 = extract32(insn, 21, 5);
2696     unsigned sa = 31 - cpos;
2697     TCGv dest, t2;
2698 
2699     if (c) {
2700         nullify_over(ctx);
2701     }
2702 
2703     dest = dest_gpr(ctx, rt);
2704     t2 = load_gpr(ctx, r2);
2705     if (r1 == r2) {
2706         TCGv_i32 t32 = tcg_temp_new_i32();
2707         tcg_gen_trunc_tl_i32(t32, t2);
2708         tcg_gen_rotri_i32(t32, t32, sa);
2709         tcg_gen_extu_i32_tl(dest, t32);
2710         tcg_temp_free_i32(t32);
2711     } else if (r1 == 0) {
2712         tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2713     } else {
2714         TCGv t0 = tcg_temp_new();
2715         tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2716         tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2717         tcg_temp_free(t0);
2718     }
2719     save_gpr(ctx, rt, dest);
2720 
2721     /* Install the new nullification.  */
2722     cond_free(&ctx->null_cond);
2723     if (c) {
2724         ctx->null_cond = do_sed_cond(c, dest);
2725     }
2726     return nullify_end(ctx, DISAS_NEXT);
2727 }
2728 
2729 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2730                                      const DisasInsn *di)
2731 {
2732     unsigned clen = extract32(insn, 0, 5);
2733     unsigned is_se = extract32(insn, 10, 1);
2734     unsigned c = extract32(insn, 13, 3);
2735     unsigned rt = extract32(insn, 16, 5);
2736     unsigned rr = extract32(insn, 21, 5);
2737     unsigned len = 32 - clen;
2738     TCGv dest, src, tmp;
2739 
2740     if (c) {
2741         nullify_over(ctx);
2742     }
2743 
2744     dest = dest_gpr(ctx, rt);
2745     src = load_gpr(ctx, rr);
2746     tmp = tcg_temp_new();
2747 
2748     /* Recall that SAR is using big-endian bit numbering.  */
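         /* XOR with TARGET_LONG_BITS - 1 turns big-endian bit position P
            into the ordinary shift count (TARGET_LONG_BITS - 1) - P.  */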
2749     tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2750     if (is_se) {
2751         tcg_gen_sar_tl(dest, src, tmp);
2752         tcg_gen_sextract_tl(dest, dest, 0, len);
2753     } else {
2754         tcg_gen_shr_tl(dest, src, tmp);
2755         tcg_gen_extract_tl(dest, dest, 0, len);
2756     }
2757     tcg_temp_free(tmp);
2758     save_gpr(ctx, rt, dest);
2759 
2760     /* Install the new nullification.  */
2761     cond_free(&ctx->null_cond);
2762     if (c) {
2763         ctx->null_cond = do_sed_cond(c, dest);
2764     }
2765     return nullify_end(ctx, DISAS_NEXT);
2766 }
2767 
2768 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2769                                      const DisasInsn *di)
2770 {
2771     unsigned clen = extract32(insn, 0, 5);
2772     unsigned pos = extract32(insn, 5, 5);
2773     unsigned is_se = extract32(insn, 10, 1);
2774     unsigned c = extract32(insn, 13, 3);
2775     unsigned rt = extract32(insn, 16, 5);
2776     unsigned rr = extract32(insn, 21, 5);
2777     unsigned len = 32 - clen;
2778     unsigned cpos = 31 - pos;
2779     TCGv dest, src;
2780 
2781     if (c) {
2782         nullify_over(ctx);
2783     }
2784 
2785     dest = dest_gpr(ctx, rt);
2786     src = load_gpr(ctx, rr);
2787     if (is_se) {
2788         tcg_gen_sextract_tl(dest, src, cpos, len);
2789     } else {
2790         tcg_gen_extract_tl(dest, src, cpos, len);
2791     }
2792     save_gpr(ctx, rt, dest);
2793 
2794     /* Install the new nullification.  */
2795     cond_free(&ctx->null_cond);
2796     if (c) {
2797         ctx->null_cond = do_sed_cond(c, dest);
2798     }
2799     return nullify_end(ctx, DISAS_NEXT);
2800 }
2801 
2802 static const DisasInsn table_sh_ex[] = {
2803     { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2804     { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2805     { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2806     { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2807 };
2808 
2809 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2810                                       const DisasInsn *di)
2811 {
2812     unsigned clen = extract32(insn, 0, 5);
2813     unsigned cpos = extract32(insn, 5, 5);
2814     unsigned nz = extract32(insn, 10, 1);
2815     unsigned c = extract32(insn, 13, 3);
2816     target_long val = low_sextract(insn, 16, 5);
2817     unsigned rt = extract32(insn, 21, 5);
2818     unsigned len = 32 - clen;
2819     target_long mask0, mask1;
2820     TCGv dest;
2821 
2822     if (c) {
2823         nullify_over(ctx);
2824     }
2825     if (cpos + len > 32) {
2826         len = 32 - cpos;
2827     }
2828 
2829     dest = dest_gpr(ctx, rt);
2830     mask0 = deposit64(0, cpos, len, val);
2831     mask1 = deposit64(-1, cpos, len, val);
2832 
2833     if (nz) {
2834         TCGv src = load_gpr(ctx, rt);
2835         if (mask1 != -1) {
2836             tcg_gen_andi_tl(dest, src, mask1);
2837             src = dest;
2838         }
2839         tcg_gen_ori_tl(dest, src, mask0);
2840     } else {
2841         tcg_gen_movi_tl(dest, mask0);
2842     }
2843     save_gpr(ctx, rt, dest);
2844 
2845     /* Install the new nullification.  */
2846     cond_free(&ctx->null_cond);
2847     if (c) {
2848         ctx->null_cond = do_sed_cond(c, dest);
2849     }
2850     return nullify_end(ctx, DISAS_NEXT);
2851 }
2852 
2853 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
2854                                     const DisasInsn *di)
2855 {
2856     unsigned clen = extract32(insn, 0, 5);
2857     unsigned cpos = extract32(insn, 5, 5);
2858     unsigned nz = extract32(insn, 10, 1);
2859     unsigned c = extract32(insn, 13, 3);
2860     unsigned rr = extract32(insn, 16, 5);
2861     unsigned rt = extract32(insn, 21, 5);
2862     unsigned rs = nz ? rt : 0;
2863     unsigned len = 32 - clen;
2864     TCGv dest, val;
2865 
2866     if (c) {
2867         nullify_over(ctx);
2868     }
2869     if (cpos + len > 32) {
2870         len = 32 - cpos;
2871     }
2872 
2873     dest = dest_gpr(ctx, rt);
2874     val = load_gpr(ctx, rr);
2875     if (rs == 0) {
2876         tcg_gen_deposit_z_tl(dest, val, cpos, len);
2877     } else {
2878         tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2879     }
2880     save_gpr(ctx, rt, dest);
2881 
2882     /* Install the new nullification.  */
2883     cond_free(&ctx->null_cond);
2884     if (c) {
2885         ctx->null_cond = do_sed_cond(c, dest);
2886     }
2887     return nullify_end(ctx, DISAS_NEXT);
2888 }
2889 
2890 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
2891                                     const DisasInsn *di)
2892 {
2893     unsigned clen = extract32(insn, 0, 5);
2894     unsigned nz = extract32(insn, 10, 1);
2895     unsigned i = extract32(insn, 12, 1);
2896     unsigned c = extract32(insn, 13, 3);
2897     unsigned rt = extract32(insn, 21, 5);
2898     unsigned rs = nz ? rt : 0;
2899     unsigned len = 32 - clen;
2900     TCGv val, mask, tmp, shift, dest;
2901     unsigned msb = 1U << (len - 1);
2902 
2903     if (c) {
2904         nullify_over(ctx);
2905     }
2906 
2907     if (i) {
2908         val = load_const(ctx, low_sextract(insn, 16, 5));
2909     } else {
2910         val = load_gpr(ctx, extract32(insn, 16, 5));
2911     }
2912     dest = dest_gpr(ctx, rt);
2913     shift = tcg_temp_new();
2914     tmp = tcg_temp_new();
2915 
2916     /* Convert big-endian bit numbering in SAR to left-shift.  */
2917     tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2918 
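         /* msb + (msb - 1) is a mask covering the low LEN bits.  */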
2919     mask = tcg_const_tl(msb + (msb - 1));
2920     tcg_gen_and_tl(tmp, val, mask);
2921     if (rs) {
2922         tcg_gen_shl_tl(mask, mask, shift);
2923         tcg_gen_shl_tl(tmp, tmp, shift);
2924         tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2925         tcg_gen_or_tl(dest, dest, tmp);
2926     } else {
2927         tcg_gen_shl_tl(dest, tmp, shift);
2928     }
2929     tcg_temp_free(shift);
2930     tcg_temp_free(mask);
2931     tcg_temp_free(tmp);
2932     save_gpr(ctx, rt, dest);
2933 
2934     /* Install the new nullification.  */
2935     cond_free(&ctx->null_cond);
2936     if (c) {
2937         ctx->null_cond = do_sed_cond(c, dest);
2938     }
2939     return nullify_end(ctx, DISAS_NEXT);
2940 }
2941 
2942 static const DisasInsn table_depw[] = {
2943     { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2944     { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2945     { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2946 };
2947 
2948 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2949 {
2950     unsigned n = extract32(insn, 1, 1);
2951     unsigned b = extract32(insn, 21, 5);
2952     target_long disp = assemble_17(insn);
2953 
2954     /* unsigned s = low_uextract(insn, 13, 3); */
2955     /* ??? It seems like there should be a good way of using
2956        "be disp(sr2, r0)", the canonical gateway entry mechanism
2957        to our advantage.  But that appears to be inconvenient to
2958        manage alongside branch delay slots.  Therefore we handle
2959        entry into the gateway page via absolute address.  */
2960 
2961     /* Since we don't implement spaces, just branch.  Do notice the special
2962        case of "be disp(*,r0)" using a direct branch to disp, so that we can
2963        goto_tb to the TB containing the syscall.  */
2964     if (b == 0) {
2965         return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2966     } else {
2967         TCGv tmp = get_temp(ctx);
2968         tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2969         return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2970     }
2971 }
2972 
2973 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2974                               const DisasInsn *di)
2975 {
2976     unsigned n = extract32(insn, 1, 1);
2977     unsigned link = extract32(insn, 21, 5);
2978     target_long disp = assemble_17(insn);
2979 
2980     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2981 }
2982 
2983 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2984                                    const DisasInsn *di)
2985 {
2986     unsigned n = extract32(insn, 1, 1);
2987     target_long disp = assemble_22(insn);
2988 
2989     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2990 }
2991 
2992 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
2993                                const DisasInsn *di)
2994 {
2995     unsigned n = extract32(insn, 1, 1);
2996     unsigned rx = extract32(insn, 16, 5);
2997     unsigned link = extract32(insn, 21, 5);
2998     TCGv tmp = get_temp(ctx);
2999 
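         /* The BLR target is the current instruction plus 8 (i.e. past the
            delay slot) plus GR[x] scaled by 8.  */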
3000     tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3001     tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3002     return do_ibranch(ctx, tmp, link, n);
3003 }
3004 
3005 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3006                               const DisasInsn *di)
3007 {
3008     unsigned n = extract32(insn, 1, 1);
3009     unsigned rx = extract32(insn, 16, 5);
3010     unsigned rb = extract32(insn, 21, 5);
3011     TCGv dest;
3012 
3013     if (rx == 0) {
3014         dest = load_gpr(ctx, rb);
3015     } else {
3016         dest = get_temp(ctx);
3017         tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3018         tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3019     }
3020     return do_ibranch(ctx, dest, 0, n);
3021 }
3022 
3023 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3024                                const DisasInsn *di)
3025 {
3026     unsigned n = extract32(insn, 1, 1);
3027     unsigned rb = extract32(insn, 21, 5);
3028     unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3029 
3030     return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3031 }
3032 
3033 static const DisasInsn table_branch[] = {
3034     { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3035     { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3036     { 0xe8004000u, 0xfc00fffdu, trans_blr },
3037     { 0xe800c000u, 0xfc00fffdu, trans_bv },
3038     { 0xe800d000u, 0xfc00dffcu, trans_bve },
3039 };
3040 
3041 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3042                                       const DisasInsn *di)
3043 {
3044     unsigned rt = extract32(insn, 0, 5);
3045     unsigned ra = extract32(insn, 21, 5);
3046     return do_fop_wew(ctx, rt, ra, di->f.wew);
3047 }
3048 
3049 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3050                                       const DisasInsn *di)
3051 {
3052     unsigned rt = assemble_rt64(insn);
3053     unsigned ra = assemble_ra64(insn);
3054     return do_fop_wew(ctx, rt, ra, di->f.wew);
3055 }
3056 
3057 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3058                                    const DisasInsn *di)
3059 {
3060     unsigned rt = extract32(insn, 0, 5);
3061     unsigned ra = extract32(insn, 21, 5);
3062     return do_fop_ded(ctx, rt, ra, di->f.ded);
3063 }
3064 
3065 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3066                                       const DisasInsn *di)
3067 {
3068     unsigned rt = extract32(insn, 0, 5);
3069     unsigned ra = extract32(insn, 21, 5);
3070     return do_fop_wed(ctx, rt, ra, di->f.wed);
3071 }
3072 
3073 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3074                                       const DisasInsn *di)
3075 {
3076     unsigned rt = assemble_rt64(insn);
3077     unsigned ra = extract32(insn, 21, 5);
3078     return do_fop_wed(ctx, rt, ra, di->f.wed);
3079 }
3080 
3081 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3082                                       const DisasInsn *di)
3083 {
3084     unsigned rt = extract32(insn, 0, 5);
3085     unsigned ra = extract32(insn, 21, 5);
3086     return do_fop_dew(ctx, rt, ra, di->f.dew);
3087 }
3088 
3089 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3090                                       const DisasInsn *di)
3091 {
3092     unsigned rt = extract32(insn, 0, 5);
3093     unsigned ra = assemble_ra64(insn);
3094     return do_fop_dew(ctx, rt, ra, di->f.dew);
3095 }
3096 
3097 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3098                                        const DisasInsn *di)
3099 {
3100     unsigned rt = extract32(insn, 0, 5);
3101     unsigned rb = extract32(insn, 16, 5);
3102     unsigned ra = extract32(insn, 21, 5);
3103     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3104 }
3105 
3106 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3107                                        const DisasInsn *di)
3108 {
3109     unsigned rt = assemble_rt64(insn);
3110     unsigned rb = assemble_rb64(insn);
3111     unsigned ra = assemble_ra64(insn);
3112     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3113 }
3114 
3115 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3116                                     const DisasInsn *di)
3117 {
3118     unsigned rt = extract32(insn, 0, 5);
3119     unsigned rb = extract32(insn, 16, 5);
3120     unsigned ra = extract32(insn, 21, 5);
3121     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3122 }
3123 
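     /* FCPY, FABS, FNEG and FNEGABS only copy or adjust the sign bit, so
        implement them with integer moves and bit operations rather than
        softfloat calls.  */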
3124 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3125 {
3126     tcg_gen_mov_i32(dst, src);
3127 }
3128 
3129 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3130 {
3131     tcg_gen_mov_i64(dst, src);
3132 }
3133 
3134 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3135 {
3136     tcg_gen_andi_i32(dst, src, INT32_MAX);
3137 }
3138 
3139 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3140 {
3141     tcg_gen_andi_i64(dst, src, INT64_MAX);
3142 }
3143 
3144 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3145 {
3146     tcg_gen_xori_i32(dst, src, INT32_MIN);
3147 }
3148 
3149 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3150 {
3151     tcg_gen_xori_i64(dst, src, INT64_MIN);
3152 }
3153 
3154 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3155 {
3156     tcg_gen_ori_i32(dst, src, INT32_MIN);
3157 }
3158 
3159 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3160 {
3161     tcg_gen_ori_i64(dst, src, INT64_MIN);
3162 }
3163 
3164 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3165                                unsigned y, unsigned c)
3166 {
3167     TCGv_i32 ta, tb, tc, ty;
3168 
3169     nullify_over(ctx);
3170 
3171     ta = load_frw0_i32(ra);
3172     tb = load_frw0_i32(rb);
3173     ty = tcg_const_i32(y);
3174     tc = tcg_const_i32(c);
3175 
3176     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3177 
3178     tcg_temp_free_i32(ta);
3179     tcg_temp_free_i32(tb);
3180     tcg_temp_free_i32(ty);
3181     tcg_temp_free_i32(tc);
3182 
3183     return nullify_end(ctx, DISAS_NEXT);
3184 }
3185 
3186 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3187                                      const DisasInsn *di)
3188 {
3189     unsigned c = extract32(insn, 0, 5);
3190     unsigned y = extract32(insn, 13, 3);
3191     unsigned rb = extract32(insn, 16, 5);
3192     unsigned ra = extract32(insn, 21, 5);
3193     return do_fcmp_s(ctx, ra, rb, y, c);
3194 }
3195 
3196 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3197                                      const DisasInsn *di)
3198 {
3199     unsigned c = extract32(insn, 0, 5);
3200     unsigned y = extract32(insn, 13, 3);
3201     unsigned rb = assemble_rb64(insn);
3202     unsigned ra = assemble_ra64(insn);
3203     return do_fcmp_s(ctx, ra, rb, y, c);
3204 }
3205 
3206 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3207                                   const DisasInsn *di)
3208 {
3209     unsigned c = extract32(insn, 0, 5);
3210     unsigned y = extract32(insn, 13, 3);
3211     unsigned rb = extract32(insn, 16, 5);
3212     unsigned ra = extract32(insn, 21, 5);
3213     TCGv_i64 ta, tb;
3214     TCGv_i32 tc, ty;
3215 
3216     nullify_over(ctx);
3217 
3218     ta = load_frd0(ra);
3219     tb = load_frd0(rb);
3220     ty = tcg_const_i32(y);
3221     tc = tcg_const_i32(c);
3222 
3223     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3224 
3225     tcg_temp_free_i64(ta);
3226     tcg_temp_free_i64(tb);
3227     tcg_temp_free_i32(ty);
3228     tcg_temp_free_i32(tc);
3229 
3230     return nullify_end(ctx, DISAS_NEXT);
3231 }
3232 
3233 static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
3234                                    const DisasInsn *di)
3235 {
3236     unsigned y = extract32(insn, 13, 3);
3237     unsigned cbit = (y ^ 1) - 1;
3238     TCGv t;
3239 
3240     nullify_over(ctx);
3241 
3242     t = tcg_temp_new();
3243     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3244     tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3245     ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3246     tcg_temp_free(t);
3247 
3248     return nullify_end(ctx, DISAS_NEXT);
3249 }
3250 
3251 static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
3252                                    const DisasInsn *di)
3253 {
3254     unsigned c = extract32(insn, 0, 5);
3255     int mask;
3256     bool inv = false;
3257     TCGv t;
3258 
3259     nullify_over(ctx);
3260 
3261     t = tcg_temp_new();
3262     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3263 
3264     switch (c) {
3265     case 0: /* simple */
3266         tcg_gen_andi_tl(t, t, 0x4000000);
3267         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3268         goto done;
3269     case 2: /* rej */
3270         inv = true;
3271         /* fallthru */
3272     case 1: /* acc */
3273         mask = 0x43ff800;
3274         break;
3275     case 6: /* rej8 */
3276         inv = true;
3277         /* fallthru */
3278     case 5: /* acc8 */
3279         mask = 0x43f8000;
3280         break;
3281     case 9: /* acc6 */
3282         mask = 0x43e0000;
3283         break;
3284     case 13: /* acc4 */
3285         mask = 0x4380000;
3286         break;
3287     case 17: /* acc2 */
3288         mask = 0x4200000;
3289         break;
3290     default:
3291         return gen_illegal(ctx);
3292     }
3293     if (inv) {
3294         TCGv c = load_const(ctx, mask);
3295         tcg_gen_or_tl(t, t, c);
3296         ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3297     } else {
3298         tcg_gen_andi_tl(t, t, mask);
3299         ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3300     }
3301  done:
3302     return nullify_end(ctx, DISAS_NEXT);
3303 }
3304 
3305 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
3306                                  const DisasInsn *di)
3307 {
3308     unsigned rt = extract32(insn, 0, 5);
3309     unsigned rb = assemble_rb64(insn);
3310     unsigned ra = assemble_ra64(insn);
3311     TCGv_i64 a, b;
3312 
3313     nullify_over(ctx);
3314 
3315     a = load_frw0_i64(ra);
3316     b = load_frw0_i64(rb);
3317     tcg_gen_mul_i64(a, a, b);
3318     save_frd(rt, a);
3319     tcg_temp_free_i64(a);
3320     tcg_temp_free_i64(b);
3321 
3322     return nullify_end(ctx, DISAS_NEXT);
3323 }
3324 
3325 #define FOP_DED  trans_fop_ded, .f.ded
3326 #define FOP_DEDD trans_fop_dedd, .f.dedd
3327 
3328 #define FOP_WEW  trans_fop_wew_0c, .f.wew
3329 #define FOP_DEW  trans_fop_dew_0c, .f.dew
3330 #define FOP_WED  trans_fop_wed_0c, .f.wed
3331 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3332 
3333 static const DisasInsn table_float_0c[] = {
3334     /* floating point class zero */
3335     { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3336     { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3337     { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3338     { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3339     { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3340     { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3341 
3342     { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3343     { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3344     { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3345     { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3346     { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3347     { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3348 
3349     /* floating point class three */
3350     { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3351     { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3352     { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3353     { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3354 
3355     { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3356     { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3357     { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3358     { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3359 
3360     /* floating point class one */
3361     /* float/float */
3362     { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3363     { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3364     /* int/float */
3365     { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3366     { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3367     { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3368     { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3369     /* float/int */
3370     { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3371     { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3372     { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3373     { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3374     /* float/int truncate */
3375     { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3376     { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3377     { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3378     { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3379     /* uint/float */
3380     { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3381     { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3382     { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3383     { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3384     /* float/uint */
3385     { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3386     { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3387     { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3388     { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3389     /* float/uint truncate */
3390     { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3391     { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3392     { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3393     { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3394 
3395     /* floating point class two */
3396     { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3397     { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3398     { 0x30002420, 0xffffffe0, trans_ftest_q },
3399     { 0x30000420, 0xffff1fff, trans_ftest_t },
3400 
3401     /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3402        This is machine/revision == 0, which is reserved for the simulator.  */
3403     { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3404 };
3405 
3406 #undef FOP_WEW
3407 #undef FOP_DEW
3408 #undef FOP_WED
3409 #undef FOP_WEWW
3410 #define FOP_WEW  trans_fop_wew_0e, .f.wew
3411 #define FOP_DEW  trans_fop_dew_0e, .f.dew
3412 #define FOP_WED  trans_fop_wed_0e, .f.wed
3413 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3414 
3415 static const DisasInsn table_float_0e[] = {
3416     /* floating point class zero */
3417     { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3418     { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3419     { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3420     { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3421     { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3422     { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3423 
3424     { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3425     { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3426     { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3427     { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3428     { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3429     { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3430 
3431     /* floating point class three */
3432     { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3433     { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3434     { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3435     { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3436 
3437     { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3438     { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3439     { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3440     { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3441 
3442     { 0x38004700, 0xfc00ef60, trans_xmpyu },
3443 
3444     /* floating point class one */
3445     /* float/float */
3446     { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3447     { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3448     /* int/float */
3449     { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3450     { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3451     { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3452     { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3453     /* float/int */
3454     { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3455     { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3456     { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3457     { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3458     /* float/int truncate */
3459     { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3460     { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3461     { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3462     { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3463     /* uint/float */
3464     { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3465     { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3466     { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3467     { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3468     /* float/uint */
3469     { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3470     { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3471     { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3472     { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3473     /* float/uint truncate */
3474     { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3475     { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3476     { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3477     { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3478 
3479     /* floating point class two */
3480     { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3481     { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3482 };
3483 
3484 #undef FOP_WEW
3485 #undef FOP_DEW
3486 #undef FOP_WED
3487 #undef FOP_WEWW
3488 #undef FOP_DED
3489 #undef FOP_DEDD
3490 
3491 /* Convert the fmpyadd single-precision register encodings to standard.  */
3492 static inline int fmpyadd_s_reg(unsigned r)
3493 {
3494     return (r & 16) * 2 + 16 + (r & 15);
3495 }
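
     /* For example: fmpyadd_s_reg(5)  == 0*2  + 16 + 5 == 21 and
        fmpyadd_s_reg(21) == 16*2 + 16 + 5 == 53, i.e. encodings 0..15
        map to 16..31 and encodings 16..31 map to 48..63.  */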
3496 
3497 static DisasJumpType trans_fmpyadd(DisasContext *ctx,
3498                                    uint32_t insn, bool is_sub)
3499 {
3500     unsigned tm = extract32(insn, 0, 5);
3501     unsigned f = extract32(insn, 5, 1);
3502     unsigned ra = extract32(insn, 6, 5);
3503     unsigned ta = extract32(insn, 11, 5);
3504     unsigned rm2 = extract32(insn, 16, 5);
3505     unsigned rm1 = extract32(insn, 21, 5);
3506 
3507     nullify_over(ctx);
3508 
3509     /* Independent multiply & add/sub, with undefined behaviour
3510        if outputs overlap inputs.  */
3511     if (f == 0) {
3512         tm = fmpyadd_s_reg(tm);
3513         ra = fmpyadd_s_reg(ra);
3514         ta = fmpyadd_s_reg(ta);
3515         rm2 = fmpyadd_s_reg(rm2);
3516         rm1 = fmpyadd_s_reg(rm1);
3517         do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3518         do_fop_weww(ctx, ta, ta, ra,
3519                     is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3520     } else {
3521         do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3522         do_fop_dedd(ctx, ta, ta, ra,
3523                     is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3524     }
3525 
3526     return nullify_end(ctx, DISAS_NEXT);
3527 }
3528 
3529 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3530                                       const DisasInsn *di)
3531 {
3532     unsigned rt = assemble_rt64(insn);
3533     unsigned neg = extract32(insn, 5, 1);
3534     unsigned rm1 = assemble_ra64(insn);
3535     unsigned rm2 = assemble_rb64(insn);
3536     unsigned ra3 = assemble_rc64(insn);
3537     TCGv_i32 a, b, c;
3538 
3539     nullify_over(ctx);
3540     a = load_frw0_i32(rm1);
3541     b = load_frw0_i32(rm2);
3542     c = load_frw0_i32(ra3);
3543 
3544     if (neg) {
3545         gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3546     } else {
3547         gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3548     }
3549 
3550     tcg_temp_free_i32(b);
3551     tcg_temp_free_i32(c);
3552     save_frw_i32(rt, a);
3553     tcg_temp_free_i32(a);
3554     return nullify_end(ctx, DISAS_NEXT);
3555 }
3556 
3557 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3558                                       const DisasInsn *di)
3559 {
3560     unsigned rt = extract32(insn, 0, 5);
3561     unsigned neg = extract32(insn, 5, 1);
3562     unsigned rm1 = extract32(insn, 21, 5);
3563     unsigned rm2 = extract32(insn, 16, 5);
3564     unsigned ra3 = assemble_rc64(insn);
3565     TCGv_i64 a, b, c;
3566 
3567     nullify_over(ctx);
3568     a = load_frd0(rm1);
3569     b = load_frd0(rm2);
3570     c = load_frd0(ra3);
3571 
3572     if (neg) {
3573         gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3574     } else {
3575         gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3576     }
3577 
3578     tcg_temp_free_i64(b);
3579     tcg_temp_free_i64(c);
3580     save_frd(rt, a);
3581     tcg_temp_free_i64(a);
3582     return nullify_end(ctx, DISAS_NEXT);
3583 }
3584 
3585 static const DisasInsn table_fp_fused[] = {
3586     { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3587     { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
3588 };
3589 
3590 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
3591                                          const DisasInsn table[], size_t n)
3592 {
3593     size_t i;
3594     for (i = 0; i < n; ++i) {
3595         if ((insn & table[i].mask) == table[i].insn) {
3596             return table[i].trans(ctx, insn, &table[i]);
3597         }
3598     }
3599     return gen_illegal(ctx);
3600 }
3601 
3602 #define translate_table(ctx, insn, table) \
3603     translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
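
     /* Decoding is a linear scan: an entry matches when (insn & mask)
        equals the entry's insn pattern, the mask clearing the operand
        fields.  As an illustration (assuming 5-bit register fields at
        bits 0..4 and 21..25 for this format), an fcpy_s word with
        r = 7 and t = 9 is 0x30004000 | (7 << 21) | 9 == 0x30e04009,
        and 0x30e04009 & 0xfc1fffe0 == 0x30004000, which matches the
        first entry of table_float_0c above.  */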
3604 
3605 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
3606 {
3607     uint32_t opc = extract32(insn, 26, 6);
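         /* For instance, an fcpy_s word such as 0x30004000 has
            extract32(insn, 26, 6) == 0x0c, so the switch below
            dispatches it to table_float_0c.  */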
3608 
3609     switch (opc) {
3610     case 0x00: /* system op */
3611         return translate_table(ctx, insn, table_system);
3612     case 0x01:
3613         return translate_table(ctx, insn, table_mem_mgmt);
3614     case 0x02:
3615         return translate_table(ctx, insn, table_arith_log);
3616     case 0x03:
3617         return translate_table(ctx, insn, table_index_mem);
3618     case 0x06:
3619         return trans_fmpyadd(ctx, insn, false);
3620     case 0x08:
3621         return trans_ldil(ctx, insn);
3622     case 0x09:
3623         return trans_copr_w(ctx, insn);
3624     case 0x0A:
3625         return trans_addil(ctx, insn);
3626     case 0x0B:
3627         return trans_copr_dw(ctx, insn);
3628     case 0x0C:
3629         return translate_table(ctx, insn, table_float_0c);
3630     case 0x0D:
3631         return trans_ldo(ctx, insn);
3632     case 0x0E:
3633         return translate_table(ctx, insn, table_float_0e);
3634 
3635     case 0x10:
3636         return trans_load(ctx, insn, false, MO_UB);
3637     case 0x11:
3638         return trans_load(ctx, insn, false, MO_TEUW);
3639     case 0x12:
3640         return trans_load(ctx, insn, false, MO_TEUL);
3641     case 0x13:
3642         return trans_load(ctx, insn, true, MO_TEUL);
3643     case 0x16:
3644         return trans_fload_mod(ctx, insn);
3645     case 0x17:
3646         return trans_load_w(ctx, insn);
3647     case 0x18:
3648         return trans_store(ctx, insn, false, MO_UB);
3649     case 0x19:
3650         return trans_store(ctx, insn, false, MO_TEUW);
3651     case 0x1A:
3652         return trans_store(ctx, insn, false, MO_TEUL);
3653     case 0x1B:
3654         return trans_store(ctx, insn, true, MO_TEUL);
3655     case 0x1E:
3656         return trans_fstore_mod(ctx, insn);
3657     case 0x1F:
3658         return trans_store_w(ctx, insn);
3659 
3660     case 0x20:
3661         return trans_cmpb(ctx, insn, true, false, false);
3662     case 0x21:
3663         return trans_cmpb(ctx, insn, true, true, false);
3664     case 0x22:
3665         return trans_cmpb(ctx, insn, false, false, false);
3666     case 0x23:
3667         return trans_cmpb(ctx, insn, false, true, false);
3668     case 0x24:
3669         return trans_cmpiclr(ctx, insn);
3670     case 0x25:
3671         return trans_subi(ctx, insn);
3672     case 0x26:
3673         return trans_fmpyadd(ctx, insn, true);
3674     case 0x27:
3675         return trans_cmpb(ctx, insn, true, false, true);
3676     case 0x28:
3677         return trans_addb(ctx, insn, true, false);
3678     case 0x29:
3679         return trans_addb(ctx, insn, true, true);
3680     case 0x2A:
3681         return trans_addb(ctx, insn, false, false);
3682     case 0x2B:
3683         return trans_addb(ctx, insn, false, true);
3684     case 0x2C:
3685     case 0x2D:
3686         return trans_addi(ctx, insn);
3687     case 0x2E:
3688         return translate_table(ctx, insn, table_fp_fused);
3689     case 0x2F:
3690         return trans_cmpb(ctx, insn, false, false, true);
3691 
3692     case 0x30:
3693     case 0x31:
3694         return trans_bb(ctx, insn);
3695     case 0x32:
3696         return trans_movb(ctx, insn, false);
3697     case 0x33:
3698         return trans_movb(ctx, insn, true);
3699     case 0x34:
3700         return translate_table(ctx, insn, table_sh_ex);
3701     case 0x35:
3702         return translate_table(ctx, insn, table_depw);
3703     case 0x38:
3704         return trans_be(ctx, insn, false);
3705     case 0x39:
3706         return trans_be(ctx, insn, true);
3707     case 0x3A:
3708         return translate_table(ctx, insn, table_branch);
3709 
3710     case 0x04: /* spopn */
3711     case 0x05: /* diag */
3712     case 0x0F: /* product specific */
3713         break;
3714 
3715     case 0x07: /* unassigned */
3716     case 0x15: /* unassigned */
3717     case 0x1D: /* unassigned */
3718     case 0x37: /* unassigned */
3719     case 0x3F: /* unassigned */
3720     default:
3721         break;
3722     }
3723     return gen_illegal(ctx);
3724 }
3725 
3726 static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
3727                                       CPUState *cs, int max_insns)
3728 {
3729     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3730     TranslationBlock *tb = ctx->base.tb;
3731     int i, bound;
3732 
3733     ctx->cs = cs;
3734     ctx->iaoq_f = tb->pc;
3735     ctx->iaoq_b = tb->cs_base;
3736     ctx->iaoq_n = -1;
3737     TCGV_UNUSED(ctx->iaoq_n_var);
3738 
3739     ctx->ntemps = 0;
3740     for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
3741         TCGV_UNUSED(ctx->temps[i]);
3742     }
3743 
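         /* Bound the TB to the end of the current page.  Illustration,
            assuming 4 KiB target pages: if tb->pc sits 16 bytes before a
            page boundary, (tb->pc | TARGET_PAGE_MASK) is (target_ulong)-16,
            so bound == 4 and at most four insns fit before the crossing.  */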
3744     bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
3745     return MIN(max_insns, bound);
3746 }
3747 
3748 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3749 {
3750     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3751 
3752     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
3753     ctx->null_cond = cond_make_f();
3754     ctx->psw_n_nonzero = false;
3755     if (ctx->base.tb->flags & 1) {
3756         ctx->null_cond.c = TCG_COND_ALWAYS;
3757         ctx->psw_n_nonzero = true;
3758     }
3759     ctx->null_lab = NULL;
3760 }
3761 
3762 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3763 {
3764     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3765 
3766     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3767 }
3768 
3769 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3770                                       const CPUBreakpoint *bp)
3771 {
3772     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3773 
3774     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3775     ctx->base.pc_next = ctx->iaoq_f + 4;
3776     return true;
3777 }
3778 
3779 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3780 {
3781     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3782     CPUHPPAState *env = cs->env_ptr;
3783     DisasJumpType ret;
3784     int i, n;
3785 
3786     /* Execute one insn.  */
3787     if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
3788         ret = do_page_zero(ctx);
3789         assert(ret != DISAS_NEXT);
3790     } else {
3791         /* Always fetch the insn, even if nullified, so that we check
3792            the page permissions for execute.  */
3793         uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3794 
3795         /* Set up the IA queue for the next insn.
3796            This will be overwritten by a branch.  */
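             /* (Illustration: for straight-line code with iaoq_f == 0x1000
                and iaoq_b == 0x1004, iaoq_n becomes 0x1008.  An iaoq_b of
                -1 means the back of the queue is only known at run time,
                so iaoq_n must instead be computed from cpu_iaoq_b.)  */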
3797         if (ctx->iaoq_b == -1) {
3798             ctx->iaoq_n = -1;
3799             ctx->iaoq_n_var = get_temp(ctx);
3800             tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3801         } else {
3802             ctx->iaoq_n = ctx->iaoq_b + 4;
3803             TCGV_UNUSED(ctx->iaoq_n_var);
3804         }
3805 
3806         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3807             ctx->null_cond.c = TCG_COND_NEVER;
3808             ret = DISAS_NEXT;
3809         } else {
3810             ret = translate_one(ctx, insn);
3811             assert(ctx->null_lab == NULL);
3812         }
3813     }
3814 
3815     /* Free any temporaries allocated.  */
3816     for (i = 0, n = ctx->ntemps; i < n; ++i) {
3817         tcg_temp_free(ctx->temps[i]);
3818         TCGV_UNUSED(ctx->temps[i]);
3819     }
3820     ctx->ntemps = 0;
3821 
3822     /* Advance the insn queue.  */
3823     /* ??? The non-linear instruction restriction is purely due to
3824        the debugging dump.  Otherwise we *could* follow unconditional
3825        branches within the same page.  */
3826     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3827         if (ctx->null_cond.c == TCG_COND_NEVER
3828             || ctx->null_cond.c == TCG_COND_ALWAYS) {
3829             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3830             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3831             ret = DISAS_NORETURN;
3832         } else {
3833             ret = DISAS_IAQ_N_STALE;
3834         }
3835     }
3836     ctx->iaoq_f = ctx->iaoq_b;
3837     ctx->iaoq_b = ctx->iaoq_n;
3838     ctx->base.is_jmp = ret;
3839 
3840     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3841         return;
3842     }
3843     if (ctx->iaoq_f == -1) {
3844         tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3845         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3846         nullify_save(ctx);
3847         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3848     } else if (ctx->iaoq_b == -1) {
3849         tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3850     }
3851 }
3852 
3853 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
3854 {
3855     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3856 
3857     switch (ctx->base.is_jmp) {
3858     case DISAS_NORETURN:
3859         break;
3860     case DISAS_TOO_MANY:
3861     case DISAS_IAQ_N_STALE:
3862         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
3863         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
3864         nullify_save(ctx);
3865         /* FALLTHRU */
3866     case DISAS_IAQ_N_UPDATED:
3867         if (ctx->base.singlestep_enabled) {
3868             gen_excp_1(EXCP_DEBUG);
3869         } else {
3870             tcg_gen_lookup_and_goto_ptr();
3871         }
3872         break;
3873     default:
3874         g_assert_not_reached();
3875     }
3876 
3877     /* We don't actually use this during normal translation,
3878        but we do need to keep it up to date for the generic main loop.  */
3879     ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
3880 }
3881 
3882 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3883 {
3884     TranslationBlock *tb = dcbase->tb;
3885 
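         /* These low addresses are the page-zero entry points emulated by
            do_page_zero() rather than real guest code, so log fixed names
            instead of disassembling.  */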
3886     switch (tb->pc) {
3887     case 0x00:
3888         qemu_log("IN:\n0x00000000:  (null)\n");
3889         break;
3890     case 0xb0:
3891         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
3892         break;
3893     case 0xe0:
3894         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
3895         break;
3896     case 0x100:
3897         qemu_log("IN:\n0x00000100:  syscall\n");
3898         break;
3899     default:
3900         qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3901         log_target_disas(cs, tb->pc, tb->size, 1);
3902         break;
3903     }
3904 }
3905 
3906 static const TranslatorOps hppa_tr_ops = {
3907     .init_disas_context = hppa_tr_init_disas_context,
3908     .tb_start           = hppa_tr_tb_start,
3909     .insn_start         = hppa_tr_insn_start,
3910     .breakpoint_check   = hppa_tr_breakpoint_check,
3911     .translate_insn     = hppa_tr_translate_insn,
3912     .tb_stop            = hppa_tr_tb_stop,
3913     .disas_log          = hppa_tr_disas_log,
3914 };
3915 
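     /* translator_loop() drives the hooks above, roughly: init_disas_context,
        tb_start, then insn_start + translate_insn per instruction until
        is_jmp leaves DISAS_NEXT or the insn budget is exhausted, and
        finally tb_stop (plus disas_log when instruction logging is on).  */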
3916 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3918 {
3919     DisasContext ctx;
3920     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3921 }
3922 
3923 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3924                           target_ulong *data)
3925 {
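         /* data[0] and data[1] are the iaoq_f and iaoq_b values recorded
            by tcg_gen_insn_start() in hppa_tr_insn_start() above.  */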
3926     env->iaoq_f = data[0];
3927     if (data[1] != -1) {
3928         env->iaoq_b = data[1];
3929     }
3930     /* Since we were executing the instruction at IAOQ_F, and took some
3931        sort of action that provoked the cpu_restore_state, we can infer
3932        that the instruction was not nullified.  */
3933     env->psw_n = 0;
3934 }
3935