xref: /openbmc/qemu/target/hppa/translate.c (revision c39f95dc)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
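/* Describes a comparison to be generated later: C is the TCG condition and
   A0/A1 its operands.  A0_IS_N is set when A0 aliases the PSW[N] global;
   A1_IS_0 when A1 is an implicit zero that has not yet been materialized
   (see cond_prep).  */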
33 typedef struct DisasCond {
34     TCGCond c;
35     TCGv a0, a1;
36     bool a0_is_n;
37     bool a1_is_0;
38 } DisasCond;
39 
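/* Per-TB translation state.  IAOQ_F and IAOQ_B hold the front and back
   entries of the instruction address offset queue for the insn being
   translated; IAOQ_N describes the entry that follows, with the value -1
   meaning it is only known at run time, in IAOQ_N_VAR.  */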
40 typedef struct DisasContext {
41     DisasContextBase base;
42     CPUState *cs;
43 
44     target_ulong iaoq_f;
45     target_ulong iaoq_b;
46     target_ulong iaoq_n;
47     TCGv iaoq_n_var;
48 
49     int ntemps;
50     TCGv temps[8];
51 
52     DisasCond null_cond;
53     TCGLabel *null_lab;
54 
55     bool psw_n_nonzero;
56 } DisasContext;
57 
58 /* Target-specific return values from translate_one, indicating the
59    state of the TB.  Note that DISAS_NEXT indicates that we are not
60    exiting the TB.  */
61 
62 /* We are not using a goto_tb (for whatever reason), but have updated
63    the iaq (for whatever reason), so don't do it again on exit.  */
64 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
65 
66 /* We are exiting the TB, but have neither emitted a goto_tb, nor
67    updated the iaq for the next instruction to be executed.  */
68 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
69 
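/* One row of an instruction decode table: a candidate opcode matches when
   (opcode & MASK) == INSN, and TRANS then emits the translation.  The F
   union optionally carries a typed generator callback; the letters of each
   member name give its operand types (t = target_ulong, w = i32, d = i64,
   e = env).  */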
70 typedef struct DisasInsn {
71     uint32_t insn, mask;
72     DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
73                            const struct DisasInsn *f);
74     union {
75         void (*ttt)(TCGv, TCGv, TCGv);
76         void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
77         void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
78         void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
79         void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
80         void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
81         void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
82     } f;
83 } DisasInsn;
84 
85 /* global register indexes */
86 static TCGv_env cpu_env;
87 static TCGv cpu_gr[32];
88 static TCGv cpu_iaoq_f;
89 static TCGv cpu_iaoq_b;
90 static TCGv cpu_sar;
91 static TCGv cpu_psw_n;
92 static TCGv cpu_psw_v;
93 static TCGv cpu_psw_cb;
94 static TCGv cpu_psw_cb_msb;
95 static TCGv cpu_cr26;
96 static TCGv cpu_cr27;
97 
98 #include "exec/gen-icount.h"
99 
100 void hppa_translate_init(void)
101 {
102 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
103 
104     typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
105     static const GlobalVar vars[] = {
106         DEF_VAR(sar),
107         DEF_VAR(cr26),
108         DEF_VAR(cr27),
109         DEF_VAR(psw_n),
110         DEF_VAR(psw_v),
111         DEF_VAR(psw_cb),
112         DEF_VAR(psw_cb_msb),
113         DEF_VAR(iaoq_f),
114         DEF_VAR(iaoq_b),
115     };
116 
117 #undef DEF_VAR
118 
119     /* Use the symbolic register names that match the disassembler.  */
120     static const char gr_names[32][4] = {
121         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
122         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
123         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
124         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
125     };
126 
127     static bool done_init = false;
128     int i;
129 
130     if (done_init) {
131         return;
132     }
133     done_init = true;
134 
135     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
136     tcg_ctx.tcg_env = cpu_env;
137 
138     TCGV_UNUSED(cpu_gr[0]);
139     for (i = 1; i < 32; i++) {
140         cpu_gr[i] = tcg_global_mem_new(cpu_env,
141                                        offsetof(CPUHPPAState, gr[i]),
142                                        gr_names[i]);
143     }
144 
145     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
146         const GlobalVar *v = &vars[i];
147         *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
148     }
149 }
150 
151 static DisasCond cond_make_f(void)
152 {
153     DisasCond r = { .c = TCG_COND_NEVER };
154     TCGV_UNUSED(r.a0);
155     TCGV_UNUSED(r.a1);
156     return r;
157 }
158 
159 static DisasCond cond_make_n(void)
160 {
161     DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
162     r.a0 = cpu_psw_n;
163     TCGV_UNUSED(r.a1);
164     return r;
165 }
166 
167 static DisasCond cond_make_0(TCGCond c, TCGv a0)
168 {
169     DisasCond r = { .c = c, .a1_is_0 = true };
170 
171     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
172     r.a0 = tcg_temp_new();
173     tcg_gen_mov_tl(r.a0, a0);
174     TCGV_UNUSED(r.a1);
175 
176     return r;
177 }
178 
179 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
180 {
181     DisasCond r = { .c = c };
182 
183     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
184     r.a0 = tcg_temp_new();
185     tcg_gen_mov_tl(r.a0, a0);
186     r.a1 = tcg_temp_new();
187     tcg_gen_mov_tl(r.a1, a1);
188 
189     return r;
190 }
191 
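/* Materialize the implicit zero operand so the condition can be used with
   TCG operations that require two concrete operands.  */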
192 static void cond_prep(DisasCond *cond)
193 {
194     if (cond->a1_is_0) {
195         cond->a1_is_0 = false;
196         cond->a1 = tcg_const_tl(0);
197     }
198 }
199 
200 static void cond_free(DisasCond *cond)
201 {
202     switch (cond->c) {
203     default:
204         if (!cond->a0_is_n) {
205             tcg_temp_free(cond->a0);
206         }
207         if (!cond->a1_is_0) {
208             tcg_temp_free(cond->a1);
209         }
210         cond->a0_is_n = false;
211         cond->a1_is_0 = false;
212         TCGV_UNUSED(cond->a0);
213         TCGV_UNUSED(cond->a1);
214         /* fallthru */
215     case TCG_COND_ALWAYS:
216         cond->c = TCG_COND_NEVER;
217         break;
218     case TCG_COND_NEVER:
219         break;
220     }
221 }
222 
223 static TCGv get_temp(DisasContext *ctx)
224 {
225     unsigned i = ctx->ntemps++;
226     g_assert(i < ARRAY_SIZE(ctx->temps));
227     return ctx->temps[i] = tcg_temp_new();
228 }
229 
230 static TCGv load_const(DisasContext *ctx, target_long v)
231 {
232     TCGv t = get_temp(ctx);
233     tcg_gen_movi_tl(t, v);
234     return t;
235 }
236 
237 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
238 {
239     if (reg == 0) {
240         TCGv t = get_temp(ctx);
241         tcg_gen_movi_tl(t, 0);
242         return t;
243     } else {
244         return cpu_gr[reg];
245     }
246 }
247 
248 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
249 {
250     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
251         return get_temp(ctx);
252     } else {
253         return cpu_gr[reg];
254     }
255 }
256 
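/* Write T into DEST.  If the current insn is subject to nullification,
   emit a movcond instead, so that DEST keeps its old value whenever the
   nullification condition holds.  */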
257 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
258 {
259     if (ctx->null_cond.c != TCG_COND_NEVER) {
260         cond_prep(&ctx->null_cond);
261         tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
262                            ctx->null_cond.a1, dest, t);
263     } else {
264         tcg_gen_mov_tl(dest, t);
265     }
266 }
267 
268 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
269 {
270     if (reg != 0) {
271         save_or_nullify(ctx, cpu_gr[reg], t);
272     }
273 }
274 
275 #ifdef HOST_WORDS_BIGENDIAN
276 # define HI_OFS  0
277 # define LO_OFS  4
278 #else
279 # define HI_OFS  4
280 # define LO_OFS  0
281 #endif
282 
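/* Single-precision FP registers are numbered 0..63 and overlay the 32
   doubles in FR[]: bits 4:0 select the double and bit 5 selects its
   least-significant 32-bit half, with HI_OFS/LO_OFS accounting for host
   endianness.  */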
283 static TCGv_i32 load_frw_i32(unsigned rt)
284 {
285     TCGv_i32 ret = tcg_temp_new_i32();
286     tcg_gen_ld_i32(ret, cpu_env,
287                    offsetof(CPUHPPAState, fr[rt & 31])
288                    + (rt & 32 ? LO_OFS : HI_OFS));
289     return ret;
290 }
291 
292 static TCGv_i32 load_frw0_i32(unsigned rt)
293 {
294     if (rt == 0) {
295         return tcg_const_i32(0);
296     } else {
297         return load_frw_i32(rt);
298     }
299 }
300 
301 static TCGv_i64 load_frw0_i64(unsigned rt)
302 {
303     if (rt == 0) {
304         return tcg_const_i64(0);
305     } else {
306         TCGv_i64 ret = tcg_temp_new_i64();
307         tcg_gen_ld32u_i64(ret, cpu_env,
308                           offsetof(CPUHPPAState, fr[rt & 31])
309                           + (rt & 32 ? LO_OFS : HI_OFS));
310         return ret;
311     }
312 }
313 
314 static void save_frw_i32(unsigned rt, TCGv_i32 val)
315 {
316     tcg_gen_st_i32(val, cpu_env,
317                    offsetof(CPUHPPAState, fr[rt & 31])
318                    + (rt & 32 ? LO_OFS : HI_OFS));
319 }
320 
321 #undef HI_OFS
322 #undef LO_OFS
323 
324 static TCGv_i64 load_frd(unsigned rt)
325 {
326     TCGv_i64 ret = tcg_temp_new_i64();
327     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
328     return ret;
329 }
330 
331 static TCGv_i64 load_frd0(unsigned rt)
332 {
333     if (rt == 0) {
334         return tcg_const_i64(0);
335     } else {
336         return load_frd(rt);
337     }
338 }
339 
340 static void save_frd(unsigned rt, TCGv_i64 val)
341 {
342     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
343 }
344 
345 /* Skip over the implementation of an insn that has been nullified.
346    Use this when the insn is too complex for a conditional move.  */
347 static void nullify_over(DisasContext *ctx)
348 {
349     if (ctx->null_cond.c != TCG_COND_NEVER) {
350         /* The always condition should have been handled in the main loop.  */
351         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
352 
353         ctx->null_lab = gen_new_label();
354         cond_prep(&ctx->null_cond);
355 
356         /* If we're using PSW[N], copy it to a temp because... */
357         if (ctx->null_cond.a0_is_n) {
358             ctx->null_cond.a0_is_n = false;
359             ctx->null_cond.a0 = tcg_temp_new();
360             tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
361         }
362         /* ... we clear it before branching over the implementation,
363            so that (1) it's clear after nullifying this insn and
364            (2) if this insn nullifies the next, PSW[N] is valid.  */
365         if (ctx->psw_n_nonzero) {
366             ctx->psw_n_nonzero = false;
367             tcg_gen_movi_tl(cpu_psw_n, 0);
368         }
369 
370         tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
371                           ctx->null_cond.a1, ctx->null_lab);
372         cond_free(&ctx->null_cond);
373     }
374 }
375 
376 /* Save the current nullification state to PSW[N].  */
377 static void nullify_save(DisasContext *ctx)
378 {
379     if (ctx->null_cond.c == TCG_COND_NEVER) {
380         if (ctx->psw_n_nonzero) {
381             tcg_gen_movi_tl(cpu_psw_n, 0);
382         }
383         return;
384     }
385     if (!ctx->null_cond.a0_is_n) {
386         cond_prep(&ctx->null_cond);
387         tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
388                            ctx->null_cond.a0, ctx->null_cond.a1);
389         ctx->psw_n_nonzero = true;
390     }
391     cond_free(&ctx->null_cond);
392 }
393 
394 /* Set PSW[N] to X.  The intention is that this is used immediately
395    before a goto_tb/exit_tb, so that there is no fallthru path to other
396    code within the TB.  Therefore we do not update psw_n_nonzero.  */
397 static void nullify_set(DisasContext *ctx, bool x)
398 {
399     if (ctx->psw_n_nonzero || x) {
400         tcg_gen_movi_tl(cpu_psw_n, x);
401     }
402 }
403 
404 /* Mark the end of an instruction that may have been nullified.
405    This is the pair to nullify_over.  */
406 static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
407 {
408     TCGLabel *null_lab = ctx->null_lab;
409 
410     if (likely(null_lab == NULL)) {
411         /* The current insn either wasn't conditional, or handled its
412            condition without emitting a branch, so the (new) setting of
413            NULL_COND can be applied directly to the next insn.  */
414         return status;
415     }
416     ctx->null_lab = NULL;
417 
418     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
419         /* The next instruction will be unconditional,
420            and NULL_COND already reflects that.  */
421         gen_set_label(null_lab);
422     } else {
423         /* The insn that we just executed is itself nullifying the next
424            instruction.  Store the condition in the PSW[N] global.
425            We asserted PSW[N] = 0 in nullify_over, so that after the
426            label we have the proper value in place.  */
427         nullify_save(ctx);
428         gen_set_label(null_lab);
429         ctx->null_cond = cond_make_n();
430     }
431 
432     assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
433     if (status == DISAS_NORETURN) {
434         status = DISAS_NEXT;
435     }
436     return status;
437 }
438 
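/* Copy one IAOQ entry into DEST: IVAL == -1 means the value is known only
   at run time and must be taken from VVAL.  */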
439 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
440 {
441     if (unlikely(ival == -1)) {
442         tcg_gen_mov_tl(dest, vval);
443     } else {
444         tcg_gen_movi_tl(dest, ival);
445     }
446 }
447 
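/* Compute a direct branch target.  Displacements are relative to the insn
   following the delay slot, i.e. the current front address plus 8.  */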
448 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
449 {
450     return ctx->iaoq_f + disp + 8;
451 }
452 
453 static void gen_excp_1(int exception)
454 {
455     TCGv_i32 t = tcg_const_i32(exception);
456     gen_helper_excp(cpu_env, t);
457     tcg_temp_free_i32(t);
458 }
459 
460 static DisasJumpType gen_excp(DisasContext *ctx, int exception)
461 {
462     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
463     copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
464     nullify_save(ctx);
465     gen_excp_1(exception);
466     return DISAS_NORETURN;
467 }
468 
469 static DisasJumpType gen_illegal(DisasContext *ctx)
470 {
471     nullify_over(ctx);
472     return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
473 }
474 
475 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
476 {
477     /* Suppress goto_tb in the case of single-stepping and IO.  */
478     if ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled) {
479         return false;
480     }
481     return true;
482 }
483 
484 /* If the next insn is to be nullified, and it's on the same page,
485    and we're not attempting to set a breakpoint on it, then we can
486    totally skip the nullified insn.  This avoids creating and
487    executing a TB that merely branches to the next TB.  */
488 static bool use_nullify_skip(DisasContext *ctx)
489 {
490     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
491             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
492 }
493 
494 static void gen_goto_tb(DisasContext *ctx, int which,
495                         target_ulong f, target_ulong b)
496 {
497     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
498         tcg_gen_goto_tb(which);
499         tcg_gen_movi_tl(cpu_iaoq_f, f);
500         tcg_gen_movi_tl(cpu_iaoq_b, b);
501         tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
502     } else {
503         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
504         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
505         if (ctx->base.singlestep_enabled) {
506             gen_excp_1(EXCP_DEBUG);
507         } else {
508             tcg_gen_lookup_and_goto_ptr();
509         }
510     }
511 }
512 
513 /* PA has a habit of taking the LSB of a field and using that as the sign,
514    with the rest of the field becoming the least significant bits.  */
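/* For example, low_sextract(insn, 0, 14) takes the sign from bit 0 and the
   magnitude from bits 13:1, yielding a value in [-0x2000, 0x1fff].  */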
515 static target_long low_sextract(uint32_t val, int pos, int len)
516 {
517     target_ulong x = -(target_ulong)extract32(val, pos, 1);
518     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
519     return x;
520 }
521 
522 static unsigned assemble_rt64(uint32_t insn)
523 {
524     unsigned r1 = extract32(insn, 6, 1);
525     unsigned r0 = extract32(insn, 0, 5);
526     return r1 * 32 + r0;
527 }
528 
529 static unsigned assemble_ra64(uint32_t insn)
530 {
531     unsigned r1 = extract32(insn, 7, 1);
532     unsigned r0 = extract32(insn, 21, 5);
533     return r1 * 32 + r0;
534 }
535 
536 static unsigned assemble_rb64(uint32_t insn)
537 {
538     unsigned r1 = extract32(insn, 12, 1);
539     unsigned r0 = extract32(insn, 16, 5);
540     return r1 * 32 + r0;
541 }
542 
543 static unsigned assemble_rc64(uint32_t insn)
544 {
545     unsigned r2 = extract32(insn, 8, 1);
546     unsigned r1 = extract32(insn, 13, 3);
547     unsigned r0 = extract32(insn, 9, 2);
548     return r2 * 32 + r1 * 4 + r0;
549 }
550 
551 static target_long assemble_12(uint32_t insn)
552 {
553     target_ulong x = -(target_ulong)(insn & 1);
554     x = (x <<  1) | extract32(insn, 2, 1);
555     x = (x << 10) | extract32(insn, 3, 10);
556     return x;
557 }
558 
559 static target_long assemble_16(uint32_t insn)
560 {
561     /* Take the name from PA2.0, which produces a 16-bit number
562        only with wide mode; otherwise a 14-bit number.  Since we don't
563        implement wide mode, this is always the 14-bit number.  */
564     return low_sextract(insn, 0, 14);
565 }
566 
567 static target_long assemble_16a(uint32_t insn)
568 {
569     /* Take the name from PA2.0, which produces a 14-bit shifted number
570        only with wide mode; otherwise a 12-bit shifted number.  Since we
571        don't implement wide mode, this is always the 12-bit number.  */
572     target_ulong x = -(target_ulong)(insn & 1);
573     x = (x << 11) | extract32(insn, 2, 11);
574     return x << 2;
575 }
576 
577 static target_long assemble_17(uint32_t insn)
578 {
579     target_ulong x = -(target_ulong)(insn & 1);
580     x = (x <<  5) | extract32(insn, 16, 5);
581     x = (x <<  1) | extract32(insn, 2, 1);
582     x = (x << 10) | extract32(insn, 3, 10);
583     return x << 2;
584 }
585 
586 static target_long assemble_21(uint32_t insn)
587 {
588     target_ulong x = -(target_ulong)(insn & 1);
589     x = (x << 11) | extract32(insn, 1, 11);
590     x = (x <<  2) | extract32(insn, 14, 2);
591     x = (x <<  5) | extract32(insn, 16, 5);
592     x = (x <<  2) | extract32(insn, 12, 2);
593     return x << 11;
594 }
595 
596 static target_long assemble_22(uint32_t insn)
597 {
598     target_ulong x = -(target_ulong)(insn & 1);
599     x = (x << 10) | extract32(insn, 16, 10);
600     x = (x <<  1) | extract32(insn, 2, 1);
601     x = (x << 10) | extract32(insn, 3, 10);
602     return x << 2;
603 }
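/* assemble_12 through assemble_22 reconstruct signed immediates whose bits
   are scattered across the instruction word; each takes its sign from insn
   bit 0, following the low_sextract convention above.  */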
604 
605 /* The parisc documentation describes only the general interpretation of
606    the conditions, without describing their exact implementation.  The
607    interpretations do not stand up well when considering ADD,C and SUB,B.
608    However, considering the Addition, Subtraction and Logical conditions
609    as a whole it would appear that these relations are similar to what
610    a traditional NZCV set of flags would produce.  */
611 
612 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
613 {
614     DisasCond cond;
615     TCGv tmp;
616 
617     switch (cf >> 1) {
618     case 0: /* Never / TR */
619         cond = cond_make_f();
620         break;
621     case 1: /* = / <>        (Z / !Z) */
622         cond = cond_make_0(TCG_COND_EQ, res);
623         break;
624     case 2: /* < / >=        (N / !N) */
625         cond = cond_make_0(TCG_COND_LT, res);
626         break;
627     case 3: /* <= / >        (N | Z / !N & !Z) */
628         cond = cond_make_0(TCG_COND_LE, res);
629         break;
630     case 4: /* NUV / UV      (!C / C) */
631         cond = cond_make_0(TCG_COND_EQ, cb_msb);
632         break;
633     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
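        /* -CB_MSB is 0 or all-ones, so the AND yields RES when there was a
           carry out and 0 otherwise; comparing with 0 thus tests !C | Z.  */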
634         tmp = tcg_temp_new();
635         tcg_gen_neg_tl(tmp, cb_msb);
636         tcg_gen_and_tl(tmp, tmp, res);
637         cond = cond_make_0(TCG_COND_EQ, tmp);
638         tcg_temp_free(tmp);
639         break;
640     case 6: /* SV / NSV      (V / !V) */
641         cond = cond_make_0(TCG_COND_LT, sv);
642         break;
643     case 7: /* OD / EV */
644         tmp = tcg_temp_new();
645         tcg_gen_andi_tl(tmp, res, 1);
646         cond = cond_make_0(TCG_COND_NE, tmp);
647         tcg_temp_free(tmp);
648         break;
649     default:
650         g_assert_not_reached();
651     }
652     if (cf & 1) {
653         cond.c = tcg_invert_cond(cond.c);
654     }
655 
656     return cond;
657 }
658 
659 /* Similar, but for the special case of subtraction without borrow, we
660    can use the inputs directly.  This can allow other computation to be
661    deleted as unused.  */
662 
663 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
664 {
665     DisasCond cond;
666 
667     switch (cf >> 1) {
668     case 1: /* = / <> */
669         cond = cond_make(TCG_COND_EQ, in1, in2);
670         break;
671     case 2: /* < / >= */
672         cond = cond_make(TCG_COND_LT, in1, in2);
673         break;
674     case 3: /* <= / > */
675         cond = cond_make(TCG_COND_LE, in1, in2);
676         break;
677     case 4: /* << / >>= */
678         cond = cond_make(TCG_COND_LTU, in1, in2);
679         break;
680     case 5: /* <<= / >> */
681         cond = cond_make(TCG_COND_LEU, in1, in2);
682         break;
683     default:
684         return do_cond(cf, res, sv, sv);
685     }
686     if (cf & 1) {
687         cond.c = tcg_invert_cond(cond.c);
688     }
689 
690     return cond;
691 }
692 
693 /* Similar, but for logicals, where the carry and overflow bits are not
694    computed, and use of them is undefined.  */
695 
696 static DisasCond do_log_cond(unsigned cf, TCGv res)
697 {
698     switch (cf >> 1) {
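    /* The carry and overflow conditions are undefined after a logical
       operation; reduce them to never (cf even) or always (cf odd).  */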
699     case 4: case 5: case 6:
700         cf &= 1;
701         break;
702     }
703     return do_cond(cf, res, res, res);
704 }
705 
706 /* Similar, but for shift/extract/deposit conditions.  */
707 
708 static DisasCond do_sed_cond(unsigned orig, TCGv res)
709 {
710     unsigned c, f;
711 
712     /* Convert the compressed condition codes to standard.
713        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
714        4-7 are the reverse of 0-3.  */
715     c = orig & 3;
716     if (c == 3) {
717         c = 7;
718     }
719     f = (orig & 4) / 4;
720 
721     return do_log_cond(c * 2 + f, res);
722 }
723 
724 /* Similar, but for unit conditions.  */
725 
726 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
727 {
728     DisasCond cond;
729     TCGv tmp, cb;
730 
731     TCGV_UNUSED(cb);
732     if (cf & 8) {
733         /* Since we want to test lots of carry-out bits all at once, do not
734          * do our normal thing and compute carry-in of bit B+1 since that
735          * leaves us with carry bits spread across two words.
736          */
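        /* CB = ((IN1 | IN2) & ~RES) | (IN1 & IN2) recovers the per-bit
           carry-out of IN1 + IN2 from the result.  */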
737         cb = tcg_temp_new();
738         tmp = tcg_temp_new();
739         tcg_gen_or_tl(cb, in1, in2);
740         tcg_gen_and_tl(tmp, in1, in2);
741         tcg_gen_andc_tl(cb, cb, res);
742         tcg_gen_or_tl(cb, cb, tmp);
743         tcg_temp_free(tmp);
744     }
745 
746     switch (cf >> 1) {
747     case 0: /* never / TR */
748     case 1: /* undefined */
749     case 5: /* undefined */
750         cond = cond_make_f();
751         break;
752 
753     case 2: /* SBZ / NBZ */
754         /* See hasless(v,1) from
755          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
756          */
757         tmp = tcg_temp_new();
758         tcg_gen_subi_tl(tmp, res, 0x01010101u);
759         tcg_gen_andc_tl(tmp, tmp, res);
760         tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
761         cond = cond_make_0(TCG_COND_NE, tmp);
762         tcg_temp_free(tmp);
763         break;
764 
765     case 3: /* SHZ / NHZ */
766         tmp = tcg_temp_new();
767         tcg_gen_subi_tl(tmp, res, 0x00010001u);
768         tcg_gen_andc_tl(tmp, tmp, res);
769         tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
770         cond = cond_make_0(TCG_COND_NE, tmp);
771         tcg_temp_free(tmp);
772         break;
773 
774     case 4: /* SDC / NDC */
775         tcg_gen_andi_tl(cb, cb, 0x88888888u);
776         cond = cond_make_0(TCG_COND_NE, cb);
777         break;
778 
779     case 6: /* SBC / NBC */
780         tcg_gen_andi_tl(cb, cb, 0x80808080u);
781         cond = cond_make_0(TCG_COND_NE, cb);
782         break;
783 
784     case 7: /* SHC / NHC */
785         tcg_gen_andi_tl(cb, cb, 0x80008000u);
786         cond = cond_make_0(TCG_COND_NE, cb);
787         break;
788 
789     default:
790         g_assert_not_reached();
791     }
792     if (cf & 8) {
793         tcg_temp_free(cb);
794     }
795     if (cf & 1) {
796         cond.c = tcg_invert_cond(cond.c);
797     }
798 
799     return cond;
800 }
801 
802 /* Compute signed overflow for addition.  */
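/* Only the sign bit of the result matters: (RES ^ IN1) & ~(IN1 ^ IN2) is
   negative exactly when the addition overflowed, which do_cond then tests
   with TCG_COND_LT.  */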
803 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
804 {
805     TCGv sv = get_temp(ctx);
806     TCGv tmp = tcg_temp_new();
807 
808     tcg_gen_xor_tl(sv, res, in1);
809     tcg_gen_xor_tl(tmp, in1, in2);
810     tcg_gen_andc_tl(sv, sv, tmp);
811     tcg_temp_free(tmp);
812 
813     return sv;
814 }
815 
816 /* Compute signed overflow for subtraction.  */
817 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
818 {
819     TCGv sv = get_temp(ctx);
820     TCGv tmp = tcg_temp_new();
821 
822     tcg_gen_xor_tl(sv, res, in1);
823     tcg_gen_xor_tl(tmp, in1, in2);
824     tcg_gen_and_tl(sv, sv, tmp);
825     tcg_temp_free(tmp);
826 
827     return sv;
828 }
829 
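/* Common translation for the add family.  SHIFT pre-shifts IN1 (the
   shift-and-add forms), IS_L suppresses the carry computation and writeback,
   IS_TSV traps on signed overflow, IS_TC traps on the condition, and IS_C
   adds in the carry saved in PSW[CB] (cpu_psw_cb_msb).  */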
830 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
831                             unsigned shift, bool is_l, bool is_tsv, bool is_tc,
832                             bool is_c, unsigned cf)
833 {
834     TCGv dest, cb, cb_msb, sv, tmp;
835     unsigned c = cf >> 1;
836     DisasCond cond;
837 
838     dest = tcg_temp_new();
839     TCGV_UNUSED(cb);
840     TCGV_UNUSED(cb_msb);
841 
842     if (shift) {
843         tmp = get_temp(ctx);
844         tcg_gen_shli_tl(tmp, in1, shift);
845         in1 = tmp;
846     }
847 
848     if (!is_l || c == 4 || c == 5) {
849         TCGv zero = tcg_const_tl(0);
850         cb_msb = get_temp(ctx);
851         tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
852         if (is_c) {
853             tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
854         }
855         tcg_temp_free(zero);
856         if (!is_l) {
857             cb = get_temp(ctx);
858             tcg_gen_xor_tl(cb, in1, in2);
859             tcg_gen_xor_tl(cb, cb, dest);
860         }
861     } else {
862         tcg_gen_add_tl(dest, in1, in2);
863         if (is_c) {
864             tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
865         }
866     }
867 
868     /* Compute signed overflow if required.  */
869     TCGV_UNUSED(sv);
870     if (is_tsv || c == 6) {
871         sv = do_add_sv(ctx, dest, in1, in2);
872         if (is_tsv) {
873             /* ??? Need to include overflow from shift.  */
874             gen_helper_tsv(cpu_env, sv);
875         }
876     }
877 
878     /* Emit any conditional trap before any writeback.  */
879     cond = do_cond(cf, dest, cb_msb, sv);
880     if (is_tc) {
881         cond_prep(&cond);
882         tmp = tcg_temp_new();
883         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
884         gen_helper_tcond(cpu_env, tmp);
885         tcg_temp_free(tmp);
886     }
887 
888     /* Write back the result.  */
889     if (!is_l) {
890         save_or_nullify(ctx, cpu_psw_cb, cb);
891         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
892     }
893     save_gpr(ctx, rt, dest);
894     tcg_temp_free(dest);
895 
896     /* Install the new nullification.  */
897     cond_free(&ctx->null_cond);
898     ctx->null_cond = cond;
899     return DISAS_NEXT;
900 }
901 
902 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
903                             bool is_tsv, bool is_b, bool is_tc, unsigned cf)
904 {
905     TCGv dest, sv, cb, cb_msb, zero, tmp;
906     unsigned c = cf >> 1;
907     DisasCond cond;
908 
909     dest = tcg_temp_new();
910     cb = tcg_temp_new();
911     cb_msb = tcg_temp_new();
912 
913     zero = tcg_const_tl(0);
914     if (is_b) {
915         /* DEST,C = IN1 + ~IN2 + C.  */
916         tcg_gen_not_tl(cb, in2);
917         tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
918         tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
919         tcg_gen_xor_tl(cb, cb, in1);
920         tcg_gen_xor_tl(cb, cb, dest);
921     } else {
922         /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
923            operations by seeding the high word with 1 and subtracting.  */
924         tcg_gen_movi_tl(cb_msb, 1);
925         tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
926         tcg_gen_eqv_tl(cb, in1, in2);
927         tcg_gen_xor_tl(cb, cb, dest);
928     }
929     tcg_temp_free(zero);
930 
931     /* Compute signed overflow if required.  */
932     TCGV_UNUSED(sv);
933     if (is_tsv || c == 6) {
934         sv = do_sub_sv(ctx, dest, in1, in2);
935         if (is_tsv) {
936             gen_helper_tsv(cpu_env, sv);
937         }
938     }
939 
940     /* Compute the condition.  We cannot use the special case for borrow.  */
941     if (!is_b) {
942         cond = do_sub_cond(cf, dest, in1, in2, sv);
943     } else {
944         cond = do_cond(cf, dest, cb_msb, sv);
945     }
946 
947     /* Emit any conditional trap before any writeback.  */
948     if (is_tc) {
949         cond_prep(&cond);
950         tmp = tcg_temp_new();
951         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
952         gen_helper_tcond(cpu_env, tmp);
953         tcg_temp_free(tmp);
954     }
955 
956     /* Write back the result.  */
957     save_or_nullify(ctx, cpu_psw_cb, cb);
958     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
959     save_gpr(ctx, rt, dest);
960     tcg_temp_free(dest);
961 
962     /* Install the new nullification.  */
963     cond_free(&ctx->null_cond);
964     ctx->null_cond = cond;
965     return DISAS_NEXT;
966 }
967 
968 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
969                                TCGv in2, unsigned cf)
970 {
971     TCGv dest, sv;
972     DisasCond cond;
973 
974     dest = tcg_temp_new();
975     tcg_gen_sub_tl(dest, in1, in2);
976 
977     /* Compute signed overflow if required.  */
978     TCGV_UNUSED(sv);
979     if ((cf >> 1) == 6) {
980         sv = do_sub_sv(ctx, dest, in1, in2);
981     }
982 
983     /* Form the condition for the compare.  */
984     cond = do_sub_cond(cf, dest, in1, in2, sv);
985 
986     /* Clear.  */
987     tcg_gen_movi_tl(dest, 0);
988     save_gpr(ctx, rt, dest);
989     tcg_temp_free(dest);
990 
991     /* Install the new nullification.  */
992     cond_free(&ctx->null_cond);
993     ctx->null_cond = cond;
994     return DISAS_NEXT;
995 }
996 
997 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
998                             unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
999 {
1000     TCGv dest = dest_gpr(ctx, rt);
1001 
1002     /* Perform the operation, and writeback.  */
1003     fn(dest, in1, in2);
1004     save_gpr(ctx, rt, dest);
1005 
1006     /* Install the new nullification.  */
1007     cond_free(&ctx->null_cond);
1008     if (cf) {
1009         ctx->null_cond = do_log_cond(cf, dest);
1010     }
1011     return DISAS_NEXT;
1012 }
1013 
1014 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1015                              TCGv in2, unsigned cf, bool is_tc,
1016                              void (*fn)(TCGv, TCGv, TCGv))
1017 {
1018     TCGv dest;
1019     DisasCond cond;
1020 
1021     if (cf == 0) {
1022         dest = dest_gpr(ctx, rt);
1023         fn(dest, in1, in2);
1024         save_gpr(ctx, rt, dest);
1025         cond_free(&ctx->null_cond);
1026     } else {
1027         dest = tcg_temp_new();
1028         fn(dest, in1, in2);
1029 
1030         cond = do_unit_cond(cf, dest, in1, in2);
1031 
1032         if (is_tc) {
1033             TCGv tmp = tcg_temp_new();
1034             cond_prep(&cond);
1035             tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1036             gen_helper_tcond(cpu_env, tmp);
1037             tcg_temp_free(tmp);
1038         }
1039         save_gpr(ctx, rt, dest);
1040 
1041         cond_free(&ctx->null_cond);
1042         ctx->null_cond = cond;
1043     }
1044     return DISAS_NEXT;
1045 }
1046 
1047 /* Emit a memory load.  The modify parameter should be
1048  * < 0 for pre-modify,
1049  * > 0 for post-modify,
1050  * = 0 for no base register update.
1051  */
1052 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1053                        unsigned rx, int scale, target_long disp,
1054                        int modify, TCGMemOp mop)
1055 {
1056     TCGv addr, base;
1057 
1058     /* Caller uses nullify_over/nullify_end.  */
1059     assert(ctx->null_cond.c == TCG_COND_NEVER);
1060 
1061     addr = tcg_temp_new();
1062     base = load_gpr(ctx, rb);
1063 
1064     /* Note that RX is mutually exclusive with DISP.  */
1065     if (rx) {
1066         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1067         tcg_gen_add_tl(addr, addr, base);
1068     } else {
1069         tcg_gen_addi_tl(addr, base, disp);
1070     }
1071 
1072     if (modify == 0) {
1073         tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1074     } else {
1075         tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1076                             MMU_USER_IDX, mop);
1077         save_gpr(ctx, rb, addr);
1078     }
1079     tcg_temp_free(addr);
1080 }
1081 
1082 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1083                        unsigned rx, int scale, target_long disp,
1084                        int modify, TCGMemOp mop)
1085 {
1086     TCGv addr, base;
1087 
1088     /* Caller uses nullify_over/nullify_end.  */
1089     assert(ctx->null_cond.c == TCG_COND_NEVER);
1090 
1091     addr = tcg_temp_new();
1092     base = load_gpr(ctx, rb);
1093 
1094     /* Note that RX is mutually exclusive with DISP.  */
1095     if (rx) {
1096         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1097         tcg_gen_add_tl(addr, addr, base);
1098     } else {
1099         tcg_gen_addi_tl(addr, base, disp);
1100     }
1101 
1102     if (modify == 0) {
1103         tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1104     } else {
1105         tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1106                             MMU_USER_IDX, mop);
1107         save_gpr(ctx, rb, addr);
1108     }
1109     tcg_temp_free(addr);
1110 }
1111 
1112 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1113                         unsigned rx, int scale, target_long disp,
1114                         int modify, TCGMemOp mop)
1115 {
1116     TCGv addr, base;
1117 
1118     /* Caller uses nullify_over/nullify_end.  */
1119     assert(ctx->null_cond.c == TCG_COND_NEVER);
1120 
1121     addr = tcg_temp_new();
1122     base = load_gpr(ctx, rb);
1123 
1124     /* Note that RX is mutually exclusive with DISP.  */
1125     if (rx) {
1126         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1127         tcg_gen_add_tl(addr, addr, base);
1128     } else {
1129         tcg_gen_addi_tl(addr, base, disp);
1130     }
1131 
1132     tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1133 
1134     if (modify != 0) {
1135         save_gpr(ctx, rb, addr);
1136     }
1137     tcg_temp_free(addr);
1138 }
1139 
1140 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1141                         unsigned rx, int scale, target_long disp,
1142                         int modify, TCGMemOp mop)
1143 {
1144     TCGv addr, base;
1145 
1146     /* Caller uses nullify_over/nullify_end.  */
1147     assert(ctx->null_cond.c == TCG_COND_NEVER);
1148 
1149     addr = tcg_temp_new();
1150     base = load_gpr(ctx, rb);
1151 
1152     /* Note that RX is mutually exclusive with DISP.  */
1153     if (rx) {
1154         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1155         tcg_gen_add_tl(addr, addr, base);
1156     } else {
1157         tcg_gen_addi_tl(addr, base, disp);
1158     }
1159 
1160     tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1161 
1162     if (modify != 0) {
1163         save_gpr(ctx, rb, addr);
1164     }
1165     tcg_temp_free(addr);
1166 }
1167 
1168 #if TARGET_LONG_BITS == 64
1169 #define do_load_tl  do_load_64
1170 #define do_store_tl do_store_64
1171 #else
1172 #define do_load_tl  do_load_32
1173 #define do_store_tl do_store_32
1174 #endif
1175 
1176 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1177                              unsigned rx, int scale, target_long disp,
1178                              int modify, TCGMemOp mop)
1179 {
1180     TCGv dest;
1181 
1182     nullify_over(ctx);
1183 
1184     if (modify == 0) {
1185         /* No base register update.  */
1186         dest = dest_gpr(ctx, rt);
1187     } else {
1188         /* Make sure if RT == RB, we see the result of the load.  */
1189         dest = get_temp(ctx);
1190     }
1191     do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1192     save_gpr(ctx, rt, dest);
1193 
1194     return nullify_end(ctx, DISAS_NEXT);
1195 }
1196 
1197 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1198                                unsigned rx, int scale, target_long disp,
1199                                int modify)
1200 {
1201     TCGv_i32 tmp;
1202 
1203     nullify_over(ctx);
1204 
1205     tmp = tcg_temp_new_i32();
1206     do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1207     save_frw_i32(rt, tmp);
1208     tcg_temp_free_i32(tmp);
1209 
1210     if (rt == 0) {
1211         gen_helper_loaded_fr0(cpu_env);
1212     }
1213 
1214     return nullify_end(ctx, DISAS_NEXT);
1215 }
1216 
1217 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1218                                unsigned rx, int scale, target_long disp,
1219                                int modify)
1220 {
1221     TCGv_i64 tmp;
1222 
1223     nullify_over(ctx);
1224 
1225     tmp = tcg_temp_new_i64();
1226     do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1227     save_frd(rt, tmp);
1228     tcg_temp_free_i64(tmp);
1229 
1230     if (rt == 0) {
1231         gen_helper_loaded_fr0(cpu_env);
1232     }
1233 
1234     return nullify_end(ctx, DISAS_NEXT);
1235 }
1236 
1237 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1238                               target_long disp, int modify, TCGMemOp mop)
1239 {
1240     nullify_over(ctx);
1241     do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1242     return nullify_end(ctx, DISAS_NEXT);
1243 }
1244 
1245 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1246                                 unsigned rx, int scale, target_long disp,
1247                                 int modify)
1248 {
1249     TCGv_i32 tmp;
1250 
1251     nullify_over(ctx);
1252 
1253     tmp = load_frw_i32(rt);
1254     do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1255     tcg_temp_free_i32(tmp);
1256 
1257     return nullify_end(ctx, DISAS_NEXT);
1258 }
1259 
1260 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1261                                 unsigned rx, int scale, target_long disp,
1262                                 int modify)
1263 {
1264     TCGv_i64 tmp;
1265 
1266     nullify_over(ctx);
1267 
1268     tmp = load_frd(rt);
1269     do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1270     tcg_temp_free_i64(tmp);
1271 
1272     return nullify_end(ctx, DISAS_NEXT);
1273 }
1274 
1275 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1276                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1277 {
1278     TCGv_i32 tmp;
1279 
1280     nullify_over(ctx);
1281     tmp = load_frw0_i32(ra);
1282 
1283     func(tmp, cpu_env, tmp);
1284 
1285     save_frw_i32(rt, tmp);
1286     tcg_temp_free_i32(tmp);
1287     return nullify_end(ctx, DISAS_NEXT);
1288 }
1289 
1290 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1291                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1292 {
1293     TCGv_i32 dst;
1294     TCGv_i64 src;
1295 
1296     nullify_over(ctx);
1297     src = load_frd(ra);
1298     dst = tcg_temp_new_i32();
1299 
1300     func(dst, cpu_env, src);
1301 
1302     tcg_temp_free_i64(src);
1303     save_frw_i32(rt, dst);
1304     tcg_temp_free_i32(dst);
1305     return nullify_end(ctx, DISAS_NEXT);
1306 }
1307 
1308 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1309                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1310 {
1311     TCGv_i64 tmp;
1312 
1313     nullify_over(ctx);
1314     tmp = load_frd0(ra);
1315 
1316     func(tmp, cpu_env, tmp);
1317 
1318     save_frd(rt, tmp);
1319     tcg_temp_free_i64(tmp);
1320     return nullify_end(ctx, DISAS_NEXT);
1321 }
1322 
1323 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1324                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1325 {
1326     TCGv_i32 src;
1327     TCGv_i64 dst;
1328 
1329     nullify_over(ctx);
1330     src = load_frw0_i32(ra);
1331     dst = tcg_temp_new_i64();
1332 
1333     func(dst, cpu_env, src);
1334 
1335     tcg_temp_free_i32(src);
1336     save_frd(rt, dst);
1337     tcg_temp_free_i64(dst);
1338     return nullify_end(ctx, DISAS_NEXT);
1339 }
1340 
1341 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1342                                  unsigned ra, unsigned rb,
1343                                  void (*func)(TCGv_i32, TCGv_env,
1344                                               TCGv_i32, TCGv_i32))
1345 {
1346     TCGv_i32 a, b;
1347 
1348     nullify_over(ctx);
1349     a = load_frw0_i32(ra);
1350     b = load_frw0_i32(rb);
1351 
1352     func(a, cpu_env, a, b);
1353 
1354     tcg_temp_free_i32(b);
1355     save_frw_i32(rt, a);
1356     tcg_temp_free_i32(a);
1357     return nullify_end(ctx, DISAS_NEXT);
1358 }
1359 
1360 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1361                                  unsigned ra, unsigned rb,
1362                                  void (*func)(TCGv_i64, TCGv_env,
1363                                               TCGv_i64, TCGv_i64))
1364 {
1365     TCGv_i64 a, b;
1366 
1367     nullify_over(ctx);
1368     a = load_frd0(ra);
1369     b = load_frd0(rb);
1370 
1371     func(a, cpu_env, a, b);
1372 
1373     tcg_temp_free_i64(b);
1374     save_frd(rt, a);
1375     tcg_temp_free_i64(a);
1376     return nullify_end(ctx, DISAS_NEXT);
1377 }
1378 
1379 /* Emit an unconditional branch to a direct target, which may or may not
1380    have already had nullification handled.  */
1381 static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
1382                                 unsigned link, bool is_n)
1383 {
1384     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1385         if (link != 0) {
1386             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1387         }
1388         ctx->iaoq_n = dest;
1389         if (is_n) {
1390             ctx->null_cond.c = TCG_COND_ALWAYS;
1391         }
1392         return DISAS_NEXT;
1393     } else {
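        /* This branch may itself be nullified: emit the taken path under
           nullify_over/nullify_end, then fall through to a second goto_tb
           that is reached only when the branch was nullified.  */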
1394         nullify_over(ctx);
1395 
1396         if (link != 0) {
1397             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1398         }
1399 
1400         if (is_n && use_nullify_skip(ctx)) {
1401             nullify_set(ctx, 0);
1402             gen_goto_tb(ctx, 0, dest, dest + 4);
1403         } else {
1404             nullify_set(ctx, is_n);
1405             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1406         }
1407 
1408         nullify_end(ctx, DISAS_NEXT);
1409 
1410         nullify_set(ctx, 0);
1411         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1412         return DISAS_NORETURN;
1413     }
1414 }
1415 
1416 /* Emit a conditional branch to a direct target.  If the branch itself
1417    is nullified, we should have already used nullify_over.  */
1418 static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1419                                 DisasCond *cond)
1420 {
1421     target_ulong dest = iaoq_dest(ctx, disp);
1422     TCGLabel *taken = NULL;
1423     TCGCond c = cond->c;
1424     bool n;
1425 
1426     assert(ctx->null_cond.c == TCG_COND_NEVER);
1427 
1428     /* Handle TRUE and NEVER as direct branches.  */
1429     if (c == TCG_COND_ALWAYS) {
1430         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1431     }
1432     if (c == TCG_COND_NEVER) {
1433         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1434     }
1435 
1436     taken = gen_new_label();
1437     cond_prep(cond);
1438     tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1439     cond_free(cond);
1440 
1441     /* Not taken: Condition not satisfied; nullify on backward branches. */
1442     n = is_n && disp < 0;
1443     if (n && use_nullify_skip(ctx)) {
1444         nullify_set(ctx, 0);
1445         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1446     } else {
1447         if (!n && ctx->null_lab) {
1448             gen_set_label(ctx->null_lab);
1449             ctx->null_lab = NULL;
1450         }
1451         nullify_set(ctx, n);
1452         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1453     }
1454 
1455     gen_set_label(taken);
1456 
1457     /* Taken: Condition satisfied; nullify on forward branches.  */
1458     n = is_n && disp >= 0;
1459     if (n && use_nullify_skip(ctx)) {
1460         nullify_set(ctx, 0);
1461         gen_goto_tb(ctx, 1, dest, dest + 4);
1462     } else {
1463         nullify_set(ctx, n);
1464         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1465     }
1466 
1467     /* Not taken: the branch itself was nullified.  */
1468     if (ctx->null_lab) {
1469         gen_set_label(ctx->null_lab);
1470         ctx->null_lab = NULL;
1471         return DISAS_IAQ_N_STALE;
1472     } else {
1473         return DISAS_NORETURN;
1474     }
1475 }
1476 
1477 /* Emit an unconditional branch to an indirect target.  This handles
1478    nullification of the branch itself.  */
1479 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
1480                                 unsigned link, bool is_n)
1481 {
1482     TCGv a0, a1, next, tmp;
1483     TCGCond c;
1484 
1485     assert(ctx->null_lab == NULL);
1486 
1487     if (ctx->null_cond.c == TCG_COND_NEVER) {
1488         if (link != 0) {
1489             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1490         }
1491         next = get_temp(ctx);
1492         tcg_gen_mov_tl(next, dest);
1493         ctx->iaoq_n = -1;
1494         ctx->iaoq_n_var = next;
1495         if (is_n) {
1496             ctx->null_cond.c = TCG_COND_ALWAYS;
1497         }
1498     } else if (is_n && use_nullify_skip(ctx)) {
1499         /* The (conditional) branch, B, nullifies the next insn, N,
1500           and we're allowed to skip execution of N (no single-step or
1501            tracepoint in effect).  Since the goto_ptr that we must use
1502            for the indirect branch consumes no special resources, we
1503            can (conditionally) skip B and continue execution.  */
1504         /* The use_nullify_skip test implies we have a known control path.  */
1505         tcg_debug_assert(ctx->iaoq_b != -1);
1506         tcg_debug_assert(ctx->iaoq_n != -1);
1507 
1508         /* We do have to handle the non-local temporary, DEST, before
1509        branching.  Since IAOQ_F is not really live at this point, we
1510            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1511         tcg_gen_mov_tl(cpu_iaoq_f, dest);
1512         tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1513 
1514         nullify_over(ctx);
1515         if (link != 0) {
1516             tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1517         }
1518         tcg_gen_lookup_and_goto_ptr();
1519         return nullify_end(ctx, DISAS_NEXT);
1520     } else {
1521         cond_prep(&ctx->null_cond);
1522         c = ctx->null_cond.c;
1523         a0 = ctx->null_cond.a0;
1524         a1 = ctx->null_cond.a1;
1525 
1526         tmp = tcg_temp_new();
1527         next = get_temp(ctx);
1528 
1529         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1530         tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1531         ctx->iaoq_n = -1;
1532         ctx->iaoq_n_var = next;
1533 
1534         if (link != 0) {
1535             tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1536         }
1537 
1538         if (is_n) {
1539             /* The branch nullifies the next insn, which means the state of N
1540                after the branch is the inverse of the state of N that applied
1541                to the branch.  */
1542             tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1543             cond_free(&ctx->null_cond);
1544             ctx->null_cond = cond_make_n();
1545             ctx->psw_n_nonzero = true;
1546         } else {
1547             cond_free(&ctx->null_cond);
1548         }
1549     }
1550 
1551     return DISAS_NEXT;
1552 }
1553 
1554 /* On Linux, page zero is normally marked execute only + gateway.
1555    Therefore normal read or write is supposed to fail, but specific
1556    offsets have kernel code mapped to raise permissions to implement
1557    system calls.  Handling this via an explicit check here, rather
1558    in than the "be disp(sr2,r0)" instruction that probably sent us
1559    than in the "be disp(sr2,r0)" instruction that probably sent us
1560    aforementioned BE.  */
1561 static DisasJumpType do_page_zero(DisasContext *ctx)
1562 {
1563     /* If by some means we get here with PSW[N]=1, that implies that
1564        the B,GATE instruction would be skipped, and we'd fault on the
1565        next insn within the privilaged page.  */
1566        next insn within the privileged page.  */
1567     case TCG_COND_NEVER:
1568         break;
1569     case TCG_COND_ALWAYS:
1570         tcg_gen_movi_tl(cpu_psw_n, 0);
1571         goto do_sigill;
1572     default:
1573         /* Since this is always the first (and only) insn within the
1574            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1575         g_assert_not_reached();
1576     }
1577 
1578     /* Check that we didn't arrive here via some means that allowed
1579        non-sequential instruction execution.  Normally the PSW[B] bit
1580        detects this by disallowing the B,GATE instruction to execute
1581        under such conditions.  */
1582     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1583         goto do_sigill;
1584     }
1585 
1586     switch (ctx->iaoq_f) {
1587     case 0x00: /* Null pointer call */
1588         gen_excp_1(EXCP_SIGSEGV);
1589         return DISAS_NORETURN;
1590 
1591     case 0xb0: /* LWS */
1592         gen_excp_1(EXCP_SYSCALL_LWS);
1593         return DISAS_NORETURN;
1594 
1595     case 0xe0: /* SET_THREAD_POINTER */
1596         tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1597         tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1598         tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1599         return DISAS_IAQ_N_UPDATED;
1600 
1601     case 0x100: /* SYSCALL */
1602         gen_excp_1(EXCP_SYSCALL);
1603         return DISAS_NORETURN;
1604 
1605     default:
1606     do_sigill:
1607         gen_excp_1(EXCP_SIGILL);
1608         return DISAS_NORETURN;
1609     }
1610 }
1611 
1612 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1613                                const DisasInsn *di)
1614 {
1615     cond_free(&ctx->null_cond);
1616     return DISAS_NEXT;
1617 }
1618 
1619 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1620                                  const DisasInsn *di)
1621 {
1622     nullify_over(ctx);
1623     return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1624 }
1625 
1626 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1627                                 const DisasInsn *di)
1628 {
1629     /* No point in nullifying the memory barrier.  */
1630     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1631 
1632     cond_free(&ctx->null_cond);
1633     return DISAS_NEXT;
1634 }
1635 
1636 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1637                                 const DisasInsn *di)
1638 {
1639     unsigned rt = extract32(insn, 0, 5);
1640     TCGv tmp = dest_gpr(ctx, rt);
1641     tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1642     save_gpr(ctx, rt, tmp);
1643 
1644     cond_free(&ctx->null_cond);
1645     return DISAS_NEXT;
1646 }
1647 
1648 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1649                                 const DisasInsn *di)
1650 {
1651     unsigned rt = extract32(insn, 0, 5);
1652     TCGv tmp = dest_gpr(ctx, rt);
1653 
1654     /* ??? We don't implement space registers.  */
1655     tcg_gen_movi_tl(tmp, 0);
1656     save_gpr(ctx, rt, tmp);
1657 
1658     cond_free(&ctx->null_cond);
1659     return DISAS_NEXT;
1660 }
1661 
1662 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1663                                  const DisasInsn *di)
1664 {
1665     unsigned rt = extract32(insn, 0, 5);
1666     unsigned ctl = extract32(insn, 21, 5);
1667     TCGv tmp;
1668 
1669     switch (ctl) {
1670     case 11: /* SAR */
1671 #ifdef TARGET_HPPA64
1672         if (extract32(insn, 14, 1) == 0) {
1673             /* MFSAR without ,W masks low 5 bits.  */
1674             tmp = dest_gpr(ctx, rt);
1675             tcg_gen_andi_tl(tmp, cpu_sar, 31);
1676             save_gpr(ctx, rt, tmp);
1677             break;
1678         }
1679 #endif
1680         save_gpr(ctx, rt, cpu_sar);
1681         break;
1682     case 16: /* Interval Timer */
1683         tmp = dest_gpr(ctx, rt);
1684         tcg_gen_movi_tl(tmp, 0); /* FIXME */
1685         save_gpr(ctx, rt, tmp);
1686         break;
1687     case 26:
1688         save_gpr(ctx, rt, cpu_cr26);
1689         break;
1690     case 27:
1691         save_gpr(ctx, rt, cpu_cr27);
1692         break;
1693     default:
1694         /* All other control registers are privileged.  */
1695         return gen_illegal(ctx);
1696     }
1697 
1698     cond_free(&ctx->null_cond);
1699     return DISAS_NEXT;
1700 }
1701 
1702 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1703                                  const DisasInsn *di)
1704 {
1705     unsigned rin = extract32(insn, 16, 5);
1706     unsigned ctl = extract32(insn, 21, 5);
1707     TCGv tmp;
1708 
1709     if (ctl == 11) { /* SAR */
1710         tmp = tcg_temp_new();
1711         tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1712         save_or_nullify(ctx, cpu_sar, tmp);
1713         tcg_temp_free(tmp);
1714     } else {
1715         /* All other control registers are privileged or read-only.  */
1716         return gen_illegal(ctx);
1717     }
1718 
1719     cond_free(&ctx->null_cond);
1720     return DISAS_NEXT;
1721 }
1722 
1723 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1724                                    const DisasInsn *di)
1725 {
1726     unsigned rin = extract32(insn, 16, 5);
1727     TCGv tmp = tcg_temp_new();
1728 
1729     tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1730     tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1731     save_or_nullify(ctx, cpu_sar, tmp);
1732     tcg_temp_free(tmp);
1733 
1734     cond_free(&ctx->null_cond);
1735     return DISAS_NEXT;
1736 }
1737 
1738 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1739                                  const DisasInsn *di)
1740 {
1741     unsigned rt = extract32(insn, 0, 5);
1742     TCGv dest = dest_gpr(ctx, rt);
1743 
1744     /* Since we don't implement space registers, this returns zero.  */
1745     tcg_gen_movi_tl(dest, 0);
1746     save_gpr(ctx, rt, dest);
1747 
1748     cond_free(&ctx->null_cond);
1749     return DISAS_NEXT;
1750 }
1751 
1752 static const DisasInsn table_system[] = {
1753     { 0x00000000u, 0xfc001fe0u, trans_break },
1754     /* We don't implement space registers, so MTSP is a nop.  */
1755     { 0x00001820u, 0xffe01fffu, trans_nop },
1756     { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1757     { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1758     { 0x000014a0u, 0xffffffe0u, trans_mfia },
1759     { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1760     { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1761     { 0x00000400u, 0xffffffffu, trans_sync },
1762     { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
1763 };
1764 
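     /* The cache-control insns (fdc, fdce, fic, fice, pdc) are modelled as
        nops; their only architecturally visible effect here is the optional
        base-register modification.  */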
1765 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1766                                         const DisasInsn *di)
1767 {
1768     unsigned rb = extract32(insn, 21, 5);
1769     unsigned rx = extract32(insn, 16, 5);
1770     TCGv dest = dest_gpr(ctx, rb);
1771     TCGv src1 = load_gpr(ctx, rb);
1772     TCGv src2 = load_gpr(ctx, rx);
1773 
1774     /* The only thing we need to do is the base register modification.  */
1775     tcg_gen_add_tl(dest, src1, src2);
1776     save_gpr(ctx, rb, dest);
1777 
1778     cond_free(&ctx->null_cond);
1779     return DISAS_NEXT;
1780 }
1781 
1782 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1783                                  const DisasInsn *di)
1784 {
1785     unsigned rt = extract32(insn, 0, 5);
1786     unsigned rb = extract32(insn, 21, 5);
1787     unsigned is_write = extract32(insn, 6, 1);
1788     TCGv dest;
1789 
1790     nullify_over(ctx);
1791 
1792     /* ??? Do something with the privilege level operand.  */
1793     dest = dest_gpr(ctx, rt);
1794     if (is_write) {
1795         gen_helper_probe_w(dest, load_gpr(ctx, rb));
1796     } else {
1797         gen_helper_probe_r(dest, load_gpr(ctx, rb));
1798     }
1799     save_gpr(ctx, rt, dest);
1800     return nullify_end(ctx, DISAS_NEXT);
1801 }
1802 
1803 static const DisasInsn table_mem_mgmt[] = {
1804     { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
1805     { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
1806     { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1807     { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
1808     { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1809     { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
1810     { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1811     { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
1812     { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1813     { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
1814     { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1815     { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
1816     { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1817     { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
1818     { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
1819 };
1820 
1821 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1822                                const DisasInsn *di)
1823 {
1824     unsigned r2 = extract32(insn, 21, 5);
1825     unsigned r1 = extract32(insn, 16, 5);
1826     unsigned cf = extract32(insn, 12, 4);
1827     unsigned ext = extract32(insn, 8, 4);
1828     unsigned shift = extract32(insn, 6, 2);
1829     unsigned rt = extract32(insn,  0, 5);
1830     TCGv tcg_r1, tcg_r2;
1831     bool is_c = false;
1832     bool is_l = false;
1833     bool is_tc = false;
1834     bool is_tsv = false;
1835     DisasJumpType ret;
1836 
1837     switch (ext) {
1838     case 0x6: /* ADD, SHLADD */
1839         break;
1840     case 0xa: /* ADD,L, SHLADD,L */
1841         is_l = true;
1842         break;
1843     case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1844         is_tsv = true;
1845         break;
1846     case 0x7: /* ADD,C */
1847         is_c = true;
1848         break;
1849     case 0xf: /* ADD,C,TSV */
1850         is_c = is_tsv = true;
1851         break;
1852     default:
1853         return gen_illegal(ctx);
1854     }
1855 
1856     if (cf) {
1857         nullify_over(ctx);
1858     }
1859     tcg_r1 = load_gpr(ctx, r1);
1860     tcg_r2 = load_gpr(ctx, r2);
1861     ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1862     return nullify_end(ctx, ret);
1863 }
1864 
1865 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1866                                const DisasInsn *di)
1867 {
1868     unsigned r2 = extract32(insn, 21, 5);
1869     unsigned r1 = extract32(insn, 16, 5);
1870     unsigned cf = extract32(insn, 12, 4);
1871     unsigned ext = extract32(insn, 6, 6);
1872     unsigned rt = extract32(insn,  0, 5);
1873     TCGv tcg_r1, tcg_r2;
1874     bool is_b = false;
1875     bool is_tc = false;
1876     bool is_tsv = false;
1877     DisasJumpType ret;
1878 
1879     switch (ext) {
1880     case 0x10: /* SUB */
1881         break;
1882     case 0x30: /* SUB,TSV */
1883         is_tsv = true;
1884         break;
1885     case 0x14: /* SUB,B */
1886         is_b = true;
1887         break;
1888     case 0x34: /* SUB,B,TSV */
1889         is_b = is_tsv = true;
1890         break;
1891     case 0x13: /* SUB,TC */
1892         is_tc = true;
1893         break;
1894     case 0x33: /* SUB,TSV,TC */
1895         is_tc = is_tsv = true;
1896         break;
1897     default:
1898         return gen_illegal(ctx);
1899     }
1900 
1901     if (cf) {
1902         nullify_over(ctx);
1903     }
1904     tcg_r1 = load_gpr(ctx, r1);
1905     tcg_r2 = load_gpr(ctx, r2);
1906     ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1907     return nullify_end(ctx, ret);
1908 }
1909 
1910 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1911                                const DisasInsn *di)
1912 {
1913     unsigned r2 = extract32(insn, 21, 5);
1914     unsigned r1 = extract32(insn, 16, 5);
1915     unsigned cf = extract32(insn, 12, 4);
1916     unsigned rt = extract32(insn,  0, 5);
1917     TCGv tcg_r1, tcg_r2;
1918     DisasJumpType ret;
1919 
1920     if (cf) {
1921         nullify_over(ctx);
1922     }
1923     tcg_r1 = load_gpr(ctx, r1);
1924     tcg_r2 = load_gpr(ctx, r2);
1925     ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1926     return nullify_end(ctx, ret);
1927 }
1928 
1929 /* OR r,0,t -> COPY (according to gas) */
1930 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1931                                 const DisasInsn *di)
1932 {
1933     unsigned r1 = extract32(insn, 16, 5);
1934     unsigned rt = extract32(insn,  0, 5);
1935 
1936     if (r1 == 0) {
1937         TCGv dest = dest_gpr(ctx, rt);
1938         tcg_gen_movi_tl(dest, 0);
1939         save_gpr(ctx, rt, dest);
1940     } else {
1941         save_gpr(ctx, rt, cpu_gr[r1]);
1942     }
1943     cond_free(&ctx->null_cond);
1944     return DISAS_NEXT;
1945 }
1946 
1947 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1948                                   const DisasInsn *di)
1949 {
1950     unsigned r2 = extract32(insn, 21, 5);
1951     unsigned r1 = extract32(insn, 16, 5);
1952     unsigned cf = extract32(insn, 12, 4);
1953     unsigned rt = extract32(insn,  0, 5);
1954     TCGv tcg_r1, tcg_r2;
1955     DisasJumpType ret;
1956 
1957     if (cf) {
1958         nullify_over(ctx);
1959     }
1960     tcg_r1 = load_gpr(ctx, r1);
1961     tcg_r2 = load_gpr(ctx, r2);
1962     ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1963     return nullify_end(ctx, ret);
1964 }
1965 
1966 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1967                                 const DisasInsn *di)
1968 {
1969     unsigned r2 = extract32(insn, 21, 5);
1970     unsigned r1 = extract32(insn, 16, 5);
1971     unsigned cf = extract32(insn, 12, 4);
1972     unsigned rt = extract32(insn,  0, 5);
1973     TCGv tcg_r1, tcg_r2;
1974     DisasJumpType ret;
1975 
1976     if (cf) {
1977         nullify_over(ctx);
1978     }
1979     tcg_r1 = load_gpr(ctx, r1);
1980     tcg_r2 = load_gpr(ctx, r2);
1981     ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1982     return nullify_end(ctx, ret);
1983 }
1984 
1985 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1986                                   const DisasInsn *di)
1987 {
1988     unsigned r2 = extract32(insn, 21, 5);
1989     unsigned r1 = extract32(insn, 16, 5);
1990     unsigned cf = extract32(insn, 12, 4);
1991     unsigned is_tc = extract32(insn, 6, 1);
1992     unsigned rt = extract32(insn,  0, 5);
1993     TCGv tcg_r1, tcg_r2, tmp;
1994     DisasJumpType ret;
1995 
1996     if (cf) {
1997         nullify_over(ctx);
1998     }
1999     tcg_r1 = load_gpr(ctx, r1);
2000     tcg_r2 = load_gpr(ctx, r2);
2001     tmp = get_temp(ctx);
2002     tcg_gen_not_tl(tmp, tcg_r2);
2003     ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
2004     return nullify_end(ctx, ret);
2005 }
2006 
2007 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2008                                 const DisasInsn *di)
2009 {
2010     unsigned r2 = extract32(insn, 21, 5);
2011     unsigned cf = extract32(insn, 12, 4);
2012     unsigned is_i = extract32(insn, 6, 1);
2013     unsigned rt = extract32(insn,  0, 5);
2014     TCGv tmp;
2015     DisasJumpType ret;
2016 
2017     nullify_over(ctx);
2018 
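         /* Reconstruct one carry bit per BCD digit from PSW[CB], invert it
            for DCOR (vs IDCOR), and scale by 6 to form the correction.  */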
2019     tmp = get_temp(ctx);
2020     tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2021     if (!is_i) {
2022         tcg_gen_not_tl(tmp, tmp);
2023     }
2024     tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2025     tcg_gen_muli_tl(tmp, tmp, 6);
2026     ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2027                   is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2028 
2029     return nullify_end(ctx, ret);
2030 }
2031 
2032 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2033                               const DisasInsn *di)
2034 {
2035     unsigned r2 = extract32(insn, 21, 5);
2036     unsigned r1 = extract32(insn, 16, 5);
2037     unsigned cf = extract32(insn, 12, 4);
2038     unsigned rt = extract32(insn,  0, 5);
2039     TCGv dest, add1, add2, addc, zero, in1, in2;
2040 
2041     nullify_over(ctx);
2042 
2043     in1 = load_gpr(ctx, r1);
2044     in2 = load_gpr(ctx, r2);
2045 
2046     add1 = tcg_temp_new();
2047     add2 = tcg_temp_new();
2048     addc = tcg_temp_new();
2049     dest = tcg_temp_new();
2050     zero = tcg_const_tl(0);
2051 
2052     /* Form R1 << 1 | PSW[CB]{8}.  */
2053     tcg_gen_add_tl(add1, in1, in1);
2054     tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2055 
2056     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2057        carry{8} requires that we subtract via + ~R2 + 1, as described in
2058        the manual.  By extracting and masking V, we can produce the
2059        proper inputs to the addition without movcond.  */
2060     tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2061     tcg_gen_xor_tl(add2, in2, addc);
2062     tcg_gen_andi_tl(addc, addc, 1);
2063     /* ??? This is only correct for 32-bit.  */
2064     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2065     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2066 
2067     tcg_temp_free(addc);
2068     tcg_temp_free(zero);
2069 
2070     /* Write back the result register.  */
2071     save_gpr(ctx, rt, dest);
2072 
2073     /* Write back PSW[CB].  */
2074     tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2075     tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2076 
2077     /* Write back PSW[V] for the division step.  */
2078     tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2079     tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2080 
2081     /* Install the new nullification.  */
2082     if (cf) {
2083         TCGv sv;
2084         TCGV_UNUSED(sv);
2085         if (cf >> 1 == 6) {
2086             /* ??? The lshift is supposed to contribute to overflow.  */
2087             sv = do_add_sv(ctx, dest, add1, add2);
2088         }
2089         ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2090     }
2091 
2092     tcg_temp_free(add1);
2093     tcg_temp_free(add2);
2094     tcg_temp_free(dest);
2095 
2096     return nullify_end(ctx, DISAS_NEXT);
2097 }
2098 
2099 static const DisasInsn table_arith_log[] = {
2100     { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
2101     { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2102     { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
2103     { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
2104     { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
2105     { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
2106     { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2107     { 0x08000380u, 0xfc000fe0u, trans_uxor },
2108     { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2109     { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2110     { 0x08000440u, 0xfc000fe0u, trans_ds },
2111     { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2112     { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2113     { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2114     { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2115 };
2116 
2117 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2118 {
2119     target_long im = low_sextract(insn, 0, 11);
2120     unsigned e1 = extract32(insn, 11, 1);
2121     unsigned cf = extract32(insn, 12, 4);
2122     unsigned rt = extract32(insn, 16, 5);
2123     unsigned r2 = extract32(insn, 21, 5);
2124     unsigned o1 = extract32(insn, 26, 1);
2125     TCGv tcg_im, tcg_r2;
2126     DisasJumpType ret;
2127 
2128     if (cf) {
2129         nullify_over(ctx);
2130     }
2131 
2132     tcg_im = load_const(ctx, im);
2133     tcg_r2 = load_gpr(ctx, r2);
2134     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2135 
2136     return nullify_end(ctx, ret);
2137 }
2138 
2139 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2140 {
2141     target_long im = low_sextract(insn, 0, 11);
2142     unsigned e1 = extract32(insn, 11, 1);
2143     unsigned cf = extract32(insn, 12, 4);
2144     unsigned rt = extract32(insn, 16, 5);
2145     unsigned r2 = extract32(insn, 21, 5);
2146     TCGv tcg_im, tcg_r2;
2147     DisasJumpType ret;
2148 
2149     if (cf) {
2150         nullify_over(ctx);
2151     }
2152 
2153     tcg_im = load_const(ctx, im);
2154     tcg_r2 = load_gpr(ctx, r2);
2155     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2156 
2157     return nullify_end(ctx, ret);
2158 }
2159 
2160 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2161 {
2162     target_long im = low_sextract(insn, 0, 11);
2163     unsigned cf = extract32(insn, 12, 4);
2164     unsigned rt = extract32(insn, 16, 5);
2165     unsigned r2 = extract32(insn, 21, 5);
2166     TCGv tcg_im, tcg_r2;
2167     DisasJumpType ret;
2168 
2169     if (cf) {
2170         nullify_over(ctx);
2171     }
2172 
2173     tcg_im = load_const(ctx, im);
2174     tcg_r2 = load_gpr(ctx, r2);
2175     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2176 
2177     return nullify_end(ctx, ret);
2178 }
2179 
2180 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2181                                     const DisasInsn *di)
2182 {
2183     unsigned rt = extract32(insn, 0, 5);
2184     unsigned m = extract32(insn, 5, 1);
2185     unsigned sz = extract32(insn, 6, 2);
2186     unsigned a = extract32(insn, 13, 1);
2187     int disp = low_sextract(insn, 16, 5);
2188     unsigned rb = extract32(insn, 21, 5);
2189     int modify = (m ? (a ? -1 : 1) : 0);
2190     TCGMemOp mop = MO_TE | sz;
2191 
2192     return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2193 }
2194 
2195 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2196                                     const DisasInsn *di)
2197 {
2198     unsigned rt = extract32(insn, 0, 5);
2199     unsigned m = extract32(insn, 5, 1);
2200     unsigned sz = extract32(insn, 6, 2);
2201     unsigned u = extract32(insn, 13, 1);
2202     unsigned rx = extract32(insn, 16, 5);
2203     unsigned rb = extract32(insn, 21, 5);
2204     TCGMemOp mop = MO_TE | sz;
2205 
2206     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2207 }
2208 
2209 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2210                                     const DisasInsn *di)
2211 {
2212     int disp = low_sextract(insn, 0, 5);
2213     unsigned m = extract32(insn, 5, 1);
2214     unsigned sz = extract32(insn, 6, 2);
2215     unsigned a = extract32(insn, 13, 1);
2216     unsigned rr = extract32(insn, 16, 5);
2217     unsigned rb = extract32(insn, 21, 5);
2218     int modify = (m ? (a ? -1 : 1) : 0);
2219     TCGMemOp mop = MO_TE | sz;
2220 
2221     return do_store(ctx, rr, rb, disp, modify, mop);
2222 }
2223 
2224 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2225                                 const DisasInsn *di)
2226 {
2227     unsigned rt = extract32(insn, 0, 5);
2228     unsigned m = extract32(insn, 5, 1);
2229     unsigned i = extract32(insn, 12, 1);
2230     unsigned au = extract32(insn, 13, 1);
2231     unsigned rx = extract32(insn, 16, 5);
2232     unsigned rb = extract32(insn, 21, 5);
2233     TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2234     TCGv zero, addr, base, dest;
2235     int modify, disp = 0, scale = 0;
2236 
2237     nullify_over(ctx);
2238 
2239     /* ??? Share more code with do_load and do_load_{32,64}.  */
2240 
2241     if (i) {
2242         modify = (m ? (au ? -1 : 1) : 0);
2243         disp = low_sextract(rx, 0, 5);
2244         rx = 0;
2245     } else {
2246         modify = m;
2247         if (au) {
2248             scale = mop & MO_SIZE;
2249         }
2250     }
2251     if (modify) {
2252         /* Base register modification.  Make sure if RT == RB, we see
2253            the result of the load.  */
2254         dest = get_temp(ctx);
2255     } else {
2256         dest = dest_gpr(ctx, rt);
2257     }
2258 
2259     addr = tcg_temp_new();
2260     base = load_gpr(ctx, rb);
2261     if (rx) {
2262         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2263         tcg_gen_add_tl(addr, addr, base);
2264     } else {
2265         tcg_gen_addi_tl(addr, base, disp);
2266     }
2267 
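         /* LDCW is load-and-clear: atomically exchange zero with the
            (16-byte aligned) word and return the old value.  */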
2268     zero = tcg_const_tl(0);
2269     tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2270                            zero, MMU_USER_IDX, mop);
2271     if (modify) {
2272         save_gpr(ctx, rb, addr);
2273     }
2274     save_gpr(ctx, rt, dest);
2275 
2276     return nullify_end(ctx, DISAS_NEXT);
2277 }
2278 
2279 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
2280                                 const DisasInsn *di)
2281 {
2282     target_long disp = low_sextract(insn, 0, 5);
2283     unsigned m = extract32(insn, 5, 1);
2284     unsigned a = extract32(insn, 13, 1);
2285     unsigned rt = extract32(insn, 16, 5);
2286     unsigned rb = extract32(insn, 21, 5);
2287     TCGv addr, val;
2288 
2289     nullify_over(ctx);
2290 
2291     addr = tcg_temp_new();
2292     if (m || disp == 0) {
2293         tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2294     } else {
2295         tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2296     }
2297     val = load_gpr(ctx, rt);
2298 
2299     if (a) {
2300         gen_helper_stby_e(cpu_env, addr, val);
2301     } else {
2302         gen_helper_stby_b(cpu_env, addr, val);
2303     }
2304 
2305     if (m) {
2306         tcg_gen_addi_tl(addr, addr, disp);
2307         tcg_gen_andi_tl(addr, addr, ~3);
2308         save_gpr(ctx, rb, addr);
2309     }
2310     tcg_temp_free(addr);
2311 
2312     return nullify_end(ctx, DISAS_NEXT);
2313 }
2314 
2315 static const DisasInsn table_index_mem[] = {
2316     { 0x0c001000u, 0xfc001300u, trans_ld_idx_i }, /* LD[BHWD], im */
2317     { 0x0c000000u, 0xfc001300u, trans_ld_idx_x }, /* LD[BHWD], rx */
2318     { 0x0c001200u, 0xfc001300u, trans_st_idx_i }, /* ST[BHWD] */
2319     { 0x0c0001c0u, 0xfc0003c0u, trans_ldcw },
2320     { 0x0c001300u, 0xfc0013c0u, trans_stby },
2321 };
2322 
2323 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2324 {
2325     unsigned rt = extract32(insn, 21, 5);
2326     target_long i = assemble_21(insn);
2327     TCGv tcg_rt = dest_gpr(ctx, rt);
2328 
2329     tcg_gen_movi_tl(tcg_rt, i);
2330     save_gpr(ctx, rt, tcg_rt);
2331     cond_free(&ctx->null_cond);
2332 
2333     return DISAS_NEXT;
2334 }
2335 
2336 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2337 {
2338     unsigned rt = extract32(insn, 21, 5);
2339     target_long i = assemble_21(insn);
2340     TCGv tcg_rt = load_gpr(ctx, rt);
2341     TCGv tcg_r1 = dest_gpr(ctx, 1);
2342 
2343     tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2344     save_gpr(ctx, 1, tcg_r1);
2345     cond_free(&ctx->null_cond);
2346 
2347     return DISAS_NEXT;
2348 }
2349 
2350 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2351 {
2352     unsigned rb = extract32(insn, 21, 5);
2353     unsigned rt = extract32(insn, 16, 5);
2354     target_long i = assemble_16(insn);
2355     TCGv tcg_rt = dest_gpr(ctx, rt);
2356 
2357     /* Special case rb == 0, for the LDI pseudo-op.
2358        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2359     if (rb == 0) {
2360         tcg_gen_movi_tl(tcg_rt, i);
2361     } else {
2362         tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2363     }
2364     save_gpr(ctx, rt, tcg_rt);
2365     cond_free(&ctx->null_cond);
2366 
2367     return DISAS_NEXT;
2368 }
2369 
2370 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2371                                 bool is_mod, TCGMemOp mop)
2372 {
2373     unsigned rb = extract32(insn, 21, 5);
2374     unsigned rt = extract32(insn, 16, 5);
2375     target_long i = assemble_16(insn);
2376 
2377     return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2378 }
2379 
2380 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
2381 {
2382     unsigned rb = extract32(insn, 21, 5);
2383     unsigned rt = extract32(insn, 16, 5);
2384     target_long i = assemble_16a(insn);
2385     unsigned ext2 = extract32(insn, 1, 2);
2386 
2387     switch (ext2) {
2388     case 0:
2389     case 1:
2390         /* FLDW without modification.  */
2391         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2392     case 2:
2393         /* LDW with modification.  Note that the sign of I selects
2394            post-dec vs pre-inc.  */
2395         return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2396     default:
2397         return gen_illegal(ctx);
2398     }
2399 }
2400 
2401 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2402 {
2403     target_long i = assemble_16a(insn);
2404     unsigned t1 = extract32(insn, 1, 1);
2405     unsigned a = extract32(insn, 2, 1);
2406     unsigned t0 = extract32(insn, 16, 5);
2407     unsigned rb = extract32(insn, 21, 5);
2408 
2409     /* FLDW with modification.  */
2410     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2411 }
2412 
2413 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2414                                  bool is_mod, TCGMemOp mop)
2415 {
2416     unsigned rb = extract32(insn, 21, 5);
2417     unsigned rt = extract32(insn, 16, 5);
2418     target_long i = assemble_16(insn);
2419 
2420     return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2421 }
2422 
2423 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
2424 {
2425     unsigned rb = extract32(insn, 21, 5);
2426     unsigned rt = extract32(insn, 16, 5);
2427     target_long i = assemble_16a(insn);
2428     unsigned ext2 = extract32(insn, 1, 2);
2429 
2430     switch (ext2) {
2431     case 0:
2432     case 1:
2433         /* FSTW without modification.  */
2434         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2435     case 2:
2436         /* STW with modification.  */
2437         return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2438     default:
2439         return gen_illegal(ctx);
2440     }
2441 }
2442 
2443 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2444 {
2445     target_long i = assemble_16a(insn);
2446     unsigned t1 = extract32(insn, 1, 1);
2447     unsigned a = extract32(insn, 2, 1);
2448     unsigned t0 = extract32(insn, 16, 5);
2449     unsigned rb = extract32(insn, 21, 5);
2450 
2451     /* FSTW with modification.  */
2452     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2453 }
2454 
2455 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2456 {
2457     unsigned t0 = extract32(insn, 0, 5);
2458     unsigned m = extract32(insn, 5, 1);
2459     unsigned t1 = extract32(insn, 6, 1);
2460     unsigned ext3 = extract32(insn, 7, 3);
2461     /* unsigned cc = extract32(insn, 10, 2); */
2462     unsigned i = extract32(insn, 12, 1);
2463     unsigned ua = extract32(insn, 13, 1);
2464     unsigned rx = extract32(insn, 16, 5);
2465     unsigned rb = extract32(insn, 21, 5);
2466     unsigned rt = t1 * 32 + t0;
2467     int modify = (m ? (ua ? -1 : 1) : 0);
2468     int disp, scale;
2469 
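         /* i selects between the indexed form (rx scaled by the access size
            when ua is set) and the short-displacement form, where the rx
            field holds a 5-bit signed displacement.  */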
2470     if (i == 0) {
2471         scale = (ua ? 2 : 0);
2472         disp = 0;
2473         modify = m;
2474     } else {
2475         disp = low_sextract(rx, 0, 5);
2476         scale = 0;
2477         rx = 0;
2478         modify = (m ? (ua ? -1 : 1) : 0);
2479     }
2480 
2481     switch (ext3) {
2482     case 0: /* FLDW */
2483         return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2484     case 4: /* FSTW */
2485         return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2486     }
2487     return gen_illegal(ctx);
2488 }
2489 
2490 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2491 {
2492     unsigned rt = extract32(insn, 0, 5);
2493     unsigned m = extract32(insn, 5, 1);
2494     unsigned ext4 = extract32(insn, 6, 4);
2495     /* unsigned cc = extract32(insn, 10, 2); */
2496     unsigned i = extract32(insn, 12, 1);
2497     unsigned ua = extract32(insn, 13, 1);
2498     unsigned rx = extract32(insn, 16, 5);
2499     unsigned rb = extract32(insn, 21, 5);
2500     int modify = (m ? (ua ? -1 : 1) : 0);
2501     int disp, scale;
2502 
2503     if (i == 0) {
2504         scale = (ua ? 3 : 0);
2505         disp = 0;
2506         modify = m;
2507     } else {
2508         disp = low_sextract(rx, 0, 5);
2509         scale = 0;
2510         rx = 0;
2511         modify = (m ? (ua ? -1 : 1) : 0);
2512     }
2513 
2514     switch (ext4) {
2515     case 0: /* FLDD */
2516         return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2517     case 8: /* FSTD */
2518         return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2519     default:
2520         return gen_illegal(ctx);
2521     }
2522 }
2523 
2524 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
2525                                 bool is_true, bool is_imm, bool is_dw)
2526 {
2527     target_long disp = assemble_12(insn) * 4;
2528     unsigned n = extract32(insn, 1, 1);
2529     unsigned c = extract32(insn, 13, 3);
2530     unsigned r = extract32(insn, 21, 5);
2531     unsigned cf = c * 2 + !is_true;
2532     TCGv dest, in1, in2, sv;
2533     DisasCond cond;
2534 
2535     nullify_over(ctx);
2536 
2537     if (is_imm) {
2538         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2539     } else {
2540         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2541     }
2542     in2 = load_gpr(ctx, r);
2543     dest = get_temp(ctx);
2544 
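         /* Compare by subtraction; condition 6 additionally needs the
            signed-overflow bit.  */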
2545     tcg_gen_sub_tl(dest, in1, in2);
2546 
2547     TCGV_UNUSED(sv);
2548     if (c == 6) {
2549         sv = do_sub_sv(ctx, dest, in1, in2);
2550     }
2551 
2552     cond = do_sub_cond(cf, dest, in1, in2, sv);
2553     return do_cbranch(ctx, disp, n, &cond);
2554 }
2555 
2556 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
2557                                 bool is_true, bool is_imm)
2558 {
2559     target_long disp = assemble_12(insn) * 4;
2560     unsigned n = extract32(insn, 1, 1);
2561     unsigned c = extract32(insn, 13, 3);
2562     unsigned r = extract32(insn, 21, 5);
2563     unsigned cf = c * 2 + !is_true;
2564     TCGv dest, in1, in2, sv, cb_msb;
2565     DisasCond cond;
2566 
2567     nullify_over(ctx);
2568 
2569     if (is_imm) {
2570         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2571     } else {
2572         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2573     }
2574     in2 = load_gpr(ctx, r);
2575     dest = dest_gpr(ctx, r);
2576     TCGV_UNUSED(sv);
2577     TCGV_UNUSED(cb_msb);
2578 
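         /* Conditions 4 and 5 need the carry out of the addition, so use
            add2; condition 6 needs the signed-overflow bit.  */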
2579     switch (c) {
2580     default:
2581         tcg_gen_add_tl(dest, in1, in2);
2582         break;
2583     case 4: case 5:
2584         cb_msb = get_temp(ctx);
2585         tcg_gen_movi_tl(cb_msb, 0);
2586         tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2587         break;
2588     case 6:
2589         tcg_gen_add_tl(dest, in1, in2);
2590         sv = do_add_sv(ctx, dest, in1, in2);
2591         break;
2592     }
2593 
2594     cond = do_cond(cf, dest, cb_msb, sv);
2595     return do_cbranch(ctx, disp, n, &cond);
2596 }
2597 
2598 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
2599 {
2600     target_long disp = assemble_12(insn) * 4;
2601     unsigned n = extract32(insn, 1, 1);
2602     unsigned c = extract32(insn, 15, 1);
2603     unsigned r = extract32(insn, 16, 5);
2604     unsigned p = extract32(insn, 21, 5);
2605     unsigned i = extract32(insn, 26, 1);
2606     TCGv tmp, tcg_r;
2607     DisasCond cond;
2608 
2609     nullify_over(ctx);
2610 
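         /* Shift the selected bit into the sign position and test it with
            a signed comparison against zero.  */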
2611     tmp = tcg_temp_new();
2612     tcg_r = load_gpr(ctx, r);
2613     if (i) {
2614         tcg_gen_shli_tl(tmp, tcg_r, p);
2615     } else {
2616         tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2617     }
2618 
2619     cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2620     tcg_temp_free(tmp);
2621     return do_cbranch(ctx, disp, n, &cond);
2622 }
2623 
2624 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2625 {
2626     target_long disp = assemble_12(insn) * 4;
2627     unsigned n = extract32(insn, 1, 1);
2628     unsigned c = extract32(insn, 13, 3);
2629     unsigned t = extract32(insn, 16, 5);
2630     unsigned r = extract32(insn, 21, 5);
2631     TCGv dest;
2632     DisasCond cond;
2633 
2634     nullify_over(ctx);
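         /* Copy the register or 5-bit immediate into r, then branch on the
            SED condition of the value just moved.  */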
2635 
2636     dest = dest_gpr(ctx, r);
2637     if (is_imm) {
2638         tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2639     } else if (t == 0) {
2640         tcg_gen_movi_tl(dest, 0);
2641     } else {
2642         tcg_gen_mov_tl(dest, cpu_gr[t]);
2643     }
2644 
2645     cond = do_sed_cond(c, dest);
2646     return do_cbranch(ctx, disp, n, &cond);
2647 }
2648 
2649 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2650                                     const DisasInsn *di)
2651 {
2652     unsigned rt = extract32(insn, 0, 5);
2653     unsigned c = extract32(insn, 13, 3);
2654     unsigned r1 = extract32(insn, 16, 5);
2655     unsigned r2 = extract32(insn, 21, 5);
2656     TCGv dest;
2657 
2658     if (c) {
2659         nullify_over(ctx);
2660     }
2661 
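         /* r1 == 0 is a plain logical shift right of r2; r1 == r2 is a
            32-bit rotate right; otherwise shift the 64-bit pair r1:r2.  */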
2662     dest = dest_gpr(ctx, rt);
2663     if (r1 == 0) {
2664         tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2665         tcg_gen_shr_tl(dest, dest, cpu_sar);
2666     } else if (r1 == r2) {
2667         TCGv_i32 t32 = tcg_temp_new_i32();
2668         tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2669         tcg_gen_rotr_i32(t32, t32, cpu_sar);
2670         tcg_gen_extu_i32_tl(dest, t32);
2671         tcg_temp_free_i32(t32);
2672     } else {
2673         TCGv_i64 t = tcg_temp_new_i64();
2674         TCGv_i64 s = tcg_temp_new_i64();
2675 
2676         tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2677         tcg_gen_extu_tl_i64(s, cpu_sar);
2678         tcg_gen_shr_i64(t, t, s);
2679         tcg_gen_trunc_i64_tl(dest, t);
2680 
2681         tcg_temp_free_i64(t);
2682         tcg_temp_free_i64(s);
2683     }
2684     save_gpr(ctx, rt, dest);
2685 
2686     /* Install the new nullification.  */
2687     cond_free(&ctx->null_cond);
2688     if (c) {
2689         ctx->null_cond = do_sed_cond(c, dest);
2690     }
2691     return nullify_end(ctx, DISAS_NEXT);
2692 }
2693 
2694 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2695                                      const DisasInsn *di)
2696 {
2697     unsigned rt = extract32(insn, 0, 5);
2698     unsigned cpos = extract32(insn, 5, 5);
2699     unsigned c = extract32(insn, 13, 3);
2700     unsigned r1 = extract32(insn, 16, 5);
2701     unsigned r2 = extract32(insn, 21, 5);
2702     unsigned sa = 31 - cpos;
2703     TCGv dest, t2;
2704 
2705     if (c) {
2706         nullify_over(ctx);
2707     }
2708 
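         /* sa = 31 - cpos converts the big-endian bit position into a
            right-shift amount; r1 == r2 again degenerates to a rotate.  */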
2709     dest = dest_gpr(ctx, rt);
2710     t2 = load_gpr(ctx, r2);
2711     if (r1 == r2) {
2712         TCGv_i32 t32 = tcg_temp_new_i32();
2713         tcg_gen_trunc_tl_i32(t32, t2);
2714         tcg_gen_rotri_i32(t32, t32, sa);
2715         tcg_gen_extu_i32_tl(dest, t32);
2716         tcg_temp_free_i32(t32);
2717     } else if (r1 == 0) {
2718         tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2719     } else {
2720         TCGv t0 = tcg_temp_new();
2721         tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2722         tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2723         tcg_temp_free(t0);
2724     }
2725     save_gpr(ctx, rt, dest);
2726 
2727     /* Install the new nullification.  */
2728     cond_free(&ctx->null_cond);
2729     if (c) {
2730         ctx->null_cond = do_sed_cond(c, dest);
2731     }
2732     return nullify_end(ctx, DISAS_NEXT);
2733 }
2734 
2735 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2736                                      const DisasInsn *di)
2737 {
2738     unsigned clen = extract32(insn, 0, 5);
2739     unsigned is_se = extract32(insn, 10, 1);
2740     unsigned c = extract32(insn, 13, 3);
2741     unsigned rt = extract32(insn, 16, 5);
2742     unsigned rr = extract32(insn, 21, 5);
2743     unsigned len = 32 - clen;
2744     TCGv dest, src, tmp;
2745 
2746     if (c) {
2747         nullify_over(ctx);
2748     }
2749 
2750     dest = dest_gpr(ctx, rt);
2751     src = load_gpr(ctx, rr);
2752     tmp = tcg_temp_new();
2753 
2754     /* Recall that SAR uses big-endian bit numbering; the xor below
            converts it into a conventional right-shift count.  */
2755     tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2756     if (is_se) {
2757         tcg_gen_sar_tl(dest, src, tmp);
2758         tcg_gen_sextract_tl(dest, dest, 0, len);
2759     } else {
2760         tcg_gen_shr_tl(dest, src, tmp);
2761         tcg_gen_extract_tl(dest, dest, 0, len);
2762     }
2763     tcg_temp_free(tmp);
2764     save_gpr(ctx, rt, dest);
2765 
2766     /* Install the new nullification.  */
2767     cond_free(&ctx->null_cond);
2768     if (c) {
2769         ctx->null_cond = do_sed_cond(c, dest);
2770     }
2771     return nullify_end(ctx, DISAS_NEXT);
2772 }
2773 
2774 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2775                                      const DisasInsn *di)
2776 {
2777     unsigned clen = extract32(insn, 0, 5);
2778     unsigned pos = extract32(insn, 5, 5);
2779     unsigned is_se = extract32(insn, 10, 1);
2780     unsigned c = extract32(insn, 13, 3);
2781     unsigned rt = extract32(insn, 16, 5);
2782     unsigned rr = extract32(insn, 21, 5);
2783     unsigned len = 32 - clen;
2784     unsigned cpos = 31 - pos;
2785     TCGv dest, src;
2786 
2787     if (c) {
2788         nullify_over(ctx);
2789     }
2790 
2791     dest = dest_gpr(ctx, rt);
2792     src = load_gpr(ctx, rr);
2793     if (is_se) {
2794         tcg_gen_sextract_tl(dest, src, cpos, len);
2795     } else {
2796         tcg_gen_extract_tl(dest, src, cpos, len);
2797     }
2798     save_gpr(ctx, rt, dest);
2799 
2800     /* Install the new nullification.  */
2801     cond_free(&ctx->null_cond);
2802     if (c) {
2803         ctx->null_cond = do_sed_cond(c, dest);
2804     }
2805     return nullify_end(ctx, DISAS_NEXT);
2806 }
2807 
2808 static const DisasInsn table_sh_ex[] = {
2809     { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2810     { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2811     { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2812     { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2813 };
2814 
2815 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2816                                       const DisasInsn *di)
2817 {
2818     unsigned clen = extract32(insn, 0, 5);
2819     unsigned cpos = extract32(insn, 5, 5);
2820     unsigned nz = extract32(insn, 10, 1);
2821     unsigned c = extract32(insn, 13, 3);
2822     target_long val = low_sextract(insn, 16, 5);
2823     unsigned rt = extract32(insn, 21, 5);
2824     unsigned len = 32 - clen;
2825     target_long mask0, mask1;
2826     TCGv dest;
2827 
2828     if (c) {
2829         nullify_over(ctx);
2830     }
2831     if (cpos + len > 32) {
2832         len = 32 - cpos;
2833     }
2834 
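         /* mask0 is the immediate deposited into zeros, mask1 into all ones;
            ANDing with mask1 and ORing with mask0 replaces only the field.  */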
2835     dest = dest_gpr(ctx, rt);
2836     mask0 = deposit64(0, cpos, len, val);
2837     mask1 = deposit64(-1, cpos, len, val);
2838 
2839     if (nz) {
2840         TCGv src = load_gpr(ctx, rt);
2841         if (mask1 != -1) {
2842             tcg_gen_andi_tl(dest, src, mask1);
2843             src = dest;
2844         }
2845         tcg_gen_ori_tl(dest, src, mask0);
2846     } else {
2847         tcg_gen_movi_tl(dest, mask0);
2848     }
2849     save_gpr(ctx, rt, dest);
2850 
2851     /* Install the new nullification.  */
2852     cond_free(&ctx->null_cond);
2853     if (c) {
2854         ctx->null_cond = do_sed_cond(c, dest);
2855     }
2856     return nullify_end(ctx, DISAS_NEXT);
2857 }
2858 
2859 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
2860                                     const DisasInsn *di)
2861 {
2862     unsigned clen = extract32(insn, 0, 5);
2863     unsigned cpos = extract32(insn, 5, 5);
2864     unsigned nz = extract32(insn, 10, 1);
2865     unsigned c = extract32(insn, 13, 3);
2866     unsigned rr = extract32(insn, 16, 5);
2867     unsigned rt = extract32(insn, 21, 5);
2868     unsigned rs = nz ? rt : 0;
2869     unsigned len = 32 - clen;
2870     TCGv dest, val;
2871 
2872     if (c) {
2873         nullify_over(ctx);
2874     }
2875     if (cpos + len > 32) {
2876         len = 32 - cpos;
2877     }
2878 
2879     dest = dest_gpr(ctx, rt);
2880     val = load_gpr(ctx, rr);
2881     if (rs == 0) {
2882         tcg_gen_deposit_z_tl(dest, val, cpos, len);
2883     } else {
2884         tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2885     }
2886     save_gpr(ctx, rt, dest);
2887 
2888     /* Install the new nullification.  */
2889     cond_free(&ctx->null_cond);
2890     if (c) {
2891         ctx->null_cond = do_sed_cond(c, dest);
2892     }
2893     return nullify_end(ctx, DISAS_NEXT);
2894 }
2895 
2896 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
2897                                     const DisasInsn *di)
2898 {
2899     unsigned clen = extract32(insn, 0, 5);
2900     unsigned nz = extract32(insn, 10, 1);
2901     unsigned i = extract32(insn, 12, 1);
2902     unsigned c = extract32(insn, 13, 3);
2903     unsigned rt = extract32(insn, 21, 5);
2904     unsigned rs = nz ? rt : 0;
2905     unsigned len = 32 - clen;
2906     TCGv val, mask, tmp, shift, dest;
2907     unsigned msb = 1U << (len - 1);
2908 
2909     if (c) {
2910         nullify_over(ctx);
2911     }
2912 
2913     if (i) {
2914         val = load_const(ctx, low_sextract(insn, 16, 5));
2915     } else {
2916         val = load_gpr(ctx, extract32(insn, 16, 5));
2917     }
2918     dest = dest_gpr(ctx, rt);
2919     shift = tcg_temp_new();
2920     tmp = tcg_temp_new();
2921 
2922     /* Convert big-endian bit numbering in SAR to left-shift.  */
2923     tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2924 
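         /* msb + (msb - 1) is a field of len consecutive one bits.  */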
2925     mask = tcg_const_tl(msb + (msb - 1));
2926     tcg_gen_and_tl(tmp, val, mask);
2927     if (rs) {
2928         tcg_gen_shl_tl(mask, mask, shift);
2929         tcg_gen_shl_tl(tmp, tmp, shift);
2930         tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2931         tcg_gen_or_tl(dest, dest, tmp);
2932     } else {
2933         tcg_gen_shl_tl(dest, tmp, shift);
2934     }
2935     tcg_temp_free(shift);
2936     tcg_temp_free(mask);
2937     tcg_temp_free(tmp);
2938     save_gpr(ctx, rt, dest);
2939 
2940     /* Install the new nullification.  */
2941     cond_free(&ctx->null_cond);
2942     if (c) {
2943         ctx->null_cond = do_sed_cond(c, dest);
2944     }
2945     return nullify_end(ctx, DISAS_NEXT);
2946 }
2947 
2948 static const DisasInsn table_depw[] = {
2949     { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2950     { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2951     { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2952 };
2953 
2954 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2955 {
2956     unsigned n = extract32(insn, 1, 1);
2957     unsigned b = extract32(insn, 21, 5);
2958     target_long disp = assemble_17(insn);
2959 
2960     /* unsigned s = low_uextract(insn, 13, 3); */
2961     /* ??? It seems like there should be a good way of using
2962        "be disp(sr2, r0)", the canonical gateway entry mechanism
2963        to our advantage.  But that appears to be inconvenient to
2964        manage alongside branch delay slots.  Therefore we handle
2965        entry into the gateway page via absolute address.  */
2966 
2967     /* Since we don't implement spaces, just branch.  Do notice the special
2968        case of "be disp(*,r0)" using a direct branch to disp, so that we can
2969        goto_tb to the TB containing the syscall.  */
2970     if (b == 0) {
2971         return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2972     } else {
2973         TCGv tmp = get_temp(ctx);
2974         tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2975         return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2976     }
2977 }
2978 
2979 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2980                               const DisasInsn *di)
2981 {
2982     unsigned n = extract32(insn, 1, 1);
2983     unsigned link = extract32(insn, 21, 5);
2984     target_long disp = assemble_17(insn);
2985 
2986     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2987 }
2988 
2989 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2990                                    const DisasInsn *di)
2991 {
2992     unsigned n = extract32(insn, 1, 1);
2993     target_long disp = assemble_22(insn);
2994 
2995     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2996 }
2997 
2998 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
2999                                const DisasInsn *di)
3000 {
3001     unsigned n = extract32(insn, 1, 1);
3002     unsigned rx = extract32(insn, 16, 5);
3003     unsigned link = extract32(insn, 21, 5);
3004     TCGv tmp = get_temp(ctx);
3005 
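         /* The branch target is IAOQ_Front + 8 + (GR[x] << 3).  */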
3006     tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3007     tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3008     return do_ibranch(ctx, tmp, link, n);
3009 }
3010 
3011 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3012                               const DisasInsn *di)
3013 {
3014     unsigned n = extract32(insn, 1, 1);
3015     unsigned rx = extract32(insn, 16, 5);
3016     unsigned rb = extract32(insn, 21, 5);
3017     TCGv dest;
3018 
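         /* The branch target is GR[b] + (GR[x] << 3); rx == 0 is plain BV.  */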
3019     if (rx == 0) {
3020         dest = load_gpr(ctx, rb);
3021     } else {
3022         dest = get_temp(ctx);
3023         tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3024         tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3025     }
3026     return do_ibranch(ctx, dest, 0, n);
3027 }
3028 
3029 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3030                                const DisasInsn *di)
3031 {
3032     unsigned n = extract32(insn, 1, 1);
3033     unsigned rb = extract32(insn, 21, 5);
3034     unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3035 
3036     return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3037 }
3038 
3039 static const DisasInsn table_branch[] = {
3040     { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3041     { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3042     { 0xe8004000u, 0xfc00fffdu, trans_blr },
3043     { 0xe800c000u, 0xfc00fffdu, trans_bv },
3044     { 0xe800d000u, 0xfc00dffcu, trans_bve },
3045 };
3046 
3047 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3048                                       const DisasInsn *di)
3049 {
3050     unsigned rt = extract32(insn, 0, 5);
3051     unsigned ra = extract32(insn, 21, 5);
3052     return do_fop_wew(ctx, rt, ra, di->f.wew);
3053 }
3054 
3055 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3056                                       const DisasInsn *di)
3057 {
3058     unsigned rt = assemble_rt64(insn);
3059     unsigned ra = assemble_ra64(insn);
3060     return do_fop_wew(ctx, rt, ra, di->f.wew);
3061 }
3062 
3063 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3064                                    const DisasInsn *di)
3065 {
3066     unsigned rt = extract32(insn, 0, 5);
3067     unsigned ra = extract32(insn, 21, 5);
3068     return do_fop_ded(ctx, rt, ra, di->f.ded);
3069 }
3070 
3071 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3072                                       const DisasInsn *di)
3073 {
3074     unsigned rt = extract32(insn, 0, 5);
3075     unsigned ra = extract32(insn, 21, 5);
3076     return do_fop_wed(ctx, rt, ra, di->f.wed);
3077 }
3078 
3079 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3080                                       const DisasInsn *di)
3081 {
3082     unsigned rt = assemble_rt64(insn);
3083     unsigned ra = extract32(insn, 21, 5);
3084     return do_fop_wed(ctx, rt, ra, di->f.wed);
3085 }
3086 
3087 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3088                                       const DisasInsn *di)
3089 {
3090     unsigned rt = extract32(insn, 0, 5);
3091     unsigned ra = extract32(insn, 21, 5);
3092     return do_fop_dew(ctx, rt, ra, di->f.dew);
3093 }
3094 
3095 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3096                                       const DisasInsn *di)
3097 {
3098     unsigned rt = extract32(insn, 0, 5);
3099     unsigned ra = assemble_ra64(insn);
3100     return do_fop_dew(ctx, rt, ra, di->f.dew);
3101 }
3102 
3103 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3104                                        const DisasInsn *di)
3105 {
3106     unsigned rt = extract32(insn, 0, 5);
3107     unsigned rb = extract32(insn, 16, 5);
3108     unsigned ra = extract32(insn, 21, 5);
3109     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3110 }
3111 
3112 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3113                                        const DisasInsn *di)
3114 {
3115     unsigned rt = assemble_rt64(insn);
3116     unsigned rb = assemble_rb64(insn);
3117     unsigned ra = assemble_ra64(insn);
3118     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3119 }
3120 
3121 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3122                                     const DisasInsn *di)
3123 {
3124     unsigned rt = extract32(insn, 0, 5);
3125     unsigned rb = extract32(insn, 16, 5);
3126     unsigned ra = extract32(insn, 21, 5);
3127     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3128 }
3129 
3130 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3131 {
3132     tcg_gen_mov_i32(dst, src);
3133 }
3134 
3135 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3136 {
3137     tcg_gen_mov_i64(dst, src);
3138 }
3139 
3140 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3141 {
3142     tcg_gen_andi_i32(dst, src, INT32_MAX);
3143 }
3144 
3145 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3146 {
3147     tcg_gen_andi_i64(dst, src, INT64_MAX);
3148 }
3149 
3150 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3151 {
3152     tcg_gen_xori_i32(dst, src, INT32_MIN);
3153 }
3154 
3155 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3156 {
3157     tcg_gen_xori_i64(dst, src, INT64_MIN);
3158 }
3159 
3160 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3161 {
3162     tcg_gen_ori_i32(dst, src, INT32_MIN);
3163 }
3164 
3165 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3166 {
3167     tcg_gen_ori_i64(dst, src, INT64_MIN);
3168 }
3169 
3170 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3171                                unsigned y, unsigned c)
3172 {
3173     TCGv_i32 ta, tb, tc, ty;
3174 
3175     nullify_over(ctx);
3176 
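         /* The helper updates the FPSR comparison result; FTEST later reads
            it back through the fr0_shadow copy.  */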
3177     ta = load_frw0_i32(ra);
3178     tb = load_frw0_i32(rb);
3179     ty = tcg_const_i32(y);
3180     tc = tcg_const_i32(c);
3181 
3182     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3183 
3184     tcg_temp_free_i32(ta);
3185     tcg_temp_free_i32(tb);
3186     tcg_temp_free_i32(ty);
3187     tcg_temp_free_i32(tc);
3188 
3189     return nullify_end(ctx, DISAS_NEXT);
3190 }
3191 
3192 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3193                                      const DisasInsn *di)
3194 {
3195     unsigned c = extract32(insn, 0, 5);
3196     unsigned y = extract32(insn, 13, 3);
3197     unsigned rb = extract32(insn, 16, 5);
3198     unsigned ra = extract32(insn, 21, 5);
3199     return do_fcmp_s(ctx, ra, rb, y, c);
3200 }
3201 
3202 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3203                                      const DisasInsn *di)
3204 {
3205     unsigned c = extract32(insn, 0, 5);
3206     unsigned y = extract32(insn, 13, 3);
3207     unsigned rb = assemble_rb64(insn);
3208     unsigned ra = assemble_ra64(insn);
3209     return do_fcmp_s(ctx, ra, rb, y, c);
3210 }
3211 
3212 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3213                                   const DisasInsn *di)
3214 {
3215     unsigned c = extract32(insn, 0, 5);
3216     unsigned y = extract32(insn, 13, 3);
3217     unsigned rb = extract32(insn, 16, 5);
3218     unsigned ra = extract32(insn, 21, 5);
3219     TCGv_i64 ta, tb;
3220     TCGv_i32 tc, ty;
3221 
3222     nullify_over(ctx);
3223 
3224     ta = load_frd0(ra);
3225     tb = load_frd0(rb);
3226     ty = tcg_const_i32(y);
3227     tc = tcg_const_i32(c);
3228 
3229     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3230 
3231     tcg_temp_free_i64(ta);
3232     tcg_temp_free_i64(tb);
3233     tcg_temp_free_i32(ty);
3234     tcg_temp_free_i32(tc);
3235 
3236     return nullify_end(ctx, DISAS_NEXT);
3237 }
3238 
3239 static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
3240                                    const DisasInsn *di)
3241 {
3242     unsigned y = extract32(insn, 13, 3);
3243     unsigned cbit = (y ^ 1) - 1;
3244     TCGv t;
3245 
3246     nullify_over(ctx);
3247 
3248     t = tcg_temp_new();
3249     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3250     tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3251     ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3252     tcg_temp_free(t);
3253 
3254     return nullify_end(ctx, DISAS_NEXT);
3255 }
3256 
3257 static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
3258                                    const DisasInsn *di)
3259 {
3260     unsigned c = extract32(insn, 0, 5);
3261     int mask;
3262     bool inv = false;
3263     TCGv t;
3264 
3265     nullify_over(ctx);
3266 
3267     t = tcg_temp_new();
3268     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3269 
3270     switch (c) {
3271     case 0: /* simple */
3272         tcg_gen_andi_tl(t, t, 0x4000000);
3273         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3274         goto done;
3275     case 2: /* rej */
3276         inv = true;
3277         /* fallthru */
3278     case 1: /* acc */
3279         mask = 0x43ff800;
3280         break;
3281     case 6: /* rej8 */
3282         inv = true;
3283         /* fallthru */
3284     case 5: /* acc8 */
3285         mask = 0x43f8000;
3286         break;
3287     case 9: /* acc6 */
3288         mask = 0x43e0000;
3289         break;
3290     case 13: /* acc4 */
3291         mask = 0x4380000;
3292         break;
3293     case 17: /* acc2 */
3294         mask = 0x4200000;
3295         break;
3296     default:
3297         return gen_illegal(ctx);
3298     }
3299     if (inv) {
3300         TCGv c = load_const(ctx, mask);
3301         tcg_gen_or_tl(t, t, c);
3302         ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3303     } else {
3304         tcg_gen_andi_tl(t, t, mask);
3305         ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3306     }
3307  done:
3308     return nullify_end(ctx, DISAS_NEXT);
3309 }
3310 
3311 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
3312                                  const DisasInsn *di)
3313 {
3314     unsigned rt = extract32(insn, 0, 5);
3315     unsigned rb = assemble_rb64(insn);
3316     unsigned ra = assemble_ra64(insn);
3317     TCGv_i64 a, b;
3318 
3319     nullify_over(ctx);
3320 
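         /* XMPYU: unsigned 32x32 -> 64-bit multiply of the FP register
            words, with the product stored to the double-word register.  */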
3321     a = load_frw0_i64(ra);
3322     b = load_frw0_i64(rb);
3323     tcg_gen_mul_i64(a, a, b);
3324     save_frd(rt, a);
3325     tcg_temp_free_i64(a);
3326     tcg_temp_free_i64(b);
3327 
3328     return nullify_end(ctx, DISAS_NEXT);
3329 }
3330 
3331 #define FOP_DED  trans_fop_ded, .f.ded
3332 #define FOP_DEDD trans_fop_dedd, .f.dedd
3333 
3334 #define FOP_WEW  trans_fop_wew_0c, .f.wew
3335 #define FOP_DEW  trans_fop_dew_0c, .f.dew
3336 #define FOP_WED  trans_fop_wed_0c, .f.wed
3337 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3338 
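     /* Each FOP_* macro expands to the trans_* callback plus the start of a
        designated initializer, so the entries below read "FOP_XXX = helper".  */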
3339 static const DisasInsn table_float_0c[] = {
3340     /* floating point class zero */
3341     { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3342     { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3343     { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3344     { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3345     { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3346     { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3347 
3348     { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3349     { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3350     { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3351     { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3352     { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3353     { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3354 
3355     /* floating point class three */
3356     { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3357     { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3358     { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3359     { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3360 
3361     { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3362     { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3363     { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3364     { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3365 
3366     /* floating point class one */
3367     /* float/float */
3368     { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3369     { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3370     /* int/float */
3371     { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3372     { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3373     { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3374     { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3375     /* float/int */
3376     { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3377     { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3378     { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3379     { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3380     /* float/int truncate */
3381     { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3382     { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3383     { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3384     { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3385     /* uint/float */
3386     { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3387     { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3388     { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3389     { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3390     /* float/uint */
3391     { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3392     { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3393     { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3394     { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3395     /* float/uint truncate */
3396     { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3397     { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3398     { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3399     { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3400 
3401     /* floating point class two */
3402     { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3403     { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3404     { 0x30002420, 0xffffffe0, trans_ftest_q },
3405     { 0x30000420, 0xffff1fff, trans_ftest_t },
3406 
3407     /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3408        This is machine/revision == 0, which is reserved for the simulator.  */
3409     { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3410 };
3411 
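/* Rebind the 32-bit FOP_* macros to the 0E-format dispatchers for the
   next table.  FOP_DED and FOP_DEDD are shared between the 0C and 0E
   tables and remain defined until after both.  */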
3412 #undef FOP_WEW
3413 #undef FOP_DEW
3414 #undef FOP_WED
3415 #undef FOP_WEWW
3416 #define FOP_WEW  trans_fop_wew_0e, .f.wew
3417 #define FOP_DEW  trans_fop_dew_0e, .f.dew
3418 #define FOP_WED  trans_fop_wed_0e, .f.wed
3419 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3420 
3421 static const DisasInsn table_float_0e[] = {
3422     /* floating point class zero */
3423     { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3424     { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3425     { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3426     { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3427     { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3428     { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3429 
3430     { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3431     { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3432     { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3433     { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3434     { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3435     { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3436 
3437     /* floating point class three */
3438     { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3439     { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3440     { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3441     { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3442 
3443     { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3444     { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3445     { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3446     { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3447 
3448     { 0x38004700, 0xfc00ef60, trans_xmpyu },
3449 
3450     /* floating point class one */
3451     /* float/float */
3452     { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3453     { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3454     /* int/float */
3455     { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3456     { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3457     { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3458     { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3459     /* float/int */
3460     { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3461     { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3462     { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3463     { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3464     /* float/int truncate */
3465     { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3466     { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3467     { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3468     { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3469     /* uint/float */
3470     { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3471     { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3472     { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3473     { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3474     /* float/uint */
3475     { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3476     { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3477     { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3478     { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3479     /* float/uint truncate */
3480     { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3481     { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3482     { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3483     { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3484 
3485     /* floating point class two */
3486     { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3487     { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3488 };
3489 
3490 #undef FOP_WEW
3491 #undef FOP_DEW
3492 #undef FOP_WED
3493 #undef FOP_WEWW
3494 #undef FOP_DED
3495 #undef FOP_DEDD
3496 
3497 /* Convert the fmpyadd single-precision register encodings to standard.  */
3498 static inline int fmpyadd_s_reg(unsigned r)
3499 {
3500     return (r & 16) * 2 + 16 + (r & 15);
3501 }
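/* FMPYADD and FMPYSUB (major opcodes 0x06 and 0x26): one multiply and
   one independent add or subtract packed into a single instruction.  */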
3502 
3503 static DisasJumpType trans_fmpyadd(DisasContext *ctx,
3504                                    uint32_t insn, bool is_sub)
3505 {
3506     unsigned tm = extract32(insn, 0, 5);
3507     unsigned f = extract32(insn, 5, 1);
3508     unsigned ra = extract32(insn, 6, 5);
3509     unsigned ta = extract32(insn, 11, 5);
3510     unsigned rm2 = extract32(insn, 16, 5);
3511     unsigned rm1 = extract32(insn, 21, 5);
3512 
3513     nullify_over(ctx);
3514 
3515     /* Independent multiply & add/sub, with undefined behaviour
3516        if outputs overlap inputs.  */
3517     if (f == 0) {
3518         tm = fmpyadd_s_reg(tm);
3519         ra = fmpyadd_s_reg(ra);
3520         ta = fmpyadd_s_reg(ta);
3521         rm2 = fmpyadd_s_reg(rm2);
3522         rm1 = fmpyadd_s_reg(rm1);
3523         do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3524         do_fop_weww(ctx, ta, ta, ra,
3525                     is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3526     } else {
3527         do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3528         do_fop_dedd(ctx, ta, ta, ra,
3529                     is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3530     }
3531 
3532     return nullify_end(ctx, DISAS_NEXT);
3533 }
3534 
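/* FMPYFADD / FMPYNFADD, single precision: fused multiply-add, with the
   product negated when the 'neg' bit selects the FMPYNFADD form.  */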
3535 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3536                                       const DisasInsn *di)
3537 {
3538     unsigned rt = assemble_rt64(insn);
3539     unsigned neg = extract32(insn, 5, 1);
3540     unsigned rm1 = assemble_ra64(insn);
3541     unsigned rm2 = assemble_rb64(insn);
3542     unsigned ra3 = assemble_rc64(insn);
3543     TCGv_i32 a, b, c;
3544 
3545     nullify_over(ctx);
3546     a = load_frw0_i32(rm1);
3547     b = load_frw0_i32(rm2);
3548     c = load_frw0_i32(ra3);
3549 
3550     if (neg) {
3551         gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3552     } else {
3553         gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3554     }
3555 
3556     tcg_temp_free_i32(b);
3557     tcg_temp_free_i32(c);
3558     save_frw_i32(rt, a);
3559     tcg_temp_free_i32(a);
3560     return nullify_end(ctx, DISAS_NEXT);
3561 }
3562 
3563 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3564                                       const DisasInsn *di)
3565 {
3566     unsigned rt = extract32(insn, 0, 5);
3567     unsigned neg = extract32(insn, 5, 1);
3568     unsigned rm1 = extract32(insn, 21, 5);
3569     unsigned rm2 = extract32(insn, 16, 5);
3570     unsigned ra3 = assemble_rc64(insn);
3571     TCGv_i64 a, b, c;
3572 
3573     nullify_over(ctx);
3574     a = load_frd0(rm1);
3575     b = load_frd0(rm2);
3576     c = load_frd0(ra3);
3577 
3578     if (neg) {
3579         gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3580     } else {
3581         gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3582     }
3583 
3584     tcg_temp_free_i64(b);
3585     tcg_temp_free_i64(c);
3586     save_frd(rt, a);
3587     tcg_temp_free_i64(a);
3588     return nullify_end(ctx, DISAS_NEXT);
3589 }
3590 
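/* Fused multiply-add operations, dispatched from major opcode 0x2E.  */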
3591 static const DisasInsn table_fp_fused[] = {
3592     { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3593     { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
3594 };
3595 
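/* Search a decode table for the first entry whose mask/insn pair
   matches, and hand the instruction to its translator.  Anything
   unmatched is treated as an illegal instruction.  */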
3596 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
3597                                          const DisasInsn table[], size_t n)
3598 {
3599     size_t i;
3600     for (i = 0; i < n; ++i) {
3601         if ((insn & table[i].mask) == table[i].insn) {
3602             return table[i].trans(ctx, insn, &table[i]);
3603         }
3604     }
3605     return gen_illegal(ctx);
3606 }
3607 
3608 #define translate_table(ctx, insn, table) \
3609     translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3610 
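/* Top-level decode: dispatch on the 6-bit major opcode in bits [31:26]
   of the instruction word.  */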
3611 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
3612 {
3613     uint32_t opc = extract32(insn, 26, 6);
3614 
3615     switch (opc) {
3616     case 0x00: /* system op */
3617         return translate_table(ctx, insn, table_system);
3618     case 0x01:
3619         return translate_table(ctx, insn, table_mem_mgmt);
3620     case 0x02:
3621         return translate_table(ctx, insn, table_arith_log);
3622     case 0x03:
3623         return translate_table(ctx, insn, table_index_mem);
3624     case 0x06:
3625         return trans_fmpyadd(ctx, insn, false);
3626     case 0x08:
3627         return trans_ldil(ctx, insn);
3628     case 0x09:
3629         return trans_copr_w(ctx, insn);
3630     case 0x0A:
3631         return trans_addil(ctx, insn);
3632     case 0x0B:
3633         return trans_copr_dw(ctx, insn);
3634     case 0x0C:
3635         return translate_table(ctx, insn, table_float_0c);
3636     case 0x0D:
3637         return trans_ldo(ctx, insn);
3638     case 0x0E:
3639         return translate_table(ctx, insn, table_float_0e);
3640 
3641     case 0x10:
3642         return trans_load(ctx, insn, false, MO_UB);
3643     case 0x11:
3644         return trans_load(ctx, insn, false, MO_TEUW);
3645     case 0x12:
3646         return trans_load(ctx, insn, false, MO_TEUL);
3647     case 0x13:
3648         return trans_load(ctx, insn, true, MO_TEUL);
3649     case 0x16:
3650         return trans_fload_mod(ctx, insn);
3651     case 0x17:
3652         return trans_load_w(ctx, insn);
3653     case 0x18:
3654         return trans_store(ctx, insn, false, MO_UB);
3655     case 0x19:
3656         return trans_store(ctx, insn, false, MO_TEUW);
3657     case 0x1A:
3658         return trans_store(ctx, insn, false, MO_TEUL);
3659     case 0x1B:
3660         return trans_store(ctx, insn, true, MO_TEUL);
3661     case 0x1E:
3662         return trans_fstore_mod(ctx, insn);
3663     case 0x1F:
3664         return trans_store_w(ctx, insn);
3665 
3666     case 0x20:
3667         return trans_cmpb(ctx, insn, true, false, false);
3668     case 0x21:
3669         return trans_cmpb(ctx, insn, true, true, false);
3670     case 0x22:
3671         return trans_cmpb(ctx, insn, false, false, false);
3672     case 0x23:
3673         return trans_cmpb(ctx, insn, false, true, false);
3674     case 0x24:
3675         return trans_cmpiclr(ctx, insn);
3676     case 0x25:
3677         return trans_subi(ctx, insn);
3678     case 0x26:
3679         return trans_fmpyadd(ctx, insn, true);
3680     case 0x27:
3681         return trans_cmpb(ctx, insn, true, false, true);
3682     case 0x28:
3683         return trans_addb(ctx, insn, true, false);
3684     case 0x29:
3685         return trans_addb(ctx, insn, true, true);
3686     case 0x2A:
3687         return trans_addb(ctx, insn, false, false);
3688     case 0x2B:
3689         return trans_addb(ctx, insn, false, true);
3690     case 0x2C:
3691     case 0x2D:
3692         return trans_addi(ctx, insn);
3693     case 0x2E:
3694         return translate_table(ctx, insn, table_fp_fused);
3695     case 0x2F:
3696         return trans_cmpb(ctx, insn, false, false, true);
3697 
3698     case 0x30:
3699     case 0x31:
3700         return trans_bb(ctx, insn);
3701     case 0x32:
3702         return trans_movb(ctx, insn, false);
3703     case 0x33:
3704         return trans_movb(ctx, insn, true);
3705     case 0x34:
3706         return translate_table(ctx, insn, table_sh_ex);
3707     case 0x35:
3708         return translate_table(ctx, insn, table_depw);
3709     case 0x38:
3710         return trans_be(ctx, insn, false);
3711     case 0x39:
3712         return trans_be(ctx, insn, true);
3713     case 0x3A:
3714         return translate_table(ctx, insn, table_branch);
3715 
3716     case 0x04: /* spopn */
3717     case 0x05: /* diag */
3718     case 0x0F: /* product specific */
3719         break;
3720 
3721     case 0x07: /* unassigned */
3722     case 0x15: /* unassigned */
3723     case 0x1D: /* unassigned */
3724     case 0x37: /* unassigned */
3725     case 0x3F: /* unassigned */
3726     default:
3727         break;
3728     }
3729     return gen_illegal(ctx);
3730 }
3731 
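/* Initialize translation state for a new TB: the front of the
   instruction address queue comes from TB->PC and the back from
   TB->CS_BASE, and the instruction budget is capped so that
   translation never runs past the end of the current page.  */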
3732 static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
3733                                       CPUState *cs, int max_insns)
3734 {
3735     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3736     TranslationBlock *tb = ctx->base.tb;
3737     int i, bound;
3738 
3739     ctx->cs = cs;
3740     ctx->iaoq_f = tb->pc;
3741     ctx->iaoq_b = tb->cs_base;
3742     ctx->iaoq_n = -1;
3743     TCGV_UNUSED(ctx->iaoq_n_var);
3744 
3745     ctx->ntemps = 0;
3746     for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
3747         TCGV_UNUSED(ctx->temps[i]);
3748     }
3749 
3750     bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
3751     return MIN(max_insns, bound);
3752 }
3753 
3754 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3755 {
3756     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3757 
3758     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
3759     ctx->null_cond = cond_make_f();
3760     ctx->psw_n_nonzero = false;
3761     if (ctx->base.tb->flags & 1) {
3762         ctx->null_cond.c = TCG_COND_ALWAYS;
3763         ctx->psw_n_nonzero = true;
3764     }
3765     ctx->null_lab = NULL;
3766 }
3767 
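/* Record both halves of the instruction address queue with the insn,
   so that restore_state_to_opc can recover them after a fault.  */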
3768 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3769 {
3770     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3771 
3772     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3773 }
3774 
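/* A guest breakpoint was hit: raise EXCP_DEBUG and advance pc_next
   past the breakpointed insn so that it is accounted to this TB.  */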
3775 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3776                                       const CPUBreakpoint *bp)
3777 {
3778     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3779 
3780     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3781     ctx->base.pc_next = ctx->iaoq_f + 4;
3782     return true;
3783 }
3784 
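/* Translate a single instruction and then advance the instruction
   address queue, emitting a goto_tb when the next insn is already
   known to follow linearly within the same page.  */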
3785 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3786 {
3787     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3788     CPUHPPAState *env = cs->env_ptr;
3789     DisasJumpType ret;
3790     int i, n;
3791 
3792     /* Execute one insn.  */
3793     if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
3794         ret = do_page_zero(ctx);
3795         assert(ret != DISAS_NEXT);
3796     } else {
3797         /* Always fetch the insn, even if nullified, so that we check
3798            the page permissions for execute.  */
3799         uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3800 
3801         /* Set up the IA queue for the next insn.
3802            This will be overwritten by a branch.  */
3803         if (ctx->iaoq_b == -1) {
3804             ctx->iaoq_n = -1;
3805             ctx->iaoq_n_var = get_temp(ctx);
3806             tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3807         } else {
3808             ctx->iaoq_n = ctx->iaoq_b + 4;
3809             TCGV_UNUSED(ctx->iaoq_n_var);
3810         }
3811 
3812         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3813             ctx->null_cond.c = TCG_COND_NEVER;
3814             ret = DISAS_NEXT;
3815         } else {
3816             ret = translate_one(ctx, insn);
3817             assert(ctx->null_lab == NULL);
3818         }
3819     }
3820 
3821     /* Free any temporaries allocated.  */
3822     for (i = 0, n = ctx->ntemps; i < n; ++i) {
3823         tcg_temp_free(ctx->temps[i]);
3824         TCGV_UNUSED(ctx->temps[i]);
3825     }
3826     ctx->ntemps = 0;
3827 
3828     /* Advance the insn queue.  */
3829     /* ??? The non-linear instruction restriction is purely due to
3830        the debugging dump.  Otherwise we *could* follow unconditional
3831        branches within the same page.  */
3832     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3833         if (ctx->null_cond.c == TCG_COND_NEVER
3834             || ctx->null_cond.c == TCG_COND_ALWAYS) {
3835             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3836             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3837             ret = DISAS_NORETURN;
3838         } else {
3839             ret = DISAS_IAQ_N_STALE;
3840         }
3841     }
3842     ctx->iaoq_f = ctx->iaoq_b;
3843     ctx->iaoq_b = ctx->iaoq_n;
3844     ctx->base.is_jmp = ret;
3845 
3846     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3847         return;
3848     }
3849     if (ctx->iaoq_f == -1) {
3850         tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3851         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3852         nullify_save(ctx);
3853         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3854     } else if (ctx->iaoq_b == -1) {
3855         tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3856     }
3857 }
3858 
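/* Finish the TB according to how translation stopped: update the
   IAOQ registers and saved nullification state as needed, then either
   single-step into the debugger or chain via lookup_and_goto_ptr.  */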
3859 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
3860 {
3861     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3862 
3863     switch (ctx->base.is_jmp) {
3864     case DISAS_NORETURN:
3865         break;
3866     case DISAS_TOO_MANY:
3867     case DISAS_IAQ_N_STALE:
3868         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
3869         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
3870         nullify_save(ctx);
3871         /* FALLTHRU */
3872     case DISAS_IAQ_N_UPDATED:
3873         if (ctx->base.singlestep_enabled) {
3874             gen_excp_1(EXCP_DEBUG);
3875         } else {
3876             tcg_gen_lookup_and_goto_ptr();
3877         }
3878         break;
3879     default:
3880         g_assert_not_reached();
3881     }
3882 
3883     /* We don't actually use this value during normal translation, but
3884        pc_next must be kept up to date for the generic main loop.  */
3885     ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
3886 }
3887 
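/* For -d in_asm logging: the page-zero entry points handled by
   do_page_zero have no real code behind them, so print a symbolic
   name instead of disassembling.  */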
3888 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3889 {
3890     TranslationBlock *tb = dcbase->tb;
3891 
3892     switch (tb->pc) {
3893     case 0x00:
3894         qemu_log("IN:\n0x00000000:  (null)\n");
3895         break;
3896     case 0xb0:
3897         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
3898         break;
3899     case 0xe0:
3900         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
3901         break;
3902     case 0x100:
3903         qemu_log("IN:\n0x00000100:  syscall\n");
3904         break;
3905     default:
3906         qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3907         log_target_disas(cs, tb->pc, tb->size, 1);
3908         break;
3909     }
3910 }
3911 
3912 static const TranslatorOps hppa_tr_ops = {
3913     .init_disas_context = hppa_tr_init_disas_context,
3914     .tb_start           = hppa_tr_tb_start,
3915     .insn_start         = hppa_tr_insn_start,
3916     .breakpoint_check   = hppa_tr_breakpoint_check,
3917     .translate_insn     = hppa_tr_translate_insn,
3918     .tb_stop            = hppa_tr_tb_stop,
3919     .disas_log          = hppa_tr_disas_log,
3920 };
3921 
3922 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3924 {
3925     DisasContext ctx;
3926     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3927 }
3928 
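/* Restore IAOQ_F/IAOQ_B from the values recorded at insn_start.
   A back value of -1 means it was not a constant at translation time,
   so the existing register contents are left untouched.  */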
3929 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3930                           target_ulong *data)
3931 {
3932     env->iaoq_f = data[0];
3933     if (data[1] != -1) {
3934         env->iaoq_b = data[1];
3935     }
3936     /* Since we were executing the instruction at IAOQ_F, and took some
3937        sort of action that provoked the cpu_restore_state, we can infer
3938        that the instruction was not nullified.  */
3939     env->psw_n = 0;
3940 }
3941