xref: /openbmc/qemu/target/hppa/translate.c (revision 1c2adb95)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
/* A computed nullification condition: compare A0 <c> A1.
   The flags record aliasing so cond_free knows what may be freed:
   a0_is_n means A0 aliases the global cpu_psw_n (not a temp);
   a1_is_0 means A1 is an implicit constant zero that has not yet been
   materialized as a TCG value (see cond_prep).  */
typedef struct DisasCond {
    TCGCond c;
    TCGv a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
39 
/* Per-translation-block disassembly state.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ulong iaoq_f;    /* instruction address offset queue, front */
    target_ulong iaoq_b;    /* instruction address offset queue, back */
    target_ulong iaoq_n;    /* address following the back entry; -1 if unknown */
    TCGv iaoq_n_var;        /* variable holding iaoq_n when not known statically */

    int ntemps;             /* number of live entries in temps[] */
    TCGv temps[8];          /* temps handed out by get_temp */

    DisasCond null_cond;    /* condition under which the next insn is nullified */
    TCGLabel *null_lab;     /* label skipping a nullified insn body, or NULL */

    bool psw_n_nonzero;     /* true if PSW[N] may currently hold a nonzero value */
} DisasContext;
57 
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  (IAQ = instruction address offset queue.)  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1
69 
/* One entry of the instruction decode table: an opcode matches when
   (op & mask) == insn, and is handled by trans().  The union supplies a
   typed callback for the common operation shapes; in the member names,
   't' is a target-sized TCGv, 'w' an i32 ("word"), 'd' an i64 ("double"),
   and 'e' the cpu_env argument.  */
typedef struct DisasInsn {
    uint32_t insn, mask;
    DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
                           const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv, TCGv, TCGv);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
84 
/* global register indexes */
static TCGv cpu_gr[32];        /* general registers; [0] left unset (reads as 0) */
static TCGv cpu_iaoq_f;        /* instruction address offset queue, front */
static TCGv cpu_iaoq_b;        /* instruction address offset queue, back */
static TCGv cpu_sar;           /* SAR shift amount register */
static TCGv cpu_psw_n;         /* PSW nullify bit */
static TCGv cpu_psw_v;         /* PSW overflow bit */
static TCGv cpu_psw_cb;        /* PSW carry/borrow bit vector */
static TCGv cpu_psw_cb_msb;    /* most significant carry/borrow bit */
static TCGv cpu_cr26;          /* control register 26 */
static TCGv cpu_cr27;          /* control register 27 */
96 
97 #include "exec/gen-icount.h"
98 
/* Allocate the TCG globals that shadow fields of CPUHPPAState.
   Called once at startup.  */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(sar),
        DEF_VAR(cr26),
        DEF_VAR(cr27),
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };

    int i;

    /* %r0 always reads as zero, so no global is allocated for it;
       load_gpr/save_gpr special-case register 0.  */
    TCGV_UNUSED(cpu_gr[0]);
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }
}
140 
141 static DisasCond cond_make_f(void)
142 {
143     DisasCond r = { .c = TCG_COND_NEVER };
144     TCGV_UNUSED(r.a0);
145     TCGV_UNUSED(r.a1);
146     return r;
147 }
148 
149 static DisasCond cond_make_n(void)
150 {
151     DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
152     r.a0 = cpu_psw_n;
153     TCGV_UNUSED(r.a1);
154     return r;
155 }
156 
157 static DisasCond cond_make_0(TCGCond c, TCGv a0)
158 {
159     DisasCond r = { .c = c, .a1_is_0 = true };
160 
161     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
162     r.a0 = tcg_temp_new();
163     tcg_gen_mov_tl(r.a0, a0);
164     TCGV_UNUSED(r.a1);
165 
166     return r;
167 }
168 
169 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
170 {
171     DisasCond r = { .c = c };
172 
173     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
174     r.a0 = tcg_temp_new();
175     tcg_gen_mov_tl(r.a0, a0);
176     r.a1 = tcg_temp_new();
177     tcg_gen_mov_tl(r.a1, a1);
178 
179     return r;
180 }
181 
182 static void cond_prep(DisasCond *cond)
183 {
184     if (cond->a1_is_0) {
185         cond->a1_is_0 = false;
186         cond->a1 = tcg_const_tl(0);
187     }
188 }
189 
/* Release any temporaries owned by *COND and reset it to "never".
   Values that merely alias a global (a0_is_n) or the implicit zero
   (a1_is_0) are not temporaries and must not be freed.  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        TCGV_UNUSED(cond->a0);
        TCGV_UNUSED(cond->a1);
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* ALWAYS and NEVER conditions carry no operands to free.  */
        break;
    }
}
212 
213 static TCGv get_temp(DisasContext *ctx)
214 {
215     unsigned i = ctx->ntemps++;
216     g_assert(i < ARRAY_SIZE(ctx->temps));
217     return ctx->temps[i] = tcg_temp_new();
218 }
219 
220 static TCGv load_const(DisasContext *ctx, target_long v)
221 {
222     TCGv t = get_temp(ctx);
223     tcg_gen_movi_tl(t, v);
224     return t;
225 }
226 
227 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
228 {
229     if (reg == 0) {
230         TCGv t = get_temp(ctx);
231         tcg_gen_movi_tl(t, 0);
232         return t;
233     } else {
234         return cpu_gr[reg];
235     }
236 }
237 
238 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
239 {
240     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
241         return get_temp(ctx);
242     } else {
243         return cpu_gr[reg];
244     }
245 }
246 
247 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
248 {
249     if (ctx->null_cond.c != TCG_COND_NEVER) {
250         cond_prep(&ctx->null_cond);
251         tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
252                            ctx->null_cond.a1, dest, t);
253     } else {
254         tcg_gen_mov_tl(dest, t);
255     }
256 }
257 
258 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
259 {
260     if (reg != 0) {
261         save_or_nullify(ctx, cpu_gr[reg], t);
262     }
263 }
264 
/* Byte offsets of the high and low 32-bit halves of a 64-bit FP
   register as stored in host memory, accounting for host endianness.  */
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
272 
273 static TCGv_i32 load_frw_i32(unsigned rt)
274 {
275     TCGv_i32 ret = tcg_temp_new_i32();
276     tcg_gen_ld_i32(ret, cpu_env,
277                    offsetof(CPUHPPAState, fr[rt & 31])
278                    + (rt & 32 ? LO_OFS : HI_OFS));
279     return ret;
280 }
281 
282 static TCGv_i32 load_frw0_i32(unsigned rt)
283 {
284     if (rt == 0) {
285         return tcg_const_i32(0);
286     } else {
287         return load_frw_i32(rt);
288     }
289 }
290 
291 static TCGv_i64 load_frw0_i64(unsigned rt)
292 {
293     if (rt == 0) {
294         return tcg_const_i64(0);
295     } else {
296         TCGv_i64 ret = tcg_temp_new_i64();
297         tcg_gen_ld32u_i64(ret, cpu_env,
298                           offsetof(CPUHPPAState, fr[rt & 31])
299                           + (rt & 32 ? LO_OFS : HI_OFS));
300         return ret;
301     }
302 }
303 
304 static void save_frw_i32(unsigned rt, TCGv_i32 val)
305 {
306     tcg_gen_st_i32(val, cpu_env,
307                    offsetof(CPUHPPAState, fr[rt & 31])
308                    + (rt & 32 ? LO_OFS : HI_OFS));
309 }
310 
311 #undef HI_OFS
312 #undef LO_OFS
313 
314 static TCGv_i64 load_frd(unsigned rt)
315 {
316     TCGv_i64 ret = tcg_temp_new_i64();
317     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
318     return ret;
319 }
320 
321 static TCGv_i64 load_frd0(unsigned rt)
322 {
323     if (rt == 0) {
324         return tcg_const_i64(0);
325     } else {
326         return load_frd(rt);
327     }
328 }
329 
330 static void save_frd(unsigned rt, TCGv_i64 val)
331 {
332     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
333 }
334 
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   On return, ctx->null_lab is non-NULL iff a skip branch was emitted;
   the caller must close it with nullify_end.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.
           The copy above preserves the pre-clear value for the branch.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }

        tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
365 
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Not nullifying: make sure PSW[N] is zero if it might not be.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_tl(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already aliases psw_n, the value is in place and nothing
       need be stored.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
383 
384 /* Set a PSW[N] to X.  The intention is that this is used immediately
385    before a goto_tb/exit_tb, so that there is no fallthru path to other
386    code within the TB.  Therefore we do not update psw_n_nonzero.  */
387 static void nullify_set(DisasContext *ctx, bool x)
388 {
389     if (ctx->psw_n_nonzero || x) {
390         tcg_gen_movi_tl(cpu_psw_n, x);
391     }
392 }
393 
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  */
static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
    TCGLabel *null_lab = ctx->null_lab;

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }

    /* NOTE(review): this assert makes the if statement below dead code
       in debug builds, yet gen_illegal can reach here with DISAS_NORETURN
       (from gen_excp) when the insn was conditional, which would trip the
       assert.  One of the two must be wrong -- verify against upstream
       before relying on either behavior.  */
    assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
    if (status == DISAS_NORETURN) {
        status = DISAS_NEXT;
    }
    return status;
}
428 
429 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
430 {
431     if (unlikely(ival == -1)) {
432         tcg_gen_mov_tl(dest, vval);
433     } else {
434         tcg_gen_movi_tl(dest, ival);
435     }
436 }
437 
438 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
439 {
440     return ctx->iaoq_f + disp + 8;
441 }
442 
443 static void gen_excp_1(int exception)
444 {
445     TCGv_i32 t = tcg_const_i32(exception);
446     gen_helper_excp(cpu_env, t);
447     tcg_temp_free_i32(t);
448 }
449 
/* Raise EXCEPTION after synchronizing the IAOQ globals and PSW[N] with
   the translation-time state, so the handler observes consistent CPU
   state.  Ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return DISAS_NORETURN;
}
458 
459 static DisasJumpType gen_illegal(DisasContext *ctx)
460 {
461     nullify_over(ctx);
462     return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
463 }
464 
465 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
466 {
467     /* Suppress goto_tb in the case of single-steping and IO.  */
468     if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
469         return false;
470     }
471     return true;
472 }
473 
474 /* If the next insn is to be nullified, and it's on the same page,
475    and we're not attempting to set a breakpoint on it, then we can
476    totally skip the nullified insn.  This avoids creating and
477    executing a TB that merely branches to the next TB.  */
478 static bool use_nullify_skip(DisasContext *ctx)
479 {
480     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
481             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
482 }
483 
/* Transfer control to the TB whose insn queue is (F, B); WHICH selects
   the goto_tb slot (0 or 1).  A direct goto_tb is possible only when
   both addresses are known at translation time; otherwise copy the
   queue into the globals and chain via lookup_and_goto_ptr, or raise
   EXCP_DEBUG when single-stepping.  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ulong f, target_ulong b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_tl(cpu_iaoq_f, f);
        tcg_gen_movi_tl(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
502 
503 /* PA has a habit of taking the LSB of a field and using that as the sign,
504    with the rest of the field becoming the least significant bits.  */
505 static target_long low_sextract(uint32_t val, int pos, int len)
506 {
507     target_ulong x = -(target_ulong)extract32(val, pos, 1);
508     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
509     return x;
510 }
511 
/* Reassemble a 6-bit register number: bit 6 of the insn supplies the
   high bit, bits [4:0] the low bits.  */
static unsigned assemble_rt64(uint32_t insn)
{
    return ((insn >> 6) & 1) * 32 + (insn & 0x1f);
}
518 
/* Reassemble a 6-bit register number: bit 7 of the insn supplies the
   high bit, bits [25:21] the low bits.  */
static unsigned assemble_ra64(uint32_t insn)
{
    return ((insn >> 7) & 1) * 32 + ((insn >> 21) & 0x1f);
}
525 
/* Reassemble a 6-bit register number: bit 12 of the insn supplies the
   high bit, bits [20:16] the low bits.  */
static unsigned assemble_rb64(uint32_t insn)
{
    return ((insn >> 12) & 1) * 32 + ((insn >> 16) & 0x1f);
}
532 
/* Reassemble a 6-bit register number from three fields: bit 8 is the
   high bit, bits [15:13] the middle, bits [10:9] the low bits.  */
static unsigned assemble_rc64(uint32_t insn)
{
    unsigned hi  = (insn >> 8) & 1;
    unsigned mid = (insn >> 13) & 7;
    unsigned lo  = (insn >> 9) & 3;

    return hi * 32 + mid * 4 + lo;
}
540 
541 static target_long assemble_12(uint32_t insn)
542 {
543     target_ulong x = -(target_ulong)(insn & 1);
544     x = (x <<  1) | extract32(insn, 2, 1);
545     x = (x << 10) | extract32(insn, 3, 10);
546     return x;
547 }
548 
549 static target_long assemble_16(uint32_t insn)
550 {
551     /* Take the name from PA2.0, which produces a 16-bit number
552        only with wide mode; otherwise a 14-bit number.  Since we don't
553        implement wide mode, this is always the 14-bit number.  */
554     return low_sextract(insn, 0, 14);
555 }
556 
557 static target_long assemble_16a(uint32_t insn)
558 {
559     /* Take the name from PA2.0, which produces a 14-bit shifted number
560        only with wide mode; otherwise a 12-bit shifted number.  Since we
561        don't implement wide mode, this is always the 12-bit number.  */
562     target_ulong x = -(target_ulong)(insn & 1);
563     x = (x << 11) | extract32(insn, 2, 11);
564     return x << 2;
565 }
566 
567 static target_long assemble_17(uint32_t insn)
568 {
569     target_ulong x = -(target_ulong)(insn & 1);
570     x = (x <<  5) | extract32(insn, 16, 5);
571     x = (x <<  1) | extract32(insn, 2, 1);
572     x = (x << 10) | extract32(insn, 3, 10);
573     return x << 2;
574 }
575 
576 static target_long assemble_21(uint32_t insn)
577 {
578     target_ulong x = -(target_ulong)(insn & 1);
579     x = (x << 11) | extract32(insn, 1, 11);
580     x = (x <<  2) | extract32(insn, 14, 2);
581     x = (x <<  5) | extract32(insn, 16, 5);
582     x = (x <<  2) | extract32(insn, 12, 2);
583     return x << 11;
584 }
585 
586 static target_long assemble_22(uint32_t insn)
587 {
588     target_ulong x = -(target_ulong)(insn & 1);
589     x = (x << 10) | extract32(insn, 16, 10);
590     x = (x <<  1) | extract32(insn, 2, 1);
591     x = (x << 10) | extract32(insn, 3, 10);
592     return x << 2;
593 }
594 
/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

/* Convert condition field CF into a DisasCond, given the result RES,
   the carry-out MSB CB_MSB and the signed overflow SV of the operation
   just emitted.  Bit 0 of CF inverts the sense of the comparison.  */
static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
{
    DisasCond cond;
    TCGv tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        /* -cb_msb is all-ones when the carry is set, so
           tmp == 0 iff !C or Z.  */
        tmp = tcg_temp_new();
        tcg_gen_neg_tl(tmp, cb_msb);
        tcg_gen_and_tl(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_tl(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
648 
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        /* Conditions 0, 6 and 7 do not depend on the pre-subtraction
           inputs; fall back to the generic conditions on RES/SV.  */
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
682 
683 /* Similar, but for logicals, where the carry and overflow bits are not
684    computed, and use of them is undefined.  */
685 
686 static DisasCond do_log_cond(unsigned cf, TCGv res)
687 {
688     switch (cf >> 1) {
689     case 4: case 5: case 6:
690         cf &= 1;
691         break;
692     }
693     return do_cond(cf, res, res, res);
694 }
695 
696 /* Similar, but for shift/extract/deposit conditions.  */
697 
698 static DisasCond do_sed_cond(unsigned orig, TCGv res)
699 {
700     unsigned c, f;
701 
702     /* Convert the compressed condition codes to standard.
703        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
704        4-7 are the reverse of 0-3.  */
705     c = orig & 3;
706     if (c == 3) {
707         c = 7;
708     }
709     f = (orig & 4) / 4;
710 
711     return do_log_cond(c * 2 + f, res);
712 }
713 
/* Similar, but for unit conditions.  Tests bytes/halfwords/digits of
   RES (and, for CF bit 3, the per-unit carries out of IN1 + IN2).  */

static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
{
    DisasCond cond;
    TCGv tmp, cb;

    TCGV_UNUSED(cb);
    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_tl(cb, in1, in2);
        tcg_gen_and_tl(tmp, in1, in2);
        tcg_gen_andc_tl(cb, cb, res);
        /* cb = ((in1 | in2) & ~res) | (in1 & in2): carry-out of each bit.  */
        tcg_gen_or_tl(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x01010101u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick with halfword granularity.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_tl(tmp, res, 0x00010001u);
        tcg_gen_andc_tl(tmp, tmp, res);
        tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        /* Carry out of any 4-bit (BCD digit) position.  */
        tcg_gen_andi_tl(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        /* Carry out of any byte position.  */
        tcg_gen_andi_tl(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        /* Carry out of any halfword position.  */
        tcg_gen_andi_tl(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
791 
792 /* Compute signed overflow for addition.  */
793 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
794 {
795     TCGv sv = get_temp(ctx);
796     TCGv tmp = tcg_temp_new();
797 
798     tcg_gen_xor_tl(sv, res, in1);
799     tcg_gen_xor_tl(tmp, in1, in2);
800     tcg_gen_andc_tl(sv, sv, tmp);
801     tcg_temp_free(tmp);
802 
803     return sv;
804 }
805 
806 /* Compute signed overflow for subtraction.  */
807 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
808 {
809     TCGv sv = get_temp(ctx);
810     TCGv tmp = tcg_temp_new();
811 
812     tcg_gen_xor_tl(sv, res, in1);
813     tcg_gen_xor_tl(tmp, in1, in2);
814     tcg_gen_and_tl(sv, sv, tmp);
815     tcg_temp_free(tmp);
816 
817     return sv;
818 }
819 
/* Emit an ADD-family operation:
     RT = (IN1 << SHIFT) + IN2 [+ PSW carry, if IS_C].
   IS_L suppresses the carry/borrow writeback (ADD,L); IS_TSV traps on
   signed overflow; IS_TC traps on the condition; CF is the condition
   field used for traps and for nullifying the next insn.  */
static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                            unsigned shift, bool is_l, bool is_tsv, bool is_tc,
                            bool is_c, unsigned cf)
{
    TCGv dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    TCGV_UNUSED(cb);
    TCGV_UNUSED(cb_msb);

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_tl(tmp, in1, shift);
        in1 = tmp;
    }

    /* Compute the carry-out unless suppressed; conditions 4 and 5 test
       the carry, so it is needed even for ADD,L (see do_cond).  */
    if (!is_l || c == 4 || c == 5) {
        TCGv zero = tcg_const_tl(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            /* Recover the per-bit carry vector as in1 ^ in2 ^ dest.  */
            cb = get_temp(ctx);
            tcg_gen_xor_tl(cb, in1, in2);
            tcg_gen_xor_tl(cb, cb, dest);
        }
    } else {
        tcg_gen_add_tl(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
891 
/* Emit a SUB-family operation: RT = IN1 - IN2.
   IS_B includes the PSW borrow bit (SUB,B); IS_TSV traps on signed
   overflow; IS_TC traps on the condition; CF is the condition field
   used for traps and for nullifying the next insn.  */
static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
                            bool is_tsv, bool is_b, bool is_tc, unsigned cf)
{
    TCGv dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_tl(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_tl(cb, in2);
        tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
        /* Recover the per-bit carry vector: ~in2 ^ in1 ^ dest.  */
        tcg_gen_xor_tl(cb, cb, in1);
        tcg_gen_xor_tl(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_tl(cb_msb, 1);
        tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
        /* Per-bit carry vector: ~(in1 ^ in2) ^ dest.  */
        tcg_gen_eqv_tl(cb, in1, in2);
        tcg_gen_xor_tl(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
957 
/* COMPARE AND CLEAR: form the condition from IN1 - IN2, then write
   zero to RT.  The difference itself is never stored.  */
static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
                               TCGv in2, unsigned cf)
{
    TCGv dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_tl(dest, in1, in2);

    /* Compute signed overflow if required.  */
    TCGV_UNUSED(sv);
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_tl(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
986 
987 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
988                             unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
989 {
990     TCGv dest = dest_gpr(ctx, rt);
991 
992     /* Perform the operation, and writeback.  */
993     fn(dest, in1, in2);
994     save_gpr(ctx, rt, dest);
995 
996     /* Install the new nullification.  */
997     cond_free(&ctx->null_cond);
998     if (cf) {
999         ctx->null_cond = do_log_cond(cf, dest);
1000     }
1001     return DISAS_NEXT;
1002 }
1003 
/* Emit a unit operation FN on IN1,IN2 into RT.  With CF == 0 no
   condition is required and a plain (possibly nullified) writeback
   suffices; otherwise compute the unit condition, optionally (IS_TC)
   trap on it, and install it as the next nullification.  */
static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
                             TCGv in2, unsigned cf, bool is_tc,
                             void (*fn)(TCGv, TCGv, TCGv))
{
    TCGv dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        /* Emit any conditional trap before any writeback.  */
        if (is_tc) {
            TCGv tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return DISAS_NEXT;
}
1036 
1037 /* Emit a memory load.  The modify parameter should be
1038  * < 0 for pre-modify,
1039  * > 0 for post-modify,
1040  * = 0 for no base register update.
1041  */
1042 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1043                        unsigned rx, int scale, target_long disp,
1044                        int modify, TCGMemOp mop)
1045 {
1046     TCGv addr, base;
1047 
1048     /* Caller uses nullify_over/nullify_end.  */
1049     assert(ctx->null_cond.c == TCG_COND_NEVER);
1050 
1051     addr = tcg_temp_new();
1052     base = load_gpr(ctx, rb);
1053 
1054     /* Note that RX is mutually exclusive with DISP.  */
1055     if (rx) {
1056         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1057         tcg_gen_add_tl(addr, addr, base);
1058     } else {
1059         tcg_gen_addi_tl(addr, base, disp);
1060     }
1061 
1062     if (modify == 0) {
1063         tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1064     } else {
1065         tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1066                             MMU_USER_IDX, mop);
1067         save_gpr(ctx, rb, addr);
1068     }
1069     tcg_temp_free(addr);
1070 }
1071 
1072 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1073                        unsigned rx, int scale, target_long disp,
1074                        int modify, TCGMemOp mop)
1075 {
1076     TCGv addr, base;
1077 
1078     /* Caller uses nullify_over/nullify_end.  */
1079     assert(ctx->null_cond.c == TCG_COND_NEVER);
1080 
1081     addr = tcg_temp_new();
1082     base = load_gpr(ctx, rb);
1083 
1084     /* Note that RX is mutually exclusive with DISP.  */
1085     if (rx) {
1086         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1087         tcg_gen_add_tl(addr, addr, base);
1088     } else {
1089         tcg_gen_addi_tl(addr, base, disp);
1090     }
1091 
1092     if (modify == 0) {
1093         tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1094     } else {
1095         tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1096                             MMU_USER_IDX, mop);
1097         save_gpr(ctx, rb, addr);
1098     }
1099     tcg_temp_free(addr);
1100 }
1101 
1102 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1103                         unsigned rx, int scale, target_long disp,
1104                         int modify, TCGMemOp mop)
1105 {
1106     TCGv addr, base;
1107 
1108     /* Caller uses nullify_over/nullify_end.  */
1109     assert(ctx->null_cond.c == TCG_COND_NEVER);
1110 
1111     addr = tcg_temp_new();
1112     base = load_gpr(ctx, rb);
1113 
1114     /* Note that RX is mutually exclusive with DISP.  */
1115     if (rx) {
1116         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1117         tcg_gen_add_tl(addr, addr, base);
1118     } else {
1119         tcg_gen_addi_tl(addr, base, disp);
1120     }
1121 
1122     tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1123 
1124     if (modify != 0) {
1125         save_gpr(ctx, rb, addr);
1126     }
1127     tcg_temp_free(addr);
1128 }
1129 
1130 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1131                         unsigned rx, int scale, target_long disp,
1132                         int modify, TCGMemOp mop)
1133 {
1134     TCGv addr, base;
1135 
1136     /* Caller uses nullify_over/nullify_end.  */
1137     assert(ctx->null_cond.c == TCG_COND_NEVER);
1138 
1139     addr = tcg_temp_new();
1140     base = load_gpr(ctx, rb);
1141 
1142     /* Note that RX is mutually exclusive with DISP.  */
1143     if (rx) {
1144         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1145         tcg_gen_add_tl(addr, addr, base);
1146     } else {
1147         tcg_gen_addi_tl(addr, base, disp);
1148     }
1149 
1150     tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1151 
1152     if (modify != 0) {
1153         save_gpr(ctx, rb, addr);
1154     }
1155     tcg_temp_free(addr);
1156 }
1157 
/* Select the load/store helper whose data width matches TCGv
   (target_ulong), so GPR-sized accesses share one entry point.  */
#if TARGET_LONG_BITS == 64
#define do_load_tl  do_load_64
#define do_store_tl do_store_64
#else
#define do_load_tl  do_load_32
#define do_store_tl do_store_32
#endif
1165 
1166 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1167                              unsigned rx, int scale, target_long disp,
1168                              int modify, TCGMemOp mop)
1169 {
1170     TCGv dest;
1171 
1172     nullify_over(ctx);
1173 
1174     if (modify == 0) {
1175         /* No base register update.  */
1176         dest = dest_gpr(ctx, rt);
1177     } else {
1178         /* Make sure if RT == RB, we see the result of the load.  */
1179         dest = get_temp(ctx);
1180     }
1181     do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1182     save_gpr(ctx, rt, dest);
1183 
1184     return nullify_end(ctx, DISAS_NEXT);
1185 }
1186 
1187 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1188                                unsigned rx, int scale, target_long disp,
1189                                int modify)
1190 {
1191     TCGv_i32 tmp;
1192 
1193     nullify_over(ctx);
1194 
1195     tmp = tcg_temp_new_i32();
1196     do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1197     save_frw_i32(rt, tmp);
1198     tcg_temp_free_i32(tmp);
1199 
1200     if (rt == 0) {
1201         gen_helper_loaded_fr0(cpu_env);
1202     }
1203 
1204     return nullify_end(ctx, DISAS_NEXT);
1205 }
1206 
1207 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1208                                unsigned rx, int scale, target_long disp,
1209                                int modify)
1210 {
1211     TCGv_i64 tmp;
1212 
1213     nullify_over(ctx);
1214 
1215     tmp = tcg_temp_new_i64();
1216     do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1217     save_frd(rt, tmp);
1218     tcg_temp_free_i64(tmp);
1219 
1220     if (rt == 0) {
1221         gen_helper_loaded_fr0(cpu_env);
1222     }
1223 
1224     return nullify_end(ctx, DISAS_NEXT);
1225 }
1226 
1227 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1228                               target_long disp, int modify, TCGMemOp mop)
1229 {
1230     nullify_over(ctx);
1231     do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1232     return nullify_end(ctx, DISAS_NEXT);
1233 }
1234 
1235 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1236                                 unsigned rx, int scale, target_long disp,
1237                                 int modify)
1238 {
1239     TCGv_i32 tmp;
1240 
1241     nullify_over(ctx);
1242 
1243     tmp = load_frw_i32(rt);
1244     do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1245     tcg_temp_free_i32(tmp);
1246 
1247     return nullify_end(ctx, DISAS_NEXT);
1248 }
1249 
1250 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1251                                 unsigned rx, int scale, target_long disp,
1252                                 int modify)
1253 {
1254     TCGv_i64 tmp;
1255 
1256     nullify_over(ctx);
1257 
1258     tmp = load_frd(rt);
1259     do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1260     tcg_temp_free_i64(tmp);
1261 
1262     return nullify_end(ctx, DISAS_NEXT);
1263 }
1264 
1265 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1266                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1267 {
1268     TCGv_i32 tmp;
1269 
1270     nullify_over(ctx);
1271     tmp = load_frw0_i32(ra);
1272 
1273     func(tmp, cpu_env, tmp);
1274 
1275     save_frw_i32(rt, tmp);
1276     tcg_temp_free_i32(tmp);
1277     return nullify_end(ctx, DISAS_NEXT);
1278 }
1279 
1280 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1281                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1282 {
1283     TCGv_i32 dst;
1284     TCGv_i64 src;
1285 
1286     nullify_over(ctx);
1287     src = load_frd(ra);
1288     dst = tcg_temp_new_i32();
1289 
1290     func(dst, cpu_env, src);
1291 
1292     tcg_temp_free_i64(src);
1293     save_frw_i32(rt, dst);
1294     tcg_temp_free_i32(dst);
1295     return nullify_end(ctx, DISAS_NEXT);
1296 }
1297 
1298 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1299                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1300 {
1301     TCGv_i64 tmp;
1302 
1303     nullify_over(ctx);
1304     tmp = load_frd0(ra);
1305 
1306     func(tmp, cpu_env, tmp);
1307 
1308     save_frd(rt, tmp);
1309     tcg_temp_free_i64(tmp);
1310     return nullify_end(ctx, DISAS_NEXT);
1311 }
1312 
1313 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1314                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1315 {
1316     TCGv_i32 src;
1317     TCGv_i64 dst;
1318 
1319     nullify_over(ctx);
1320     src = load_frw0_i32(ra);
1321     dst = tcg_temp_new_i64();
1322 
1323     func(dst, cpu_env, src);
1324 
1325     tcg_temp_free_i32(src);
1326     save_frd(rt, dst);
1327     tcg_temp_free_i64(dst);
1328     return nullify_end(ctx, DISAS_NEXT);
1329 }
1330 
1331 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1332                                  unsigned ra, unsigned rb,
1333                                  void (*func)(TCGv_i32, TCGv_env,
1334                                               TCGv_i32, TCGv_i32))
1335 {
1336     TCGv_i32 a, b;
1337 
1338     nullify_over(ctx);
1339     a = load_frw0_i32(ra);
1340     b = load_frw0_i32(rb);
1341 
1342     func(a, cpu_env, a, b);
1343 
1344     tcg_temp_free_i32(b);
1345     save_frw_i32(rt, a);
1346     tcg_temp_free_i32(a);
1347     return nullify_end(ctx, DISAS_NEXT);
1348 }
1349 
1350 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1351                                  unsigned ra, unsigned rb,
1352                                  void (*func)(TCGv_i64, TCGv_env,
1353                                               TCGv_i64, TCGv_i64))
1354 {
1355     TCGv_i64 a, b;
1356 
1357     nullify_over(ctx);
1358     a = load_frd0(ra);
1359     b = load_frd0(rb);
1360 
1361     func(a, cpu_env, a, b);
1362 
1363     tcg_temp_free_i64(b);
1364     save_frd(rt, a);
1365     tcg_temp_free_i64(a);
1366     return nullify_end(ctx, DISAS_NEXT);
1367 }
1368 
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
                                unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No pending nullification: just retarget the next queue entry
           and keep translating straight through.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            /* The delay slot of this branch is nullified.  */
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return DISAS_NEXT;
    } else {
        /* Branch may itself be nullified: emit both outcomes.  */
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* The delay slot is nullified and may be skipped outright.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, DISAS_NEXT);

        /* Taken the nullified path: the branch itself was skipped, so
           continue sequentially.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return DISAS_NORETURN;
    }
}
1405 
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
                                DisasCond *cond)
{
    target_ulong dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        /* The not-taken delay slot may be skipped entirely.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            /* Merge the branch-was-nullified path into the fallthrough.  */
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        return DISAS_IAQ_N_STALE;
    } else {
        return DISAS_NORETURN;
    }
}
1466 
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
                                unsigned link, bool is_n)
{
    TCGv a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Branch not nullified: redirect the next queue entry to the
           dynamic target.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_tl(next, dest);
        ctx->iaoq_n = -1;       /* -1: address known only at runtime */
        ctx->iaoq_n_var = next;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_tl(cpu_iaoq_f, dest);
        tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx, DISAS_NEXT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        /* If the branch is nullified (C holds), continue to the
           sequential next address; otherwise go to DEST.  */
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Likewise, only update the link register if B executes.  */
            tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return DISAS_NEXT;
}
1543 
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static DisasJumpType do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        /* PSW[N] is known set: clear it and deliver SIGILL below.  */
        tcg_gen_movi_tl(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the entry offset within the gateway page.  */
    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_SIGSEGV);
        return DISAS_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return DISAS_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
        tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
        return DISAS_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return DISAS_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_SIGILL);
        return DISAS_NORETURN;
    }
}
1601 
1602 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1603                                const DisasInsn *di)
1604 {
1605     cond_free(&ctx->null_cond);
1606     return DISAS_NEXT;
1607 }
1608 
1609 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1610                                  const DisasInsn *di)
1611 {
1612     nullify_over(ctx);
1613     return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1614 }
1615 
1616 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1617                                 const DisasInsn *di)
1618 {
1619     /* No point in nullifying the memory barrier.  */
1620     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1621 
1622     cond_free(&ctx->null_cond);
1623     return DISAS_NEXT;
1624 }
1625 
1626 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1627                                 const DisasInsn *di)
1628 {
1629     unsigned rt = extract32(insn, 0, 5);
1630     TCGv tmp = dest_gpr(ctx, rt);
1631     tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1632     save_gpr(ctx, rt, tmp);
1633 
1634     cond_free(&ctx->null_cond);
1635     return DISAS_NEXT;
1636 }
1637 
1638 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1639                                 const DisasInsn *di)
1640 {
1641     unsigned rt = extract32(insn, 0, 5);
1642     TCGv tmp = dest_gpr(ctx, rt);
1643 
1644     /* ??? We don't implement space registers.  */
1645     tcg_gen_movi_tl(tmp, 0);
1646     save_gpr(ctx, rt, tmp);
1647 
1648     cond_free(&ctx->null_cond);
1649     return DISAS_NEXT;
1650 }
1651 
/* MFCTL: read a control register.  Only SAR, the interval timer
   (stubbed to zero), and cr26/cr27 are readable here; everything
   else is treated as privileged.  */
static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv tmp;

    switch (ctl) {
    case 11: /* SAR */
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_tl(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            break;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        break;
    case 16: /* Interval Timer */
        /* Not emulated: always reads as zero.  */
        tmp = dest_gpr(ctx, rt);
        tcg_gen_movi_tl(tmp, 0); /* FIXME */
        save_gpr(ctx, rt, tmp);
        break;
    case 26:
        save_gpr(ctx, rt, cpu_cr26);
        break;
    case 27:
        save_gpr(ctx, rt, cpu_cr27);
        break;
    default:
        /* All other control registers are privileged.  */
        return gen_illegal(ctx);
    }

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
1691 
1692 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1693                                  const DisasInsn *di)
1694 {
1695     unsigned rin = extract32(insn, 16, 5);
1696     unsigned ctl = extract32(insn, 21, 5);
1697     TCGv tmp;
1698 
1699     if (ctl == 11) { /* SAR */
1700         tmp = tcg_temp_new();
1701         tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1702         save_or_nullify(ctx, cpu_sar, tmp);
1703         tcg_temp_free(tmp);
1704     } else {
1705         /* All other control registers are privileged or read-only.  */
1706         return gen_illegal(ctx);
1707     }
1708 
1709     cond_free(&ctx->null_cond);
1710     return DISAS_NEXT;
1711 }
1712 
1713 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1714                                    const DisasInsn *di)
1715 {
1716     unsigned rin = extract32(insn, 16, 5);
1717     TCGv tmp = tcg_temp_new();
1718 
1719     tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1720     tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1721     save_or_nullify(ctx, cpu_sar, tmp);
1722     tcg_temp_free(tmp);
1723 
1724     cond_free(&ctx->null_cond);
1725     return DISAS_NEXT;
1726 }
1727 
1728 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1729                                  const DisasInsn *di)
1730 {
1731     unsigned rt = extract32(insn, 0, 5);
1732     TCGv dest = dest_gpr(ctx, rt);
1733 
1734     /* Since we don't implement space registers, this returns zero.  */
1735     tcg_gen_movi_tl(dest, 0);
1736     save_gpr(ctx, rt, dest);
1737 
1738     cond_free(&ctx->null_cond);
1739     return DISAS_NEXT;
1740 }
1741 
/* Match/mask decode table for the system control instruction group.  */
static const DisasInsn table_system[] = {
    { 0x00000000u, 0xfc001fe0u, trans_break },
    /* We don't implement space register, so MTSP is a nop.  */
    { 0x00001820u, 0xffe01fffu, trans_nop },
    { 0x00001840u, 0xfc00ffffu, trans_mtctl },
    { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
    { 0x000014a0u, 0xffffffe0u, trans_mfia },
    { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
    { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
    { 0x00000400u, 0xffffffffu, trans_sync },
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
};
1754 
1755 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1756                                         const DisasInsn *di)
1757 {
1758     unsigned rb = extract32(insn, 21, 5);
1759     unsigned rx = extract32(insn, 16, 5);
1760     TCGv dest = dest_gpr(ctx, rb);
1761     TCGv src1 = load_gpr(ctx, rb);
1762     TCGv src2 = load_gpr(ctx, rx);
1763 
1764     /* The only thing we need to do is the base register modification.  */
1765     tcg_gen_add_tl(dest, src1, src2);
1766     save_gpr(ctx, rb, dest);
1767 
1768     cond_free(&ctx->null_cond);
1769     return DISAS_NEXT;
1770 }
1771 
1772 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1773                                  const DisasInsn *di)
1774 {
1775     unsigned rt = extract32(insn, 0, 5);
1776     unsigned rb = extract32(insn, 21, 5);
1777     unsigned is_write = extract32(insn, 6, 1);
1778     TCGv dest;
1779 
1780     nullify_over(ctx);
1781 
1782     /* ??? Do something with priv level operand.  */
1783     dest = dest_gpr(ctx, rt);
1784     if (is_write) {
1785         gen_helper_probe_w(dest, load_gpr(ctx, rb));
1786     } else {
1787         gen_helper_probe_r(dest, load_gpr(ctx, rb));
1788     }
1789     save_gpr(ctx, rt, dest);
1790     return nullify_end(ctx, DISAS_NEXT);
1791 }
1792 
/* Match/mask decode table for the memory-management (cache/TLB) group.
   Cache flushes are nops here, apart from any base-register update.  */
static const DisasInsn table_mem_mgmt[] = {
    { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
    { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
    { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
    { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
    { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
    { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
    { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
    { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
    { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
    { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
    { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
    { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
    { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
    { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
    { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
};
1810 
1811 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1812                                const DisasInsn *di)
1813 {
1814     unsigned r2 = extract32(insn, 21, 5);
1815     unsigned r1 = extract32(insn, 16, 5);
1816     unsigned cf = extract32(insn, 12, 4);
1817     unsigned ext = extract32(insn, 8, 4);
1818     unsigned shift = extract32(insn, 6, 2);
1819     unsigned rt = extract32(insn,  0, 5);
1820     TCGv tcg_r1, tcg_r2;
1821     bool is_c = false;
1822     bool is_l = false;
1823     bool is_tc = false;
1824     bool is_tsv = false;
1825     DisasJumpType ret;
1826 
1827     switch (ext) {
1828     case 0x6: /* ADD, SHLADD */
1829         break;
1830     case 0xa: /* ADD,L, SHLADD,L */
1831         is_l = true;
1832         break;
1833     case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1834         is_tsv = true;
1835         break;
1836     case 0x7: /* ADD,C */
1837         is_c = true;
1838         break;
1839     case 0xf: /* ADD,C,TSV */
1840         is_c = is_tsv = true;
1841         break;
1842     default:
1843         return gen_illegal(ctx);
1844     }
1845 
1846     if (cf) {
1847         nullify_over(ctx);
1848     }
1849     tcg_r1 = load_gpr(ctx, r1);
1850     tcg_r2 = load_gpr(ctx, r2);
1851     ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1852     return nullify_end(ctx, ret);
1853 }
1854 
1855 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1856                                const DisasInsn *di)
1857 {
1858     unsigned r2 = extract32(insn, 21, 5);
1859     unsigned r1 = extract32(insn, 16, 5);
1860     unsigned cf = extract32(insn, 12, 4);
1861     unsigned ext = extract32(insn, 6, 6);
1862     unsigned rt = extract32(insn,  0, 5);
1863     TCGv tcg_r1, tcg_r2;
1864     bool is_b = false;
1865     bool is_tc = false;
1866     bool is_tsv = false;
1867     DisasJumpType ret;
1868 
1869     switch (ext) {
1870     case 0x10: /* SUB */
1871         break;
1872     case 0x30: /* SUB,TSV */
1873         is_tsv = true;
1874         break;
1875     case 0x14: /* SUB,B */
1876         is_b = true;
1877         break;
1878     case 0x34: /* SUB,B,TSV */
1879         is_b = is_tsv = true;
1880         break;
1881     case 0x13: /* SUB,TC */
1882         is_tc = true;
1883         break;
1884     case 0x33: /* SUB,TSV,TC */
1885         is_tc = is_tsv = true;
1886         break;
1887     default:
1888         return gen_illegal(ctx);
1889     }
1890 
1891     if (cf) {
1892         nullify_over(ctx);
1893     }
1894     tcg_r1 = load_gpr(ctx, r1);
1895     tcg_r2 = load_gpr(ctx, r2);
1896     ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1897     return nullify_end(ctx, ret);
1898 }
1899 
1900 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1901                                const DisasInsn *di)
1902 {
1903     unsigned r2 = extract32(insn, 21, 5);
1904     unsigned r1 = extract32(insn, 16, 5);
1905     unsigned cf = extract32(insn, 12, 4);
1906     unsigned rt = extract32(insn,  0, 5);
1907     TCGv tcg_r1, tcg_r2;
1908     DisasJumpType ret;
1909 
1910     if (cf) {
1911         nullify_over(ctx);
1912     }
1913     tcg_r1 = load_gpr(ctx, r1);
1914     tcg_r2 = load_gpr(ctx, r2);
1915     ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1916     return nullify_end(ctx, ret);
1917 }
1918 
1919 /* OR r,0,t -> COPY (according to gas) */
1920 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1921                                 const DisasInsn *di)
1922 {
1923     unsigned r1 = extract32(insn, 16, 5);
1924     unsigned rt = extract32(insn,  0, 5);
1925 
1926     if (r1 == 0) {
1927         TCGv dest = dest_gpr(ctx, rt);
1928         tcg_gen_movi_tl(dest, 0);
1929         save_gpr(ctx, rt, dest);
1930     } else {
1931         save_gpr(ctx, rt, cpu_gr[r1]);
1932     }
1933     cond_free(&ctx->null_cond);
1934     return DISAS_NEXT;
1935 }
1936 
1937 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1938                                   const DisasInsn *di)
1939 {
1940     unsigned r2 = extract32(insn, 21, 5);
1941     unsigned r1 = extract32(insn, 16, 5);
1942     unsigned cf = extract32(insn, 12, 4);
1943     unsigned rt = extract32(insn,  0, 5);
1944     TCGv tcg_r1, tcg_r2;
1945     DisasJumpType ret;
1946 
1947     if (cf) {
1948         nullify_over(ctx);
1949     }
1950     tcg_r1 = load_gpr(ctx, r1);
1951     tcg_r2 = load_gpr(ctx, r2);
1952     ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1953     return nullify_end(ctx, ret);
1954 }
1955 
1956 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1957                                 const DisasInsn *di)
1958 {
1959     unsigned r2 = extract32(insn, 21, 5);
1960     unsigned r1 = extract32(insn, 16, 5);
1961     unsigned cf = extract32(insn, 12, 4);
1962     unsigned rt = extract32(insn,  0, 5);
1963     TCGv tcg_r1, tcg_r2;
1964     DisasJumpType ret;
1965 
1966     if (cf) {
1967         nullify_over(ctx);
1968     }
1969     tcg_r1 = load_gpr(ctx, r1);
1970     tcg_r2 = load_gpr(ctx, r2);
1971     ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1972     return nullify_end(ctx, ret);
1973 }
1974 
1975 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1976                                   const DisasInsn *di)
1977 {
1978     unsigned r2 = extract32(insn, 21, 5);
1979     unsigned r1 = extract32(insn, 16, 5);
1980     unsigned cf = extract32(insn, 12, 4);
1981     unsigned is_tc = extract32(insn, 6, 1);
1982     unsigned rt = extract32(insn,  0, 5);
1983     TCGv tcg_r1, tcg_r2, tmp;
1984     DisasJumpType ret;
1985 
1986     if (cf) {
1987         nullify_over(ctx);
1988     }
1989     tcg_r1 = load_gpr(ctx, r1);
1990     tcg_r2 = load_gpr(ctx, r2);
1991     tmp = get_temp(ctx);
1992     tcg_gen_not_tl(tmp, tcg_r2);
1993     ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1994     return nullify_end(ctx, ret);
1995 }
1996 
/* DCOR / IDCOR (bit 6 selects the "intermediate" form): decimal
   correction.  Builds a per-nibble correction value of 0 or 6 from the
   saved carry bits in PSW[CB], then applies it through do_unit.  */
static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn,  0, 5);
    TCGv tmp;
    DisasJumpType ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    /* Move each nibble's carry bit down to the nibble's LSB...  */
    tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        /* ...inverted for the plain (non-intermediate) form...  */
        tcg_gen_not_tl(tmp, tmp);
    }
    /* ...keep only those bits, and scale each selected nibble to 6.  */
    tcg_gen_andi_tl(tmp, tmp, 0x11111111);
    tcg_gen_muli_tl(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);

    return nullify_end(ctx, ret);
}
2021 
/* DS: divide step.  One iteration of the non-restoring division
   primitive, consuming and regenerating PSW[CB] and PSW[V].  */
static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_tl(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_tl(add1, in1, in1);
    tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
    tcg_gen_xor_tl(add2, in2, addc);
    tcg_gen_andi_tl(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB]: the per-bit carries are recovered as
       in1 ^ in2 ^ sum.  */
    tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
    tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv sv;
        TCGV_UNUSED(sv);
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, DISAS_NEXT);
}
2088 
/* Decode table for the opcode 0x02 arithmetic/logical group.
   Entries are { match, mask, handler[, generator] }; more specific
   masks (nop, copy) appear before the general forms they overlap.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
    { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl }, /* andcm */
    { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },  /* and */
    { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },   /* or */
    { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },  /* xor */
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr }, /* cmpclr */
    { 0x08000380u, 0xfc000fe0u, trans_uxor },   /* uxor */
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm }, /* uaddcm; tc form via bit 6 */
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },   /* dcor; i form via bit 6 */
    { 0x08000440u, 0xfc000fe0u, trans_ds },     /* ds */
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};
2106 
2107 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2108 {
2109     target_long im = low_sextract(insn, 0, 11);
2110     unsigned e1 = extract32(insn, 11, 1);
2111     unsigned cf = extract32(insn, 12, 4);
2112     unsigned rt = extract32(insn, 16, 5);
2113     unsigned r2 = extract32(insn, 21, 5);
2114     unsigned o1 = extract32(insn, 26, 1);
2115     TCGv tcg_im, tcg_r2;
2116     DisasJumpType ret;
2117 
2118     if (cf) {
2119         nullify_over(ctx);
2120     }
2121 
2122     tcg_im = load_const(ctx, im);
2123     tcg_r2 = load_gpr(ctx, r2);
2124     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2125 
2126     return nullify_end(ctx, ret);
2127 }
2128 
2129 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2130 {
2131     target_long im = low_sextract(insn, 0, 11);
2132     unsigned e1 = extract32(insn, 11, 1);
2133     unsigned cf = extract32(insn, 12, 4);
2134     unsigned rt = extract32(insn, 16, 5);
2135     unsigned r2 = extract32(insn, 21, 5);
2136     TCGv tcg_im, tcg_r2;
2137     DisasJumpType ret;
2138 
2139     if (cf) {
2140         nullify_over(ctx);
2141     }
2142 
2143     tcg_im = load_const(ctx, im);
2144     tcg_r2 = load_gpr(ctx, r2);
2145     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2146 
2147     return nullify_end(ctx, ret);
2148 }
2149 
2150 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2151 {
2152     target_long im = low_sextract(insn, 0, 11);
2153     unsigned cf = extract32(insn, 12, 4);
2154     unsigned rt = extract32(insn, 16, 5);
2155     unsigned r2 = extract32(insn, 21, 5);
2156     TCGv tcg_im, tcg_r2;
2157     DisasJumpType ret;
2158 
2159     if (cf) {
2160         nullify_over(ctx);
2161     }
2162 
2163     tcg_im = load_const(ctx, im);
2164     tcg_r2 = load_gpr(ctx, r2);
2165     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2166 
2167     return nullify_end(ctx, ret);
2168 }
2169 
2170 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2171                                     const DisasInsn *di)
2172 {
2173     unsigned rt = extract32(insn, 0, 5);
2174     unsigned m = extract32(insn, 5, 1);
2175     unsigned sz = extract32(insn, 6, 2);
2176     unsigned a = extract32(insn, 13, 1);
2177     int disp = low_sextract(insn, 16, 5);
2178     unsigned rb = extract32(insn, 21, 5);
2179     int modify = (m ? (a ? -1 : 1) : 0);
2180     TCGMemOp mop = MO_TE | sz;
2181 
2182     return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2183 }
2184 
2185 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2186                                     const DisasInsn *di)
2187 {
2188     unsigned rt = extract32(insn, 0, 5);
2189     unsigned m = extract32(insn, 5, 1);
2190     unsigned sz = extract32(insn, 6, 2);
2191     unsigned u = extract32(insn, 13, 1);
2192     unsigned rx = extract32(insn, 16, 5);
2193     unsigned rb = extract32(insn, 21, 5);
2194     TCGMemOp mop = MO_TE | sz;
2195 
2196     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2197 }
2198 
2199 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2200                                     const DisasInsn *di)
2201 {
2202     int disp = low_sextract(insn, 0, 5);
2203     unsigned m = extract32(insn, 5, 1);
2204     unsigned sz = extract32(insn, 6, 2);
2205     unsigned a = extract32(insn, 13, 1);
2206     unsigned rr = extract32(insn, 16, 5);
2207     unsigned rb = extract32(insn, 21, 5);
2208     int modify = (m ? (a ? -1 : 1) : 0);
2209     TCGMemOp mop = MO_TE | sz;
2210 
2211     return do_store(ctx, rr, rb, disp, modify, mop);
2212 }
2213 
2214 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2215                                 const DisasInsn *di)
2216 {
2217     unsigned rt = extract32(insn, 0, 5);
2218     unsigned m = extract32(insn, 5, 1);
2219     unsigned i = extract32(insn, 12, 1);
2220     unsigned au = extract32(insn, 13, 1);
2221     unsigned rx = extract32(insn, 16, 5);
2222     unsigned rb = extract32(insn, 21, 5);
2223     TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2224     TCGv zero, addr, base, dest;
2225     int modify, disp = 0, scale = 0;
2226 
2227     nullify_over(ctx);
2228 
2229     /* ??? Share more code with do_load and do_load_{32,64}.  */
2230 
2231     if (i) {
2232         modify = (m ? (au ? -1 : 1) : 0);
2233         disp = low_sextract(rx, 0, 5);
2234         rx = 0;
2235     } else {
2236         modify = m;
2237         if (au) {
2238             scale = mop & MO_SIZE;
2239         }
2240     }
2241     if (modify) {
2242         /* Base register modification.  Make sure if RT == RB, we see
2243            the result of the load.  */
2244         dest = get_temp(ctx);
2245     } else {
2246         dest = dest_gpr(ctx, rt);
2247     }
2248 
2249     addr = tcg_temp_new();
2250     base = load_gpr(ctx, rb);
2251     if (rx) {
2252         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2253         tcg_gen_add_tl(addr, addr, base);
2254     } else {
2255         tcg_gen_addi_tl(addr, base, disp);
2256     }
2257 
2258     zero = tcg_const_tl(0);
2259     tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2260                            zero, MMU_USER_IDX, mop);
2261     if (modify) {
2262         save_gpr(ctx, rb, addr);
2263     }
2264     save_gpr(ctx, rt, dest);
2265 
2266     return nullify_end(ctx, DISAS_NEXT);
2267 }
2268 
/* STBY: store bytes.  The byte-masking logic lives in out-of-line
   helpers; A == 0 selects the stby_b helpers, A == 1 the stby_e
   helpers, with _parallel variants for MTTCG safety.  */
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    target_long disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv addr, val;

    nullify_over(ctx);

    addr = tcg_temp_new();
    /* With base modification, the helper sees the unmodified base;
       the displacement is applied to the base afterward below.  */
    if (m || disp == 0) {
        tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
    } else {
        tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
    }
    val = load_gpr(ctx, rt);

    if (a) {
        /* Parallel context requires the cmpxchg-based helper.  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }

    if (m) {
        /* Base modification: advance by DISP and word-align.  */
        tcg_gen_addi_tl(addr, addr, disp);
        tcg_gen_andi_tl(addr, addr, ~3);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);

    return nullify_end(ctx, DISAS_NEXT);
}
2312 
/* Decode table for the opcode 0x03 indexed memory group.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },     /* LDCW */
    { 0x0c001300u, 0xfc0013c0, trans_stby },     /* STBY */
};
2320 
2321 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2322 {
2323     unsigned rt = extract32(insn, 21, 5);
2324     target_long i = assemble_21(insn);
2325     TCGv tcg_rt = dest_gpr(ctx, rt);
2326 
2327     tcg_gen_movi_tl(tcg_rt, i);
2328     save_gpr(ctx, rt, tcg_rt);
2329     cond_free(&ctx->null_cond);
2330 
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2335 {
2336     unsigned rt = extract32(insn, 21, 5);
2337     target_long i = assemble_21(insn);
2338     TCGv tcg_rt = load_gpr(ctx, rt);
2339     TCGv tcg_r1 = dest_gpr(ctx, 1);
2340 
2341     tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2342     save_gpr(ctx, 1, tcg_r1);
2343     cond_free(&ctx->null_cond);
2344 
2345     return DISAS_NEXT;
2346 }
2347 
2348 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2349 {
2350     unsigned rb = extract32(insn, 21, 5);
2351     unsigned rt = extract32(insn, 16, 5);
2352     target_long i = assemble_16(insn);
2353     TCGv tcg_rt = dest_gpr(ctx, rt);
2354 
2355     /* Special case rb == 0, for the LDI pseudo-op.
2356        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2357     if (rb == 0) {
2358         tcg_gen_movi_tl(tcg_rt, i);
2359     } else {
2360         tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2361     }
2362     save_gpr(ctx, rt, tcg_rt);
2363     cond_free(&ctx->null_cond);
2364 
2365     return DISAS_NEXT;
2366 }
2367 
2368 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2369                                 bool is_mod, TCGMemOp mop)
2370 {
2371     unsigned rb = extract32(insn, 21, 5);
2372     unsigned rt = extract32(insn, 16, 5);
2373     target_long i = assemble_16(insn);
2374 
2375     return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2376 }
2377 
/* Word-sized load group: FLDW or LDW-with-modification, selected by
   the ext2 field.  */
static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FLDW without modification.  ext2 selects the FR bank half.  */
        return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* LDW with modification.  Note that the sign of I selects
           post-dec vs pre-inc.  */
        return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2398 
2399 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2400 {
2401     target_long i = assemble_16a(insn);
2402     unsigned t1 = extract32(insn, 1, 1);
2403     unsigned a = extract32(insn, 2, 1);
2404     unsigned t0 = extract32(insn, 16, 5);
2405     unsigned rb = extract32(insn, 21, 5);
2406 
2407     /* FLDW with modification.  */
2408     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2409 }
2410 
2411 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2412                                  bool is_mod, TCGMemOp mop)
2413 {
2414     unsigned rb = extract32(insn, 21, 5);
2415     unsigned rt = extract32(insn, 16, 5);
2416     target_long i = assemble_16(insn);
2417 
2418     return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2419 }
2420 
/* Word-sized store group: FSTW or STW-with-modification, selected by
   the ext2 field.  */
static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_long i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FSTW without modification.  ext2 selects the FR bank half.  */
        return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* STW with modification.  The sign of I selects post-dec
           vs pre-inc, as in trans_load_w.  */
        return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2440 
2441 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2442 {
2443     target_long i = assemble_16a(insn);
2444     unsigned t1 = extract32(insn, 1, 1);
2445     unsigned a = extract32(insn, 2, 1);
2446     unsigned t0 = extract32(insn, 16, 5);
2447     unsigned rb = extract32(insn, 21, 5);
2448 
2449     /* FSTW with modification.  */
2450     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2451 }
2452 
2453 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2454 {
2455     unsigned t0 = extract32(insn, 0, 5);
2456     unsigned m = extract32(insn, 5, 1);
2457     unsigned t1 = extract32(insn, 6, 1);
2458     unsigned ext3 = extract32(insn, 7, 3);
2459     /* unsigned cc = extract32(insn, 10, 2); */
2460     unsigned i = extract32(insn, 12, 1);
2461     unsigned ua = extract32(insn, 13, 1);
2462     unsigned rx = extract32(insn, 16, 5);
2463     unsigned rb = extract32(insn, 21, 5);
2464     unsigned rt = t1 * 32 + t0;
2465     int modify = (m ? (ua ? -1 : 1) : 0);
2466     int disp, scale;
2467 
2468     if (i == 0) {
2469         scale = (ua ? 2 : 0);
2470         disp = 0;
2471         modify = m;
2472     } else {
2473         disp = low_sextract(rx, 0, 5);
2474         scale = 0;
2475         rx = 0;
2476         modify = (m ? (ua ? -1 : 1) : 0);
2477     }
2478 
2479     switch (ext3) {
2480     case 0: /* FLDW */
2481         return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2482     case 4: /* FSTW */
2483         return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2484     }
2485     return gen_illegal(ctx);
2486 }
2487 
2488 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2489 {
2490     unsigned rt = extract32(insn, 0, 5);
2491     unsigned m = extract32(insn, 5, 1);
2492     unsigned ext4 = extract32(insn, 6, 4);
2493     /* unsigned cc = extract32(insn, 10, 2); */
2494     unsigned i = extract32(insn, 12, 1);
2495     unsigned ua = extract32(insn, 13, 1);
2496     unsigned rx = extract32(insn, 16, 5);
2497     unsigned rb = extract32(insn, 21, 5);
2498     int modify = (m ? (ua ? -1 : 1) : 0);
2499     int disp, scale;
2500 
2501     if (i == 0) {
2502         scale = (ua ? 3 : 0);
2503         disp = 0;
2504         modify = m;
2505     } else {
2506         disp = low_sextract(rx, 0, 5);
2507         scale = 0;
2508         rx = 0;
2509         modify = (m ? (ua ? -1 : 1) : 0);
2510     }
2511 
2512     switch (ext4) {
2513     case 0: /* FLDD */
2514         return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2515     case 8: /* FSTD */
2516         return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2517     default:
2518         return gen_illegal(ctx);
2519     }
2520 }
2521 
/* Compare-and-branch: compares a register or 5-bit signed immediate
   against GR[r] (via subtraction) and branches on condition C.
   IS_TRUE folds the negated-condition encodings into CF.
   NOTE(review): is_dw is currently unused here — 32-bit only.  */
static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm, bool is_dw)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);     /* nullify bit */
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;         /* full condition field */
    TCGv dest, in1, in2, sv;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_tl(dest, in1, in2);

    /* Only condition 6 consumes the signed-overflow value.  */
    TCGV_UNUSED(sv);
    if (c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(cf, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
2553 
/* Add-and-branch: adds a register or 5-bit signed immediate into
   GR[r] (the sum is written back to r) and branches on condition C.  */
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);     /* nullify bit */
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;         /* full condition field */
    TCGv dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    /* SV and CB_MSB are only computed for the conditions that need
       them; do_cond must not dereference the unused ones.  */
    TCGV_UNUSED(sv);
    TCGV_UNUSED(cb_msb);

    switch (c) {
    default:
        tcg_gen_add_tl(dest, in1, in2);
        break;
    case 4: case 5:
        /* Carry conditions: track the carry-out MSB via add2.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_tl(cb_msb, 0);
        tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        /* Overflow condition: compute the signed-overflow value.  */
        tcg_gen_add_tl(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
2595 
/* BB: branch on bit.  The selected bit (position from the immediate P
   when I is set, otherwise from %sar) is shifted into the sign bit,
   so a signed GE/LT test on the result tests the bit's value.  */
static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);     /* nullify bit */
    unsigned c = extract32(insn, 15, 1);    /* branch on clear vs set */
    unsigned r = extract32(insn, 16, 5);
    unsigned p = extract32(insn, 21, 5);    /* immediate bit position */
    unsigned i = extract32(insn, 26, 1);
    TCGv tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, r);
    if (i) {
        tcg_gen_shli_tl(tmp, tcg_r, p);
    } else {
        tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
    }

    /* GE tests sign bit clear, LT tests sign bit set.  */
    cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, disp, n, &cond);
}
2621 
/* MOVB / MOVIB: copy a register or 5-bit signed immediate into GR[r],
   then branch on the shift/extract/deposit condition of the value.  */
static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
    target_long disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);     /* nullify bit */
    unsigned c = extract32(insn, 13, 3);
    unsigned t = extract32(insn, 16, 5);    /* source reg or immediate */
    unsigned r = extract32(insn, 21, 5);
    TCGv dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, r);
    if (is_imm) {
        tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
    } else if (t == 0) {
        /* Reading %r0 always yields zero.  */
        tcg_gen_movi_tl(dest, 0);
    } else {
        tcg_gen_mov_tl(dest, cpu_gr[t]);
    }

    cond = do_sed_cond(c, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
2646 
/* SHRPW, shift amount from %sar: shift the 64-bit concatenation
   R1:R2 right and keep the low 32 bits.  Two degenerate operand
   patterns get cheaper code: R1 == 0 is a plain 32-bit shift right,
   and R1 == R2 is a 32-bit rotate right.  */
static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv dest;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    if (r1 == 0) {
        /* High half is zero: a plain 32-bit logical shift right.  */
        tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
        tcg_gen_shr_tl(dest, dest, cpu_sar);
    } else if (r1 == r2) {
        /* Both halves equal: a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_tl(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        /* General case: 64-bit funnel shift of r1:r2.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
        tcg_gen_extu_tl_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_tl(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2691 
/* SHRPW, immediate shift amount: shift the 64-bit concatenation
   R1:R2 right by SA = 31 - CPOS (big-endian bit numbering) and keep
   the low 32 bits, with cheaper code for the degenerate operand
   patterns.  */
static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned sa = 31 - cpos;
    TCGv dest, t2;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    t2 = load_gpr(ctx, r2);
    if (r1 == r2) {
        /* Both halves equal: a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_tl(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (r1 == 0) {
        /* High half is zero: a plain field extract.  */
        tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
    } else {
        /* General case: low bits from r2, high bits from r1.  */
        TCGv t0 = tcg_temp_new();
        tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2732 
/* EXTRW, position from %sar: extract a LEN-bit field, signed or
   unsigned, whose position is taken from %sar at runtime.  */
static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned is_se = extract32(insn, 10, 1);    /* sign-extend flag */
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;                   /* field length */
    TCGv dest, src, tmp;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
    if (is_se) {
        tcg_gen_sar_tl(dest, src, tmp);
        tcg_gen_sextract_tl(dest, dest, 0, len);
    } else {
        tcg_gen_shr_tl(dest, src, tmp);
        tcg_gen_extract_tl(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2771 
2772 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2773                                      const DisasInsn *di)
2774 {
2775     unsigned clen = extract32(insn, 0, 5);
2776     unsigned pos = extract32(insn, 5, 5);
2777     unsigned is_se = extract32(insn, 10, 1);
2778     unsigned c = extract32(insn, 13, 3);
2779     unsigned rt = extract32(insn, 16, 5);
2780     unsigned rr = extract32(insn, 21, 5);
2781     unsigned len = 32 - clen;
2782     unsigned cpos = 31 - pos;
2783     TCGv dest, src;
2784 
2785     if (c) {
2786         nullify_over(ctx);
2787     }
2788 
2789     dest = dest_gpr(ctx, rt);
2790     src = load_gpr(ctx, rr);
2791     if (is_se) {
2792         tcg_gen_sextract_tl(dest, src, cpos, len);
2793     } else {
2794         tcg_gen_extract_tl(dest, src, cpos, len);
2795     }
2796     save_gpr(ctx, rt, dest);
2797 
2798     /* Install the new nullification.  */
2799     cond_free(&ctx->null_cond);
2800     if (c) {
2801         ctx->null_cond = do_sed_cond(c, dest);
2802     }
2803     return nullify_end(ctx, DISAS_NEXT);
2804 }
2805 
/* Decode table for the shift/extract group (opcode 0x34).  */
static const DisasInsn table_sh_ex[] = {
    { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },  /* shrpw, sar */
    { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },  /* shrpw, imm */
    { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },  /* extrw, sar */
    { 0xd0001800u, 0xfc001800u, trans_extrw_imm },  /* extrw, imm */
};
2812 
/* DEPWI with immediate position: deposit a 5-bit signed immediate
   into rt.  Both forms reduce to constant mask arithmetic computed
   at translation time.  */
static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);   /* merge rather than zero */
    unsigned c = extract32(insn, 13, 3);
    target_long val = low_sextract(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    target_long mask0, mask1;
    TCGv dest;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    /* mask0: VAL deposited into zeros; mask1: VAL deposited into
       all-ones (i.e. the bits that must survive an AND).  */
    mask0 = deposit64(0, cpos, len, val);
    mask1 = deposit64(-1, cpos, len, val);

    if (nz) {
        /* Merge form: clear the field in rt, then OR in the value.  */
        TCGv src = load_gpr(ctx, rt);
        if (mask1 != -1) {
            tcg_gen_andi_tl(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_tl(dest, src, mask0);
    } else {
        /* Zero form: the whole result is a compile-time constant.  */
        tcg_gen_movi_tl(dest, mask0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2856 
/* DEPW with immediate position: deposit the low LEN bits of GR[rr]
   into rt at fixed position CPOS; NZ selects merging into the old
   value of rt rather than depositing into zero.  */
static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);   /* merge rather than zero */
    unsigned c = extract32(insn, 13, 3);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;              /* background register */
    unsigned len = 32 - clen;
    TCGv dest, val;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    val = load_gpr(ctx, rr);
    if (rs == 0) {
        tcg_gen_deposit_z_tl(dest, val, cpos, len);
    } else {
        tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2893 
/* Deposit word with the bit position taken from SAR (variable deposit).
   The deposited value is either a 5-bit sign-extended immediate or a
   register, selected by the I bit.  */
static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv val, mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    if (c) {
        nullify_over(ctx);
    }

    /* Immediate vs register source for the value to deposit.  */
    if (i) {
        val = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        val = load_gpr(ctx, extract32(insn, 16, 5));
    }
    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);

    /* MASK covers the LEN low bits of the field before shifting.  */
    mask = tcg_const_tl(msb + (msb - 1));
    tcg_gen_and_tl(tmp, val, mask);
    if (rs) {
        /* Merge form: clear the field in r[rs], then OR in the bits.  */
        tcg_gen_shl_tl(mask, mask, shift);
        tcg_gen_shl_tl(tmp, tmp, shift);
        tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
        tcg_gen_or_tl(dest, dest, tmp);
    } else {
        /* Zero form: everything outside the shifted field is 0.  */
        tcg_gen_shl_tl(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
2945 
/* Major opcode 0x35: deposit-word group, decoded by mask/value match.  */
static const DisasInsn table_depw[] = {
    { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
    { 0xd4000800u, 0xfc001800u, trans_depw_imm },
    { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
};
2951 
/* BE / BLE: branch external.  Spaces are not implemented, so this is
   just a (possibly direct) branch; the ,L form links to r31.  */
static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned b = extract32(insn, 21, 5);
    target_long disp = assemble_17(insn);

    /* unsigned s = low_uextract(insn, 13, 3); */
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */

    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (b == 0) {
        return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
    } else {
        TCGv tmp = get_temp(ctx);
        tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
        return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
    }
}
2976 
2977 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2978                               const DisasInsn *di)
2979 {
2980     unsigned n = extract32(insn, 1, 1);
2981     unsigned link = extract32(insn, 21, 5);
2982     target_long disp = assemble_17(insn);
2983 
2984     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2985 }
2986 
2987 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2988                                    const DisasInsn *di)
2989 {
2990     unsigned n = extract32(insn, 1, 1);
2991     target_long disp = assemble_22(insn);
2992 
2993     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2994 }
2995 
/* BLR: branch and link, register-indexed.  The target is the current
   front of the IA queue plus 8, plus 8 * r[rx].  */
static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned link = extract32(insn, 21, 5);
    TCGv tmp = get_temp(ctx);

    /* Scale the index by the size of two insns (8 bytes).  */
    tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
    tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
    return do_ibranch(ctx, tmp, link, n);
}
3008 
/* BV: branch vectored -- indirect branch to r[rb], optionally indexed
   by 8 * r[rx].  No link register.  */
static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv dest;

    if (rx == 0) {
        /* Unindexed: branch straight through the base register.  */
        dest = load_gpr(ctx, rb);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
        tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
    }
    return do_ibranch(ctx, dest, 0, n);
}
3026 
3027 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3028                                const DisasInsn *di)
3029 {
3030     unsigned n = extract32(insn, 1, 1);
3031     unsigned rb = extract32(insn, 21, 5);
3032     unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3033 
3034     return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3035 }
3036 
/* Major opcode 0x3A: unconditional branch group.  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
};
3044 
3045 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3046                                       const DisasInsn *di)
3047 {
3048     unsigned rt = extract32(insn, 0, 5);
3049     unsigned ra = extract32(insn, 21, 5);
3050     return do_fop_wew(ctx, rt, ra, di->f.wew);
3051 }
3052 
3053 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3054                                       const DisasInsn *di)
3055 {
3056     unsigned rt = assemble_rt64(insn);
3057     unsigned ra = assemble_ra64(insn);
3058     return do_fop_wew(ctx, rt, ra, di->f.wew);
3059 }
3060 
3061 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3062                                    const DisasInsn *di)
3063 {
3064     unsigned rt = extract32(insn, 0, 5);
3065     unsigned ra = extract32(insn, 21, 5);
3066     return do_fop_ded(ctx, rt, ra, di->f.ded);
3067 }
3068 
3069 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3070                                       const DisasInsn *di)
3071 {
3072     unsigned rt = extract32(insn, 0, 5);
3073     unsigned ra = extract32(insn, 21, 5);
3074     return do_fop_wed(ctx, rt, ra, di->f.wed);
3075 }
3076 
3077 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3078                                       const DisasInsn *di)
3079 {
3080     unsigned rt = assemble_rt64(insn);
3081     unsigned ra = extract32(insn, 21, 5);
3082     return do_fop_wed(ctx, rt, ra, di->f.wed);
3083 }
3084 
3085 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3086                                       const DisasInsn *di)
3087 {
3088     unsigned rt = extract32(insn, 0, 5);
3089     unsigned ra = extract32(insn, 21, 5);
3090     return do_fop_dew(ctx, rt, ra, di->f.dew);
3091 }
3092 
3093 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3094                                       const DisasInsn *di)
3095 {
3096     unsigned rt = extract32(insn, 0, 5);
3097     unsigned ra = assemble_ra64(insn);
3098     return do_fop_dew(ctx, rt, ra, di->f.dew);
3099 }
3100 
3101 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3102                                        const DisasInsn *di)
3103 {
3104     unsigned rt = extract32(insn, 0, 5);
3105     unsigned rb = extract32(insn, 16, 5);
3106     unsigned ra = extract32(insn, 21, 5);
3107     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3108 }
3109 
3110 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3111                                        const DisasInsn *di)
3112 {
3113     unsigned rt = assemble_rt64(insn);
3114     unsigned rb = assemble_rb64(insn);
3115     unsigned ra = assemble_ra64(insn);
3116     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3117 }
3118 
3119 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3120                                     const DisasInsn *di)
3121 {
3122     unsigned rt = extract32(insn, 0, 5);
3123     unsigned rb = extract32(insn, 16, 5);
3124     unsigned ra = extract32(insn, 21, 5);
3125     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3126 }
3127 
/* FCPY single: raw 32-bit move; the env argument is unused.  */
static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
3132 
/* FCPY double: raw 64-bit move; the env argument is unused.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}
3137 
/* FABS single: clear the sign bit (bit 31).  */
static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}
3142 
/* FABS double: clear the sign bit (bit 63).  */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}
3147 
/* FNEG single: toggle the sign bit.  */
static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}
3152 
/* FNEG double: toggle the sign bit.  */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}
3157 
/* FNEGABS single: force the sign bit on, i.e. -|src|.  */
static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}
3162 
/* FNEGABS double: force the sign bit on, i.e. -|src|.  */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
3167 
/* FCMP single: compare fr[ra] with fr[rb] via the fcmp_s helper, which
   records the result in the FP status; Y and C select the condition
   and are passed through unchanged.  */
static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
                               unsigned y, unsigned c)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(ra);
    tb = load_frw0_i32(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
3189 
3190 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3191                                      const DisasInsn *di)
3192 {
3193     unsigned c = extract32(insn, 0, 5);
3194     unsigned y = extract32(insn, 13, 3);
3195     unsigned rb = extract32(insn, 16, 5);
3196     unsigned ra = extract32(insn, 21, 5);
3197     return do_fcmp_s(ctx, ra, rb, y, c);
3198 }
3199 
3200 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3201                                      const DisasInsn *di)
3202 {
3203     unsigned c = extract32(insn, 0, 5);
3204     unsigned y = extract32(insn, 13, 3);
3205     unsigned rb = assemble_rb64(insn);
3206     unsigned ra = assemble_ra64(insn);
3207     return do_fcmp_s(ctx, ra, rb, y, c);
3208 }
3209 
/* FCMP double: compare fr[ra] with fr[rb] via the fcmp_d helper, which
   records the result in the FP status.  */
static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    unsigned y = extract32(insn, 13, 3);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(ra);
    tb = load_frd0(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
3236 
/* FTEST, target form: nullify the next insn according to a single bit
   of the shadow copy of the FP status register.  */
static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned y = extract32(insn, 13, 3);
    /* Map Y to a bit offset below bit 21; relies on defined unsigned
       wraparound for y == 1.  NOTE(review): confirm the bit selection
       against the PA-RISC FTEST encoding.  */
    unsigned cbit = (y ^ 1) - 1;
    TCGv t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
    tcg_gen_extract_tl(t, t, 21 - cbit, 1);
    /* Nullify if the selected status bit is set.  */
    ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    tcg_temp_free(t);

    return nullify_end(ctx, DISAS_NEXT);
}
3254 
/* FTEST, queue form: nullify the next insn according to a combination
   of compare-result bits in the shadowed FP status word; C selects the
   combination, with some forms inverted ("rej").  */
static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    int mask;
    bool inv = false;
    TCGv t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    switch (c) {
    case 0: /* simple */
        /* Test only the C bit.  */
        tcg_gen_andi_tl(t, t, 0x4000000);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        goto done;
    case 2: /* rej */
        inv = true;
        /* fallthru */
    case 1: /* acc */
        mask = 0x43ff800;
        break;
    case 6: /* rej8 */
        inv = true;
        /* fallthru */
    case 5: /* acc8 */
        mask = 0x43f8000;
        break;
    case 9: /* acc6 */
        mask = 0x43e0000;
        break;
    case 13: /* acc4 */
        mask = 0x4380000;
        break;
    case 17: /* acc2 */
        mask = 0x4200000;
        break;
    default:
        /* Unrecognized condition encodings are illegal.  */
        return gen_illegal(ctx);
    }
    if (inv) {
        TCGv c = load_const(ctx, mask);
        tcg_gen_or_tl(t, t, c);
        ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
    } else {
        tcg_gen_andi_tl(t, t, mask);
        ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
    }
 done:
    return nullify_end(ctx, DISAS_NEXT);
}
3308 
/* XMPYU: 32x32 -> 64-bit fixed-point multiply within the FP register
   file (unsigned; assumes load_frw0_i64 zero-extends the 32-bit
   word -- TODO confirm).  */
static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    TCGv_i64 a, b;

    nullify_over(ctx);

    a = load_frw0_i64(ra);
    b = load_frw0_i64(rb);
    tcg_gen_mul_i64(a, a, b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);

    return nullify_end(ctx, DISAS_NEXT);
}
3328 
3329 #define FOP_DED  trans_fop_ded, .f.ded
3330 #define FOP_DEDD trans_fop_dedd, .f.dedd
3331 
3332 #define FOP_WEW  trans_fop_wew_0c, .f.wew
3333 #define FOP_DEW  trans_fop_dew_0c, .f.dew
3334 #define FOP_WED  trans_fop_wed_0c, .f.wed
3335 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3336 
/* Major opcode 0x0C: floating point coprocessor, 32-bit instruction
   forms, decoded by mask/value match.  */
static const DisasInsn table_float_0c[] = {
    /* floating point class zero */
    { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
    { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
    { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
    { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
    { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },

    { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
    { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
    { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
    { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },

    { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    /* floating point class one */
    /* float/float */
    { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
    { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
    { 0x30002420, 0xffffffe0, trans_ftest_q },
    { 0x30000420, 0xffff1fff, trans_ftest_t },

    /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
       This is machine/revision == 0, which is reserved for simulator.  */
    { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
};
3409 
3410 #undef FOP_WEW
3411 #undef FOP_DEW
3412 #undef FOP_WED
3413 #undef FOP_WEWW
3414 #define FOP_WEW  trans_fop_wew_0e, .f.wew
3415 #define FOP_DEW  trans_fop_dew_0e, .f.dew
3416 #define FOP_WED  trans_fop_wed_0e, .f.wed
3417 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3418 
/* Major opcode 0x0E: floating point coprocessor with the packed
   register-half encodings, decoded by mask/value match.  */
static const DisasInsn table_float_0e[] = {
    /* floating point class zero */
    { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
    { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
    { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
    { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
    { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },

    { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
    { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
    { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
    { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },

    { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    { 0x38004700, 0xfc00ef60, trans_xmpyu },

    /* floating point class one */
    /* float/float */
    { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
    { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
};
3487 
3488 #undef FOP_WEW
3489 #undef FOP_DEW
3490 #undef FOP_WED
3491 #undef FOP_WEWW
3492 #undef FOP_DED
3493 #undef FOP_DEDD
3494 
/* Convert the fmpyadd single-precision register encodings to standard.
   Bit 4 selects the upper bank of 16; the low 4 bits index within it,
   offset past the double-precision registers.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned bank = r & 16;
    unsigned index = r & 15;

    return 16 + bank * 2 + index;
}
3500 
/* FMPYADD / FMPYSUB (opcodes 0x06 / 0x26): a multiply and an add or
   subtract performed as two independent FP operations.  F selects
   single (with the packed register encoding) vs double precision.  */
static DisasJumpType trans_fmpyadd(DisasContext *ctx,
                                   uint32_t insn, bool is_sub)
{
    unsigned tm = extract32(insn, 0, 5);
    unsigned f = extract32(insn, 5, 1);
    unsigned ra = extract32(insn, 6, 5);
    unsigned ta = extract32(insn, 11, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned rm1 = extract32(insn, 21, 5);

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        /* Single precision: remap the packed register encodings.  */
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
3532 
/* FMPYFADD / FMPYNFADD single: fused multiply-add
   fr[rt] = fr[rm1] * fr[rm2] + fr[ra3]; NEG selects the
   negated-multiply form (fmpynfadd).  */
static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, DISAS_NEXT);
}
3560 
/* FMPYFADD / FMPYNFADD double: fused multiply-add
   fr[rt] = fr[rm1] * fr[rm2] + fr[ra3]; NEG selects the
   negated-multiply form (fmpynfadd).  */
static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, DISAS_NEXT);
}
3588 
/* Major opcode 0x2E: fused multiply-add group.  */
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
3593 
/* Linear-scan decoder: dispatch INSN to the first table entry whose
   masked bits match; unmatched encodings are illegal instructions.  */
static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
                                         const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            return table[i].trans(ctx, insn, &table[i]);
        }
    }
    return gen_illegal(ctx);
}
3605 
3606 #define translate_table(ctx, insn, table) \
3607     translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3608 
/* Decode and translate a single instruction, dispatching on the 6-bit
   major opcode either to a per-group decode table or directly to a
   translator.  */
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    /* Loads: opcodes 0x10-0x17.  */
    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    /* Stores: opcodes 0x18-0x1F.  */
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    /* Compare/add-and-branch and immediate arithmetic.  */
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    /* Bit-branch, move-and-branch, extract/deposit, branches.  */
    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
    default:
        break;
    }
    /* Unimplemented and unassigned opcodes raise an illegal insn.  */
    return gen_illegal(ctx);
}
3729 
/* Translator hook: initialize per-TB state.  TB->pc/cs_base hold the
   front and back of the HPPA instruction address queue.  Returns the
   insn budget for this TB.  */
static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
                                      CPUState *cs, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    TranslationBlock *tb = ctx->base.tb;
    int i, bound;

    ctx->cs = cs;
    ctx->iaoq_f = tb->pc;
    ctx->iaoq_b = tb->cs_base;
    ctx->iaoq_n = -1;
    TCGV_UNUSED(ctx->iaoq_n_var);

    ctx->ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
        TCGV_UNUSED(ctx->temps[i]);
    }

    /* Bound the TB so it never crosses a page: the number of 4-byte
       insns remaining on the current page.  */
    bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
    return MIN(max_insns, bound);
}
3751 
3752 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3753 {
3754     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3755 
3756     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
3757     ctx->null_cond = cond_make_f();
3758     ctx->psw_n_nonzero = false;
3759     if (ctx->base.tb->flags & 1) {
3760         ctx->null_cond.c = TCG_COND_ALWAYS;
3761         ctx->psw_n_nonzero = true;
3762     }
3763     ctx->null_lab = NULL;
3764 }
3765 
3766 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3767 {
3768     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3769 
3770     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3771 }
3772 
3773 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3774                                       const CPUBreakpoint *bp)
3775 {
3776     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3777 
3778     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3779     ctx->base.pc_next = ctx->iaoq_f + 4;
3780     return true;
3781 }
3782 
3783 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3784 {
3785     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3786     CPUHPPAState *env = cs->env_ptr;
3787     DisasJumpType ret;
3788     int i, n;
3789 
3790     /* Execute one insn.  */
3791     if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
3792         ret = do_page_zero(ctx);
3793         assert(ret != DISAS_NEXT);
3794     } else {
3795         /* Always fetch the insn, even if nullified, so that we check
3796            the page permissions for execute.  */
3797         uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3798 
3799         /* Set up the IA queue for the next insn.
3800            This will be overwritten by a branch.  */
3801         if (ctx->iaoq_b == -1) {
3802             ctx->iaoq_n = -1;
3803             ctx->iaoq_n_var = get_temp(ctx);
3804             tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3805         } else {
3806             ctx->iaoq_n = ctx->iaoq_b + 4;
3807             TCGV_UNUSED(ctx->iaoq_n_var);
3808         }
3809 
3810         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3811             ctx->null_cond.c = TCG_COND_NEVER;
3812             ret = DISAS_NEXT;
3813         } else {
3814             ret = translate_one(ctx, insn);
3815             assert(ctx->null_lab == NULL);
3816         }
3817     }
3818 
3819     /* Free any temporaries allocated.  */
3820     for (i = 0, n = ctx->ntemps; i < n; ++i) {
3821         tcg_temp_free(ctx->temps[i]);
3822         TCGV_UNUSED(ctx->temps[i]);
3823     }
3824     ctx->ntemps = 0;
3825 
3826     /* Advance the insn queue.  */
3827     /* ??? The non-linear instruction restriction is purely due to
3828        the debugging dump.  Otherwise we *could* follow unconditional
3829        branches within the same page.  */
3830     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3831         if (ctx->null_cond.c == TCG_COND_NEVER
3832             || ctx->null_cond.c == TCG_COND_ALWAYS) {
3833             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3834             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3835             ret = DISAS_NORETURN;
3836         } else {
3837             ret = DISAS_IAQ_N_STALE;
3838        }
3839     }
3840     ctx->iaoq_f = ctx->iaoq_b;
3841     ctx->iaoq_b = ctx->iaoq_n;
3842     ctx->base.is_jmp = ret;
3843 
3844     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3845         return;
3846     }
3847     if (ctx->iaoq_f == -1) {
3848         tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3849         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3850         nullify_save(ctx);
3851         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3852     } else if (ctx->iaoq_b == -1) {
3853         tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3854     }
3855 }
3856 
/* Finish the TB: flush any translate-time IA-queue / nullification
   state back to the cpu registers and emit the TB exit.  */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The TB already ended with an exit (goto_tb or exception).  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
        /* The cpu IA queue is stale: write back the translate-time
           values before leaving, then exit as for N_UPDATED.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* We don't actually use this during normal translation,
       but we should interact with the generic main loop.  */
    ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
}
3885 
3886 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3887 {
3888     TranslationBlock *tb = dcbase->tb;
3889 
3890     switch (tb->pc) {
3891     case 0x00:
3892         qemu_log("IN:\n0x00000000:  (null)\n");
3893         break;
3894     case 0xb0:
3895         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
3896         break;
3897     case 0xe0:
3898         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
3899         break;
3900     case 0x100:
3901         qemu_log("IN:\n0x00000100:  syscall\n");
3902         break;
3903     default:
3904         qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3905         log_target_disas(cs, tb->pc, tb->size, 1);
3906         break;
3907     }
3908 }
3909 
/* Hook table for the generic translator loop (exec/translator.h).  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
3919 
3920 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3921 
3922 {
3923     DisasContext ctx;
3924     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3925 }
3926 
3927 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3928                           target_ulong *data)
3929 {
3930     env->iaoq_f = data[0];
3931     if (data[1] != -1) {
3932         env->iaoq_b = data[1];
3933     }
3934     /* Since we were executing the instruction at IAOQ_F, and took some
3935        sort of action that provoked the cpu_restore_state, we can infer
3936        that the instruction was not nullified.  */
3937     env->psw_n = 0;
3938 }
3939