xref: /openbmc/qemu/target/hppa/translate.c (revision 406d2aa2)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
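/* A deferred comparison: A0 is compared against A1 with relation C.
   The flags are bookkeeping for cond_prep/cond_free below: a0_is_n
   means A0 aliases the cpu_psw_n global (and must not be freed), and
   a1_is_0 means A1 is a constant zero that has not yet been
   materialized into a temporary.  */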
33 typedef struct DisasCond {
34     TCGCond c;
35     TCGv a0, a1;
36     bool a0_is_n;
37     bool a1_is_0;
38 } DisasCond;
39 
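/* Per-TB translation state, updated as each insn is processed.
   iaoq_f and iaoq_b mirror the PA-RISC instruction address offset
   queue: the front element is the insn being translated, the back
   element is its successor (the delay slot for branches), and
   iaoq_n/iaoq_n_var describe the insn after that, either as a
   constant or, when unknown at translate time, as a TCG value;
   a constant of -1 means "use the variable form".  */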
40 typedef struct DisasContext {
41     DisasContextBase base;
42     CPUState *cs;
43 
44     target_ulong iaoq_f;
45     target_ulong iaoq_b;
46     target_ulong iaoq_n;
47     TCGv iaoq_n_var;
48 
49     int ntemps;
50     TCGv temps[8];
51 
52     DisasCond null_cond;
53     TCGLabel *null_lab;
54 
55     bool psw_n_nonzero;
56 } DisasContext;
57 
58 /* Target-specific return values from translate_one, indicating the
59    state of the TB.  Note that DISAS_NEXT indicates that we are not
60    exiting the TB.  */
61 
62 /* We are not using a goto_tb (for whatever reason), but have updated
63    the iaq (for whatever reason), so don't do it again on exit.  */
64 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
65 
66 /* We are exiting the TB, but have neither emitted a goto_tb, nor
67    updated the iaq for the next instruction to be executed.  */
68 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
69 
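/* One entry of the decode tables below: a candidate opcode INSN and the
   MASK of bits that must match it -- presumably tested by the table
   walker as (insn & mask) == insn -- plus the translate handler and an
   optional operation callback whose signature depends on the insn
   class.  */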
70 typedef struct DisasInsn {
71     uint32_t insn, mask;
72     DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
73                            const struct DisasInsn *f);
74     union {
75         void (*ttt)(TCGv, TCGv, TCGv);
76         void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
77         void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
78         void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
79         void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
80         void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
81         void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
82     } f;
83 } DisasInsn;
84 
85 /* global register indexes */
86 static TCGv cpu_gr[32];
87 static TCGv cpu_iaoq_f;
88 static TCGv cpu_iaoq_b;
89 static TCGv cpu_sar;
90 static TCGv cpu_psw_n;
91 static TCGv cpu_psw_v;
92 static TCGv cpu_psw_cb;
93 static TCGv cpu_psw_cb_msb;
94 static TCGv cpu_cr26;
95 static TCGv cpu_cr27;
96 
97 #include "exec/gen-icount.h"
98 
99 void hppa_translate_init(void)
100 {
101 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
102 
103     typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
104     static const GlobalVar vars[] = {
105         DEF_VAR(sar),
106         DEF_VAR(cr26),
107         DEF_VAR(cr27),
108         DEF_VAR(psw_n),
109         DEF_VAR(psw_v),
110         DEF_VAR(psw_cb),
111         DEF_VAR(psw_cb_msb),
112         DEF_VAR(iaoq_f),
113         DEF_VAR(iaoq_b),
114     };
115 
116 #undef DEF_VAR
117 
118     /* Use the symbolic register names that match the disassembler.  */
119     static const char gr_names[32][4] = {
120         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
121         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
122         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
123         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
124     };
125 
126     int i;
127 
128     cpu_gr[0] = NULL;
129     for (i = 1; i < 32; i++) {
130         cpu_gr[i] = tcg_global_mem_new(cpu_env,
131                                        offsetof(CPUHPPAState, gr[i]),
132                                        gr_names[i]);
133     }
134 
135     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
136         const GlobalVar *v = &vars[i];
137         *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
138     }
139 }
140 
141 static DisasCond cond_make_f(void)
142 {
143     return (DisasCond){
144         .c = TCG_COND_NEVER,
145         .a0 = NULL,
146         .a1 = NULL,
147     };
148 }
149 
150 static DisasCond cond_make_n(void)
151 {
152     return (DisasCond){
153         .c = TCG_COND_NE,
154         .a0 = cpu_psw_n,
155         .a0_is_n = true,
156         .a1 = NULL,
157         .a1_is_0 = true
158     };
159 }
160 
161 static DisasCond cond_make_0(TCGCond c, TCGv a0)
162 {
163     DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
164 
165     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
166     r.a0 = tcg_temp_new();
167     tcg_gen_mov_tl(r.a0, a0);
168 
169     return r;
170 }
171 
172 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
173 {
174     DisasCond r = { .c = c };
175 
176     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
177     r.a0 = tcg_temp_new();
178     tcg_gen_mov_tl(r.a0, a0);
179     r.a1 = tcg_temp_new();
180     tcg_gen_mov_tl(r.a1, a1);
181 
182     return r;
183 }
184 
185 static void cond_prep(DisasCond *cond)
186 {
187     if (cond->a1_is_0) {
188         cond->a1_is_0 = false;
189         cond->a1 = tcg_const_tl(0);
190     }
191 }
192 
193 static void cond_free(DisasCond *cond)
194 {
195     switch (cond->c) {
196     default:
197         if (!cond->a0_is_n) {
198             tcg_temp_free(cond->a0);
199         }
200         if (!cond->a1_is_0) {
201             tcg_temp_free(cond->a1);
202         }
203         cond->a0_is_n = false;
204         cond->a1_is_0 = false;
205         cond->a0 = NULL;
206         cond->a1 = NULL;
207         /* fallthru */
208     case TCG_COND_ALWAYS:
209         cond->c = TCG_COND_NEVER;
210         break;
211     case TCG_COND_NEVER:
212         break;
213     }
214 }
215 
216 static TCGv get_temp(DisasContext *ctx)
217 {
218     unsigned i = ctx->ntemps++;
219     g_assert(i < ARRAY_SIZE(ctx->temps));
220     return ctx->temps[i] = tcg_temp_new();
221 }
222 
223 static TCGv load_const(DisasContext *ctx, target_long v)
224 {
225     TCGv t = get_temp(ctx);
226     tcg_gen_movi_tl(t, v);
227     return t;
228 }
229 
230 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
231 {
232     if (reg == 0) {
233         TCGv t = get_temp(ctx);
234         tcg_gen_movi_tl(t, 0);
235         return t;
236     } else {
237         return cpu_gr[reg];
238     }
239 }
240 
241 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
242 {
243     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
244         return get_temp(ctx);
245     } else {
246         return cpu_gr[reg];
247     }
248 }
249 
250 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
251 {
252     if (ctx->null_cond.c != TCG_COND_NEVER) {
253         cond_prep(&ctx->null_cond);
254         tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
255                            ctx->null_cond.a1, dest, t);
256     } else {
257         tcg_gen_mov_tl(dest, t);
258     }
259 }
260 
261 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
262 {
263     if (reg != 0) {
264         save_or_nullify(ctx, cpu_gr[reg], t);
265     }
266 }
267 
268 #ifdef HOST_WORDS_BIGENDIAN
269 # define HI_OFS  0
270 # define LO_OFS  4
271 #else
272 # define HI_OFS  4
273 # define LO_OFS  0
274 #endif
275 
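/* The 64-bit FR[n] registers are addressed as 32-bit halves by the
   single-precision operations: register numbers 0..31 appear to select
   the left (most significant) half and 32..63 the right half, hence the
   "rt & 32" tests below; HI_OFS/LO_OFS above account for the host
   endianness of the fr[] array elements.  */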
276 static TCGv_i32 load_frw_i32(unsigned rt)
277 {
278     TCGv_i32 ret = tcg_temp_new_i32();
279     tcg_gen_ld_i32(ret, cpu_env,
280                    offsetof(CPUHPPAState, fr[rt & 31])
281                    + (rt & 32 ? LO_OFS : HI_OFS));
282     return ret;
283 }
284 
285 static TCGv_i32 load_frw0_i32(unsigned rt)
286 {
287     if (rt == 0) {
288         return tcg_const_i32(0);
289     } else {
290         return load_frw_i32(rt);
291     }
292 }
293 
294 static TCGv_i64 load_frw0_i64(unsigned rt)
295 {
296     if (rt == 0) {
297         return tcg_const_i64(0);
298     } else {
299         TCGv_i64 ret = tcg_temp_new_i64();
300         tcg_gen_ld32u_i64(ret, cpu_env,
301                           offsetof(CPUHPPAState, fr[rt & 31])
302                           + (rt & 32 ? LO_OFS : HI_OFS));
303         return ret;
304     }
305 }
306 
307 static void save_frw_i32(unsigned rt, TCGv_i32 val)
308 {
309     tcg_gen_st_i32(val, cpu_env,
310                    offsetof(CPUHPPAState, fr[rt & 31])
311                    + (rt & 32 ? LO_OFS : HI_OFS));
312 }
313 
314 #undef HI_OFS
315 #undef LO_OFS
316 
317 static TCGv_i64 load_frd(unsigned rt)
318 {
319     TCGv_i64 ret = tcg_temp_new_i64();
320     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
321     return ret;
322 }
323 
324 static TCGv_i64 load_frd0(unsigned rt)
325 {
326     if (rt == 0) {
327         return tcg_const_i64(0);
328     } else {
329         return load_frd(rt);
330     }
331 }
332 
333 static void save_frd(unsigned rt, TCGv_i64 val)
334 {
335     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
336 }
337 
338 /* Skip over the implementation of an insn that has been nullified.
339    Use this when the insn is too complex for a conditional move.  */
340 static void nullify_over(DisasContext *ctx)
341 {
342     if (ctx->null_cond.c != TCG_COND_NEVER) {
343         /* The always condition should have been handled in the main loop.  */
344         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
345 
346         ctx->null_lab = gen_new_label();
347         cond_prep(&ctx->null_cond);
348 
349         /* If we're using PSW[N], copy it to a temp because... */
350         if (ctx->null_cond.a0_is_n) {
351             ctx->null_cond.a0_is_n = false;
352             ctx->null_cond.a0 = tcg_temp_new();
353             tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
354         }
355         /* ... we clear it before branching over the implementation,
356            so that (1) it's clear after nullifying this insn and
357            (2) if this insn nullifies the next, PSW[N] is valid.  */
358         if (ctx->psw_n_nonzero) {
359             ctx->psw_n_nonzero = false;
360             tcg_gen_movi_tl(cpu_psw_n, 0);
361         }
362 
363         tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
364                           ctx->null_cond.a1, ctx->null_lab);
365         cond_free(&ctx->null_cond);
366     }
367 }
368 
369 /* Save the current nullification state to PSW[N].  */
370 static void nullify_save(DisasContext *ctx)
371 {
372     if (ctx->null_cond.c == TCG_COND_NEVER) {
373         if (ctx->psw_n_nonzero) {
374             tcg_gen_movi_tl(cpu_psw_n, 0);
375         }
376         return;
377     }
378     if (!ctx->null_cond.a0_is_n) {
379         cond_prep(&ctx->null_cond);
380         tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
381                            ctx->null_cond.a0, ctx->null_cond.a1);
382         ctx->psw_n_nonzero = true;
383     }
384     cond_free(&ctx->null_cond);
385 }
386 
387 /* Set PSW[N] to X.  The intention is that this is used immediately
388    before a goto_tb/exit_tb, so that there is no fallthru path to other
389    code within the TB.  Therefore we do not update psw_n_nonzero.  */
390 static void nullify_set(DisasContext *ctx, bool x)
391 {
392     if (ctx->psw_n_nonzero || x) {
393         tcg_gen_movi_tl(cpu_psw_n, x);
394     }
395 }
396 
397 /* Mark the end of an instruction that may have been nullified.
398    This is the pair to nullify_over.  */
399 static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
400 {
401     TCGLabel *null_lab = ctx->null_lab;
402 
403     if (likely(null_lab == NULL)) {
404         /* The current insn wasn't conditional or handled the condition
405            applied to it without a branch, so the (new) setting of
406            NULL_COND can be applied directly to the next insn.  */
407         return status;
408     }
409     ctx->null_lab = NULL;
410 
411     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
412         /* The next instruction will be unconditional,
413            and NULL_COND already reflects that.  */
414         gen_set_label(null_lab);
415     } else {
416         /* The insn that we just executed is itself nullifying the next
417            instruction.  Store the condition in the PSW[N] global.
418            We asserted PSW[N] = 0 in nullify_over, so that after the
419            label we have the proper value in place.  */
420         nullify_save(ctx);
421         gen_set_label(null_lab);
422         ctx->null_cond = cond_make_n();
423     }
424 
425     assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
426     if (status == DISAS_NORETURN) {
427         status = DISAS_NEXT;
428     }
429     return status;
430 }
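
/* A sketch of the usual pattern: a translator whose insn cannot be
   nullified with a single movcond brackets its implementation as

       nullify_over(ctx);
       ... emit the implementation ...
       return nullify_end(ctx, DISAS_NEXT);

   so that the emitted code is branched over entirely when the previous
   insn requested nullification (see e.g. do_load and trans_break
   below).  */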
431 
432 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
433 {
434     if (unlikely(ival == -1)) {
435         tcg_gen_mov_tl(dest, vval);
436     } else {
437         tcg_gen_movi_tl(dest, ival);
438     }
439 }
440 
441 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
442 {
443     return ctx->iaoq_f + disp + 8;
444 }
445 
446 static void gen_excp_1(int exception)
447 {
448     TCGv_i32 t = tcg_const_i32(exception);
449     gen_helper_excp(cpu_env, t);
450     tcg_temp_free_i32(t);
451 }
452 
453 static DisasJumpType gen_excp(DisasContext *ctx, int exception)
454 {
455     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
456     copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
457     nullify_save(ctx);
458     gen_excp_1(exception);
459     return DISAS_NORETURN;
460 }
461 
462 static DisasJumpType gen_illegal(DisasContext *ctx)
463 {
464     nullify_over(ctx);
465     return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
466 }
467 
468 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
469 {
470     /* Suppress goto_tb in the case of single-stepping and IO.  */
471     if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
472         return false;
473     }
474     return true;
475 }
476 
477 /* If the next insn is to be nullified, and it's on the same page,
478    and we're not attempting to set a breakpoint on it, then we can
479    totally skip the nullified insn.  This avoids creating and
480    executing a TB that merely branches to the next TB.  */
481 static bool use_nullify_skip(DisasContext *ctx)
482 {
483     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
484             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
485 }
486 
487 static void gen_goto_tb(DisasContext *ctx, int which,
488                         target_ulong f, target_ulong b)
489 {
490     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
491         tcg_gen_goto_tb(which);
492         tcg_gen_movi_tl(cpu_iaoq_f, f);
493         tcg_gen_movi_tl(cpu_iaoq_b, b);
494         tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
495     } else {
496         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
497         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
498         if (ctx->base.singlestep_enabled) {
499             gen_excp_1(EXCP_DEBUG);
500         } else {
501             tcg_gen_lookup_and_goto_ptr();
502         }
503     }
504 }
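
/* Note that the "(uintptr_t)ctx->base.tb + which" value returned via
   exit_tb above is the usual TCG chaining token: it tells the exec loop
   which of this TB's two goto_tb slots to patch once the destination TB
   is available.  */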
505 
506 /* PA has a habit of taking the LSB of a field and using that as the sign,
507    with the rest of the field becoming the least significant bits.  */
508 static target_long low_sextract(uint32_t val, int pos, int len)
509 {
510     target_ulong x = -(target_ulong)extract32(val, pos, 1);
511     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
512     return x;
513 }
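
/* For example, with pos=0 and len=14 (the im14 format), a field value of
   0x0005 has sign bit 1 and magnitude 2, giving 2 - 8192 = -8190; in
   general the result is (field >> 1) - ((field & 1) << (len - 1)).  */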
514 
515 static unsigned assemble_rt64(uint32_t insn)
516 {
517     unsigned r1 = extract32(insn, 6, 1);
518     unsigned r0 = extract32(insn, 0, 5);
519     return r1 * 32 + r0;
520 }
521 
522 static unsigned assemble_ra64(uint32_t insn)
523 {
524     unsigned r1 = extract32(insn, 7, 1);
525     unsigned r0 = extract32(insn, 21, 5);
526     return r1 * 32 + r0;
527 }
528 
529 static unsigned assemble_rb64(uint32_t insn)
530 {
531     unsigned r1 = extract32(insn, 12, 1);
532     unsigned r0 = extract32(insn, 16, 5);
533     return r1 * 32 + r0;
534 }
535 
536 static unsigned assemble_rc64(uint32_t insn)
537 {
538     unsigned r2 = extract32(insn, 8, 1);
539     unsigned r1 = extract32(insn, 13, 3);
540     unsigned r0 = extract32(insn, 9, 2);
541     return r2 * 32 + r1 * 4 + r0;
542 }
543 
544 static target_long assemble_12(uint32_t insn)
545 {
546     target_ulong x = -(target_ulong)(insn & 1);
547     x = (x <<  1) | extract32(insn, 2, 1);
548     x = (x << 10) | extract32(insn, 3, 10);
549     return x;
550 }
551 
552 static target_long assemble_16(uint32_t insn)
553 {
554     /* Take the name from PA2.0, which produces a 16-bit number
555        only with wide mode; otherwise a 14-bit number.  Since we don't
556        implement wide mode, this is always the 14-bit number.  */
557     return low_sextract(insn, 0, 14);
558 }
559 
560 static target_long assemble_16a(uint32_t insn)
561 {
562     /* Take the name from PA2.0, which produces a 14-bit shifted number
563        only with wide mode; otherwise a 12-bit shifted number.  Since we
564        don't implement wide mode, this is always the 12-bit number.  */
565     target_ulong x = -(target_ulong)(insn & 1);
566     x = (x << 11) | extract32(insn, 2, 11);
567     return x << 2;
568 }
569 
570 static target_long assemble_17(uint32_t insn)
571 {
572     target_ulong x = -(target_ulong)(insn & 1);
573     x = (x <<  5) | extract32(insn, 16, 5);
574     x = (x <<  1) | extract32(insn, 2, 1);
575     x = (x << 10) | extract32(insn, 3, 10);
576     return x << 2;
577 }
578 
579 static target_long assemble_21(uint32_t insn)
580 {
581     target_ulong x = -(target_ulong)(insn & 1);
582     x = (x << 11) | extract32(insn, 1, 11);
583     x = (x <<  2) | extract32(insn, 14, 2);
584     x = (x <<  5) | extract32(insn, 16, 5);
585     x = (x <<  2) | extract32(insn, 12, 2);
586     return x << 11;
587 }
588 
589 static target_long assemble_22(uint32_t insn)
590 {
591     target_ulong x = -(target_ulong)(insn & 1);
592     x = (x << 10) | extract32(insn, 16, 10);
593     x = (x <<  1) | extract32(insn, 2, 1);
594     x = (x << 10) | extract32(insn, 3, 10);
595     return x << 2;
596 }
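
/* assemble_12/16a/17/22 are displacements measured in words, hence the
   trailing "<< 2"; assemble_21 builds the LDIL/ADDIL-style immediate
   that supplies the upper bits of a word, hence "<< 11".  */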
597 
598 /* The parisc documentation describes only the general interpretation of
599    the conditions, without describing their exact implementation.  The
600    interpretations do not stand up well when considering ADD,C and SUB,B.
601    However, considering the Addition, Subtraction and Logical conditions
602    as a whole it would appear that these relations are similar to what
603    a traditional NZCV set of flags would produce.  */
604 
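/* CF is the 4-bit condition field of the insn: CF >> 1 selects one of
   the eight relations in the switch below, and CF & 1 selects the
   negated ("false") form via tcg_invert_cond at the end.  */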
605 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
606 {
607     DisasCond cond;
608     TCGv tmp;
609 
610     switch (cf >> 1) {
611     case 0: /* Never / TR */
612         cond = cond_make_f();
613         break;
614     case 1: /* = / <>        (Z / !Z) */
615         cond = cond_make_0(TCG_COND_EQ, res);
616         break;
617     case 2: /* < / >=        (N / !N) */
618         cond = cond_make_0(TCG_COND_LT, res);
619         break;
620     case 3: /* <= / >        (N | Z / !N & !Z) */
621         cond = cond_make_0(TCG_COND_LE, res);
622         break;
623     case 4: /* NUV / UV      (!C / C) */
624         cond = cond_make_0(TCG_COND_EQ, cb_msb);
625         break;
626     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
627         tmp = tcg_temp_new();
628         tcg_gen_neg_tl(tmp, cb_msb);
629         tcg_gen_and_tl(tmp, tmp, res);
630         cond = cond_make_0(TCG_COND_EQ, tmp);
631         tcg_temp_free(tmp);
632         break;
633     case 6: /* SV / NSV      (V / !V) */
634         cond = cond_make_0(TCG_COND_LT, sv);
635         break;
636     case 7: /* OD / EV */
637         tmp = tcg_temp_new();
638         tcg_gen_andi_tl(tmp, res, 1);
639         cond = cond_make_0(TCG_COND_NE, tmp);
640         tcg_temp_free(tmp);
641         break;
642     default:
643         g_assert_not_reached();
644     }
645     if (cf & 1) {
646         cond.c = tcg_invert_cond(cond.c);
647     }
648 
649     return cond;
650 }
651 
652 /* Similar, but for the special case of subtraction without borrow, we
653    can use the inputs directly.  This can allow other computation to be
654    deleted as unused.  */
655 
656 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
657 {
658     DisasCond cond;
659 
660     switch (cf >> 1) {
661     case 1: /* = / <> */
662         cond = cond_make(TCG_COND_EQ, in1, in2);
663         break;
664     case 2: /* < / >= */
665         cond = cond_make(TCG_COND_LT, in1, in2);
666         break;
667     case 3: /* <= / > */
668         cond = cond_make(TCG_COND_LE, in1, in2);
669         break;
670     case 4: /* << / >>= */
671         cond = cond_make(TCG_COND_LTU, in1, in2);
672         break;
673     case 5: /* <<= / >> */
674         cond = cond_make(TCG_COND_LEU, in1, in2);
675         break;
676     default:
677         return do_cond(cf, res, sv, sv);
678     }
679     if (cf & 1) {
680         cond.c = tcg_invert_cond(cond.c);
681     }
682 
683     return cond;
684 }
685 
686 /* Similar, but for logicals, where the carry and overflow bits are not
687    computed, and use of them is undefined.  */
688 
689 static DisasCond do_log_cond(unsigned cf, TCGv res)
690 {
691     switch (cf >> 1) {
692     case 4: case 5: case 6:
693         cf &= 1;
694         break;
695     }
696     return do_cond(cf, res, res, res);
697 }
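
/* Conditions 4..6 would test carry/overflow, which logical operations
   leave undefined; keeping only the low bit of CF degrades them to
   never/always.  */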
698 
699 /* Similar, but for shift/extract/deposit conditions.  */
700 
701 static DisasCond do_sed_cond(unsigned orig, TCGv res)
702 {
703     unsigned c, f;
704 
705     /* Convert the compressed condition codes to standard.
706        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
707        4-7 are the reverse of 0-3.  */
708     c = orig & 3;
709     if (c == 3) {
710         c = 7;
711     }
712     f = (orig & 4) / 4;
713 
714     return do_log_cond(c * 2 + f, res);
715 }
716 
717 /* Similar, but for unit conditions.  */
718 
719 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
720 {
721     DisasCond cond;
722     TCGv tmp, cb = NULL;
723 
724     if (cf & 8) {
725         /* Since we want to test lots of carry-out bits all at once, do not
726          * do our normal thing and compute carry-in of bit B+1 since that
727          * leaves us with carry bits spread across two words.
728          */
729         cb = tcg_temp_new();
730         tmp = tcg_temp_new();
731         tcg_gen_or_tl(cb, in1, in2);
732         tcg_gen_and_tl(tmp, in1, in2);
733         tcg_gen_andc_tl(cb, cb, res);
734         tcg_gen_or_tl(cb, cb, tmp);
735         tcg_temp_free(tmp);
736     }
737 
738     switch (cf >> 1) {
739     case 0: /* never / TR */
740     case 1: /* undefined */
741     case 5: /* undefined */
742         cond = cond_make_f();
743         break;
744 
745     case 2: /* SBZ / NBZ */
746         /* See hasless(v,1) from
747          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
748          */
749         tmp = tcg_temp_new();
750         tcg_gen_subi_tl(tmp, res, 0x01010101u);
751         tcg_gen_andc_tl(tmp, tmp, res);
752         tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
753         cond = cond_make_0(TCG_COND_NE, tmp);
754         tcg_temp_free(tmp);
755         break;
756 
757     case 3: /* SHZ / NHZ */
758         tmp = tcg_temp_new();
759         tcg_gen_subi_tl(tmp, res, 0x00010001u);
760         tcg_gen_andc_tl(tmp, tmp, res);
761         tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
762         cond = cond_make_0(TCG_COND_NE, tmp);
763         tcg_temp_free(tmp);
764         break;
765 
766     case 4: /* SDC / NDC */
767         tcg_gen_andi_tl(cb, cb, 0x88888888u);
768         cond = cond_make_0(TCG_COND_NE, cb);
769         break;
770 
771     case 6: /* SBC / NBC */
772         tcg_gen_andi_tl(cb, cb, 0x80808080u);
773         cond = cond_make_0(TCG_COND_NE, cb);
774         break;
775 
776     case 7: /* SHC / NHC */
777         tcg_gen_andi_tl(cb, cb, 0x80008000u);
778         cond = cond_make_0(TCG_COND_NE, cb);
779         break;
780 
781     default:
782         g_assert_not_reached();
783     }
784     if (cf & 8) {
785         tcg_temp_free(cb);
786     }
787     if (cf & 1) {
788         cond.c = tcg_invert_cond(cond.c);
789     }
790 
791     return cond;
792 }
793 
794 /* Compute signed overflow for addition.  */
795 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
796 {
797     TCGv sv = get_temp(ctx);
798     TCGv tmp = tcg_temp_new();
799 
800     tcg_gen_xor_tl(sv, res, in1);
801     tcg_gen_xor_tl(tmp, in1, in2);
802     tcg_gen_andc_tl(sv, sv, tmp);
803     tcg_temp_free(tmp);
804 
805     return sv;
806 }
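
/* The sign bit of SV is set exactly when the addends have the same sign
   and the result's sign differs, i.e. on signed overflow; do_cond case 6
   therefore tests SV against zero with TCG_COND_LT.  */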
807 
808 /* Compute signed overflow for subtraction.  */
809 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
810 {
811     TCGv sv = get_temp(ctx);
812     TCGv tmp = tcg_temp_new();
813 
814     tcg_gen_xor_tl(sv, res, in1);
815     tcg_gen_xor_tl(tmp, in1, in2);
816     tcg_gen_and_tl(sv, sv, tmp);
817     tcg_temp_free(tmp);
818 
819     return sv;
820 }
821 
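/* A note on the carry representation used below (inferred from the code
   rather than stated by it): CB is IN1 ^ IN2 ^ DEST, whose bit I is the
   carry into bit I of the sum, while CB_MSB holds the carry out of the
   most significant bit; these are saved into cpu_psw_cb/cpu_psw_cb_msb,
   the latter being the C bit tested by do_cond and consumed by the
   carry/borrow forms of add and subtract.  */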
822 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
823                             unsigned shift, bool is_l, bool is_tsv, bool is_tc,
824                             bool is_c, unsigned cf)
825 {
826     TCGv dest, cb, cb_msb, sv, tmp;
827     unsigned c = cf >> 1;
828     DisasCond cond;
829 
830     dest = tcg_temp_new();
831     cb = NULL;
832     cb_msb = NULL;
833 
834     if (shift) {
835         tmp = get_temp(ctx);
836         tcg_gen_shli_tl(tmp, in1, shift);
837         in1 = tmp;
838     }
839 
840     if (!is_l || c == 4 || c == 5) {
841         TCGv zero = tcg_const_tl(0);
842         cb_msb = get_temp(ctx);
843         tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
844         if (is_c) {
845             tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
846         }
847         tcg_temp_free(zero);
848         if (!is_l) {
849             cb = get_temp(ctx);
850             tcg_gen_xor_tl(cb, in1, in2);
851             tcg_gen_xor_tl(cb, cb, dest);
852         }
853     } else {
854         tcg_gen_add_tl(dest, in1, in2);
855         if (is_c) {
856             tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
857         }
858     }
859 
860     /* Compute signed overflow if required.  */
861     sv = NULL;
862     if (is_tsv || c == 6) {
863         sv = do_add_sv(ctx, dest, in1, in2);
864         if (is_tsv) {
865             /* ??? Need to include overflow from shift.  */
866             gen_helper_tsv(cpu_env, sv);
867         }
868     }
869 
870     /* Emit any conditional trap before any writeback.  */
871     cond = do_cond(cf, dest, cb_msb, sv);
872     if (is_tc) {
873         cond_prep(&cond);
874         tmp = tcg_temp_new();
875         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
876         gen_helper_tcond(cpu_env, tmp);
877         tcg_temp_free(tmp);
878     }
879 
880     /* Write back the result.  */
881     if (!is_l) {
882         save_or_nullify(ctx, cpu_psw_cb, cb);
883         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
884     }
885     save_gpr(ctx, rt, dest);
886     tcg_temp_free(dest);
887 
888     /* Install the new nullification.  */
889     cond_free(&ctx->null_cond);
890     ctx->null_cond = cond;
891     return DISAS_NEXT;
892 }
893 
894 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
895                             bool is_tsv, bool is_b, bool is_tc, unsigned cf)
896 {
897     TCGv dest, sv, cb, cb_msb, zero, tmp;
898     unsigned c = cf >> 1;
899     DisasCond cond;
900 
901     dest = tcg_temp_new();
902     cb = tcg_temp_new();
903     cb_msb = tcg_temp_new();
904 
905     zero = tcg_const_tl(0);
906     if (is_b) {
907         /* DEST,C = IN1 + ~IN2 + C.  */
908         tcg_gen_not_tl(cb, in2);
909         tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
910         tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
911         tcg_gen_xor_tl(cb, cb, in1);
912         tcg_gen_xor_tl(cb, cb, dest);
913     } else {
914         /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
915            operations by seeding the high word with 1 and subtracting.  */
916         tcg_gen_movi_tl(cb_msb, 1);
917         tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
918         tcg_gen_eqv_tl(cb, in1, in2);
919         tcg_gen_xor_tl(cb, cb, dest);
920     }
921     tcg_temp_free(zero);
922 
923     /* Compute signed overflow if required.  */
924     sv = NULL;
925     if (is_tsv || c == 6) {
926         sv = do_sub_sv(ctx, dest, in1, in2);
927         if (is_tsv) {
928             gen_helper_tsv(cpu_env, sv);
929         }
930     }
931 
932     /* Compute the condition.  We cannot use the special case for borrow.  */
933     if (!is_b) {
934         cond = do_sub_cond(cf, dest, in1, in2, sv);
935     } else {
936         cond = do_cond(cf, dest, cb_msb, sv);
937     }
938 
939     /* Emit any conditional trap before any writeback.  */
940     if (is_tc) {
941         cond_prep(&cond);
942         tmp = tcg_temp_new();
943         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
944         gen_helper_tcond(cpu_env, tmp);
945         tcg_temp_free(tmp);
946     }
947 
948     /* Write back the result.  */
949     save_or_nullify(ctx, cpu_psw_cb, cb);
950     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
951     save_gpr(ctx, rt, dest);
952     tcg_temp_free(dest);
953 
954     /* Install the new nullification.  */
955     cond_free(&ctx->null_cond);
956     ctx->null_cond = cond;
957     return DISAS_NEXT;
958 }
959 
960 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
961                                TCGv in2, unsigned cf)
962 {
963     TCGv dest, sv;
964     DisasCond cond;
965 
966     dest = tcg_temp_new();
967     tcg_gen_sub_tl(dest, in1, in2);
968 
969     /* Compute signed overflow if required.  */
970     sv = NULL;
971     if ((cf >> 1) == 6) {
972         sv = do_sub_sv(ctx, dest, in1, in2);
973     }
974 
975     /* Form the condition for the compare.  */
976     cond = do_sub_cond(cf, dest, in1, in2, sv);
977 
978     /* Clear.  */
979     tcg_gen_movi_tl(dest, 0);
980     save_gpr(ctx, rt, dest);
981     tcg_temp_free(dest);
982 
983     /* Install the new nullification.  */
984     cond_free(&ctx->null_cond);
985     ctx->null_cond = cond;
986     return DISAS_NEXT;
987 }
988 
989 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
990                             unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
991 {
992     TCGv dest = dest_gpr(ctx, rt);
993 
994     /* Perform the operation, and writeback.  */
995     fn(dest, in1, in2);
996     save_gpr(ctx, rt, dest);
997 
998     /* Install the new nullification.  */
999     cond_free(&ctx->null_cond);
1000     if (cf) {
1001         ctx->null_cond = do_log_cond(cf, dest);
1002     }
1003     return DISAS_NEXT;
1004 }
1005 
1006 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1007                              TCGv in2, unsigned cf, bool is_tc,
1008                              void (*fn)(TCGv, TCGv, TCGv))
1009 {
1010     TCGv dest;
1011     DisasCond cond;
1012 
1013     if (cf == 0) {
1014         dest = dest_gpr(ctx, rt);
1015         fn(dest, in1, in2);
1016         save_gpr(ctx, rt, dest);
1017         cond_free(&ctx->null_cond);
1018     } else {
1019         dest = tcg_temp_new();
1020         fn(dest, in1, in2);
1021 
1022         cond = do_unit_cond(cf, dest, in1, in2);
1023 
1024         if (is_tc) {
1025             TCGv tmp = tcg_temp_new();
1026             cond_prep(&cond);
1027             tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1028             gen_helper_tcond(cpu_env, tmp);
1029             tcg_temp_free(tmp);
1030         }
1031         save_gpr(ctx, rt, dest);
1032 
1033         cond_free(&ctx->null_cond);
1034         ctx->null_cond = cond;
1035     }
1036     return DISAS_NEXT;
1037 }
1038 
1039 /* Emit a memory load.  The modify parameter should be
1040  * < 0 for pre-modify,
1041  * > 0 for post-modify,
1042  * = 0 for no base register update.
1043  */
1044 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1045                        unsigned rx, int scale, target_long disp,
1046                        int modify, TCGMemOp mop)
1047 {
1048     TCGv addr, base;
1049 
1050     /* Caller uses nullify_over/nullify_end.  */
1051     assert(ctx->null_cond.c == TCG_COND_NEVER);
1052 
1053     addr = tcg_temp_new();
1054     base = load_gpr(ctx, rb);
1055 
1056     /* Note that RX is mutually exclusive with DISP.  */
1057     if (rx) {
1058         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1059         tcg_gen_add_tl(addr, addr, base);
1060     } else {
1061         tcg_gen_addi_tl(addr, base, disp);
1062     }
1063 
1064     if (modify == 0) {
1065         tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1066     } else {
1067         tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1068                             MMU_USER_IDX, mop);
1069         save_gpr(ctx, rb, addr);
1070     }
1071     tcg_temp_free(addr);
1072 }
1073 
1074 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1075                        unsigned rx, int scale, target_long disp,
1076                        int modify, TCGMemOp mop)
1077 {
1078     TCGv addr, base;
1079 
1080     /* Caller uses nullify_over/nullify_end.  */
1081     assert(ctx->null_cond.c == TCG_COND_NEVER);
1082 
1083     addr = tcg_temp_new();
1084     base = load_gpr(ctx, rb);
1085 
1086     /* Note that RX is mutually exclusive with DISP.  */
1087     if (rx) {
1088         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1089         tcg_gen_add_tl(addr, addr, base);
1090     } else {
1091         tcg_gen_addi_tl(addr, base, disp);
1092     }
1093 
1094     if (modify == 0) {
1095         tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1096     } else {
1097         tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1098                             MMU_USER_IDX, mop);
1099         save_gpr(ctx, rb, addr);
1100     }
1101     tcg_temp_free(addr);
1102 }
1103 
1104 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1105                         unsigned rx, int scale, target_long disp,
1106                         int modify, TCGMemOp mop)
1107 {
1108     TCGv addr, base;
1109 
1110     /* Caller uses nullify_over/nullify_end.  */
1111     assert(ctx->null_cond.c == TCG_COND_NEVER);
1112 
1113     addr = tcg_temp_new();
1114     base = load_gpr(ctx, rb);
1115 
1116     /* Note that RX is mutually exclusive with DISP.  */
1117     if (rx) {
1118         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1119         tcg_gen_add_tl(addr, addr, base);
1120     } else {
1121         tcg_gen_addi_tl(addr, base, disp);
1122     }
1123 
1124     tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1125 
1126     if (modify != 0) {
1127         save_gpr(ctx, rb, addr);
1128     }
1129     tcg_temp_free(addr);
1130 }
1131 
1132 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1133                         unsigned rx, int scale, target_long disp,
1134                         int modify, TCGMemOp mop)
1135 {
1136     TCGv addr, base;
1137 
1138     /* Caller uses nullify_over/nullify_end.  */
1139     assert(ctx->null_cond.c == TCG_COND_NEVER);
1140 
1141     addr = tcg_temp_new();
1142     base = load_gpr(ctx, rb);
1143 
1144     /* Note that RX is mutually exclusive with DISP.  */
1145     if (rx) {
1146         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1147         tcg_gen_add_tl(addr, addr, base);
1148     } else {
1149         tcg_gen_addi_tl(addr, base, disp);
1150     }
1151 
1152     tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1153 
1154     if (modify != 0) {
1155         save_gpr(ctx, rb, addr);
1156     }
1157     tcg_temp_free(addr);
1158 }
1159 
1160 #if TARGET_LONG_BITS == 64
1161 #define do_load_tl  do_load_64
1162 #define do_store_tl do_store_64
1163 #else
1164 #define do_load_tl  do_load_32
1165 #define do_store_tl do_store_32
1166 #endif
1167 
1168 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1169                              unsigned rx, int scale, target_long disp,
1170                              int modify, TCGMemOp mop)
1171 {
1172     TCGv dest;
1173 
1174     nullify_over(ctx);
1175 
1176     if (modify == 0) {
1177         /* No base register update.  */
1178         dest = dest_gpr(ctx, rt);
1179     } else {
1180         /* Make sure if RT == RB, we see the result of the load.  */
1181         dest = get_temp(ctx);
1182     }
1183     do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1184     save_gpr(ctx, rt, dest);
1185 
1186     return nullify_end(ctx, DISAS_NEXT);
1187 }
1188 
1189 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1190                                unsigned rx, int scale, target_long disp,
1191                                int modify)
1192 {
1193     TCGv_i32 tmp;
1194 
1195     nullify_over(ctx);
1196 
1197     tmp = tcg_temp_new_i32();
1198     do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1199     save_frw_i32(rt, tmp);
1200     tcg_temp_free_i32(tmp);
1201 
1202     if (rt == 0) {
1203         gen_helper_loaded_fr0(cpu_env);
1204     }
1205 
1206     return nullify_end(ctx, DISAS_NEXT);
1207 }
1208 
1209 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1210                                unsigned rx, int scale, target_long disp,
1211                                int modify)
1212 {
1213     TCGv_i64 tmp;
1214 
1215     nullify_over(ctx);
1216 
1217     tmp = tcg_temp_new_i64();
1218     do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1219     save_frd(rt, tmp);
1220     tcg_temp_free_i64(tmp);
1221 
1222     if (rt == 0) {
1223         gen_helper_loaded_fr0(cpu_env);
1224     }
1225 
1226     return nullify_end(ctx, DISAS_NEXT);
1227 }
1228 
1229 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1230                               target_long disp, int modify, TCGMemOp mop)
1231 {
1232     nullify_over(ctx);
1233     do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1234     return nullify_end(ctx, DISAS_NEXT);
1235 }
1236 
1237 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1238                                 unsigned rx, int scale, target_long disp,
1239                                 int modify)
1240 {
1241     TCGv_i32 tmp;
1242 
1243     nullify_over(ctx);
1244 
1245     tmp = load_frw_i32(rt);
1246     do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1247     tcg_temp_free_i32(tmp);
1248 
1249     return nullify_end(ctx, DISAS_NEXT);
1250 }
1251 
1252 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1253                                 unsigned rx, int scale, target_long disp,
1254                                 int modify)
1255 {
1256     TCGv_i64 tmp;
1257 
1258     nullify_over(ctx);
1259 
1260     tmp = load_frd(rt);
1261     do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1262     tcg_temp_free_i64(tmp);
1263 
1264     return nullify_end(ctx, DISAS_NEXT);
1265 }
1266 
1267 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1268                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1269 {
1270     TCGv_i32 tmp;
1271 
1272     nullify_over(ctx);
1273     tmp = load_frw0_i32(ra);
1274 
1275     func(tmp, cpu_env, tmp);
1276 
1277     save_frw_i32(rt, tmp);
1278     tcg_temp_free_i32(tmp);
1279     return nullify_end(ctx, DISAS_NEXT);
1280 }
1281 
1282 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1283                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1284 {
1285     TCGv_i32 dst;
1286     TCGv_i64 src;
1287 
1288     nullify_over(ctx);
1289     src = load_frd(ra);
1290     dst = tcg_temp_new_i32();
1291 
1292     func(dst, cpu_env, src);
1293 
1294     tcg_temp_free_i64(src);
1295     save_frw_i32(rt, dst);
1296     tcg_temp_free_i32(dst);
1297     return nullify_end(ctx, DISAS_NEXT);
1298 }
1299 
1300 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1301                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1302 {
1303     TCGv_i64 tmp;
1304 
1305     nullify_over(ctx);
1306     tmp = load_frd0(ra);
1307 
1308     func(tmp, cpu_env, tmp);
1309 
1310     save_frd(rt, tmp);
1311     tcg_temp_free_i64(tmp);
1312     return nullify_end(ctx, DISAS_NEXT);
1313 }
1314 
1315 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1316                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1317 {
1318     TCGv_i32 src;
1319     TCGv_i64 dst;
1320 
1321     nullify_over(ctx);
1322     src = load_frw0_i32(ra);
1323     dst = tcg_temp_new_i64();
1324 
1325     func(dst, cpu_env, src);
1326 
1327     tcg_temp_free_i32(src);
1328     save_frd(rt, dst);
1329     tcg_temp_free_i64(dst);
1330     return nullify_end(ctx, DISAS_NEXT);
1331 }
1332 
1333 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1334                                  unsigned ra, unsigned rb,
1335                                  void (*func)(TCGv_i32, TCGv_env,
1336                                               TCGv_i32, TCGv_i32))
1337 {
1338     TCGv_i32 a, b;
1339 
1340     nullify_over(ctx);
1341     a = load_frw0_i32(ra);
1342     b = load_frw0_i32(rb);
1343 
1344     func(a, cpu_env, a, b);
1345 
1346     tcg_temp_free_i32(b);
1347     save_frw_i32(rt, a);
1348     tcg_temp_free_i32(a);
1349     return nullify_end(ctx, DISAS_NEXT);
1350 }
1351 
1352 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1353                                  unsigned ra, unsigned rb,
1354                                  void (*func)(TCGv_i64, TCGv_env,
1355                                               TCGv_i64, TCGv_i64))
1356 {
1357     TCGv_i64 a, b;
1358 
1359     nullify_over(ctx);
1360     a = load_frd0(ra);
1361     b = load_frd0(rb);
1362 
1363     func(a, cpu_env, a, b);
1364 
1365     tcg_temp_free_i64(b);
1366     save_frd(rt, a);
1367     tcg_temp_free_i64(a);
1368     return nullify_end(ctx, DISAS_NEXT);
1369 }
1370 
1371 /* Emit an unconditional branch to a direct target, which may or may not
1372    have already had nullification handled.  */
1373 static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
1374                                 unsigned link, bool is_n)
1375 {
1376     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1377         if (link != 0) {
1378             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1379         }
1380         ctx->iaoq_n = dest;
1381         if (is_n) {
1382             ctx->null_cond.c = TCG_COND_ALWAYS;
1383         }
1384         return DISAS_NEXT;
1385     } else {
1386         nullify_over(ctx);
1387 
1388         if (link != 0) {
1389             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1390         }
1391 
1392         if (is_n && use_nullify_skip(ctx)) {
1393             nullify_set(ctx, 0);
1394             gen_goto_tb(ctx, 0, dest, dest + 4);
1395         } else {
1396             nullify_set(ctx, is_n);
1397             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1398         }
1399 
1400         nullify_end(ctx, DISAS_NEXT);
1401 
1402         nullify_set(ctx, 0);
1403         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1404         return DISAS_NORETURN;
1405     }
1406 }
1407 
1408 /* Emit a conditional branch to a direct target.  If the branch itself
1409    is nullified, we should have already used nullify_over.  */
1410 static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1411                                 DisasCond *cond)
1412 {
1413     target_ulong dest = iaoq_dest(ctx, disp);
1414     TCGLabel *taken = NULL;
1415     TCGCond c = cond->c;
1416     bool n;
1417 
1418     assert(ctx->null_cond.c == TCG_COND_NEVER);
1419 
1420     /* Handle TRUE and NEVER as direct branches.  */
1421     if (c == TCG_COND_ALWAYS) {
1422         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1423     }
1424     if (c == TCG_COND_NEVER) {
1425         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1426     }
1427 
1428     taken = gen_new_label();
1429     cond_prep(cond);
1430     tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1431     cond_free(cond);
1432 
1433     /* Not taken: Condition not satisfied; nullify on backward branches. */
1434     n = is_n && disp < 0;
1435     if (n && use_nullify_skip(ctx)) {
1436         nullify_set(ctx, 0);
1437         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1438     } else {
1439         if (!n && ctx->null_lab) {
1440             gen_set_label(ctx->null_lab);
1441             ctx->null_lab = NULL;
1442         }
1443         nullify_set(ctx, n);
1444         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1445     }
1446 
1447     gen_set_label(taken);
1448 
1449     /* Taken: Condition satisfied; nullify on forward branches.  */
1450     n = is_n && disp >= 0;
1451     if (n && use_nullify_skip(ctx)) {
1452         nullify_set(ctx, 0);
1453         gen_goto_tb(ctx, 1, dest, dest + 4);
1454     } else {
1455         nullify_set(ctx, n);
1456         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1457     }
1458 
1459     /* Not taken: the branch itself was nullified.  */
1460     if (ctx->null_lab) {
1461         gen_set_label(ctx->null_lab);
1462         ctx->null_lab = NULL;
1463         return DISAS_IAQ_N_STALE;
1464     } else {
1465         return DISAS_NORETURN;
1466     }
1467 }
1468 
1469 /* Emit an unconditional branch to an indirect target.  This handles
1470    nullification of the branch itself.  */
1471 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
1472                                 unsigned link, bool is_n)
1473 {
1474     TCGv a0, a1, next, tmp;
1475     TCGCond c;
1476 
1477     assert(ctx->null_lab == NULL);
1478 
1479     if (ctx->null_cond.c == TCG_COND_NEVER) {
1480         if (link != 0) {
1481             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1482         }
1483         next = get_temp(ctx);
1484         tcg_gen_mov_tl(next, dest);
1485         ctx->iaoq_n = -1;
1486         ctx->iaoq_n_var = next;
1487         if (is_n) {
1488             ctx->null_cond.c = TCG_COND_ALWAYS;
1489         }
1490     } else if (is_n && use_nullify_skip(ctx)) {
1491         /* The (conditional) branch, B, nullifies the next insn, N,
1492            and we're allowed to skip execution of N (no single-step or
1493            tracepoint in effect).  Since the goto_ptr that we must use
1494            for the indirect branch consumes no special resources, we
1495            can (conditionally) skip B and continue execution.  */
1496         /* The use_nullify_skip test implies we have a known control path.  */
1497         tcg_debug_assert(ctx->iaoq_b != -1);
1498         tcg_debug_assert(ctx->iaoq_n != -1);
1499 
1500         /* We do have to handle the non-local temporary, DEST, before
1501            branching.  Since IAOQ_F is not really live at this point, we
1502            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1503         tcg_gen_mov_tl(cpu_iaoq_f, dest);
1504         tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1505 
1506         nullify_over(ctx);
1507         if (link != 0) {
1508             tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1509         }
1510         tcg_gen_lookup_and_goto_ptr();
1511         return nullify_end(ctx, DISAS_NEXT);
1512     } else {
1513         cond_prep(&ctx->null_cond);
1514         c = ctx->null_cond.c;
1515         a0 = ctx->null_cond.a0;
1516         a1 = ctx->null_cond.a1;
1517 
1518         tmp = tcg_temp_new();
1519         next = get_temp(ctx);
1520 
1521         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1522         tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1523         ctx->iaoq_n = -1;
1524         ctx->iaoq_n_var = next;
1525 
1526         if (link != 0) {
1527             tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1528         }
1529 
1530         if (is_n) {
1531             /* The branch nullifies the next insn, which means the state of N
1532                after the branch is the inverse of the state of N that applied
1533                to the branch.  */
1534             tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1535             cond_free(&ctx->null_cond);
1536             ctx->null_cond = cond_make_n();
1537             ctx->psw_n_nonzero = true;
1538         } else {
1539             cond_free(&ctx->null_cond);
1540         }
1541     }
1542 
1543     return DISAS_NEXT;
1544 }
1545 
1546 /* On Linux, page zero is normally marked execute only + gateway.
1547    Therefore normal read or write is supposed to fail, but specific
1548    offsets have kernel code mapped to raise permissions to implement
1549    system calls.  Handling this via an explicit check here, rather
1550    than in the "be disp(sr2,r0)" instruction that probably sent us
1551    here, is the easiest way to handle the branch delay slot on the
1552    aforementioned BE.  */
1553 static DisasJumpType do_page_zero(DisasContext *ctx)
1554 {
1555     /* If by some means we get here with PSW[N]=1, that implies that
1556        the B,GATE instruction would be skipped, and we'd fault on the
1557       next insn within the privileged page.  */
1558     switch (ctx->null_cond.c) {
1559     case TCG_COND_NEVER:
1560         break;
1561     case TCG_COND_ALWAYS:
1562         tcg_gen_movi_tl(cpu_psw_n, 0);
1563         goto do_sigill;
1564     default:
1565         /* Since this is always the first (and only) insn within the
1566            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1567         g_assert_not_reached();
1568     }
1569 
1570     /* Check that we didn't arrive here via some means that allowed
1571        non-sequential instruction execution.  Normally the PSW[B] bit
1572        detects this by disallowing the B,GATE instruction to execute
1573        under such conditions.  */
1574     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1575         goto do_sigill;
1576     }
1577 
1578     switch (ctx->iaoq_f) {
1579     case 0x00: /* Null pointer call */
1580         gen_excp_1(EXCP_SIGSEGV);
1581         return DISAS_NORETURN;
1582 
1583     case 0xb0: /* LWS */
1584         gen_excp_1(EXCP_SYSCALL_LWS);
1585         return DISAS_NORETURN;
1586 
1587     case 0xe0: /* SET_THREAD_POINTER */
1588         tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1589         tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1590         tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1591         return DISAS_IAQ_N_UPDATED;
1592 
1593     case 0x100: /* SYSCALL */
1594         gen_excp_1(EXCP_SYSCALL);
1595         return DISAS_NORETURN;
1596 
1597     default:
1598     do_sigill:
1599         gen_excp_1(EXCP_SIGILL);
1600         return DISAS_NORETURN;
1601     }
1602 }
1603 
1604 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1605                                const DisasInsn *di)
1606 {
1607     cond_free(&ctx->null_cond);
1608     return DISAS_NEXT;
1609 }
1610 
1611 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1612                                  const DisasInsn *di)
1613 {
1614     nullify_over(ctx);
1615     return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1616 }
1617 
1618 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1619                                 const DisasInsn *di)
1620 {
1621     /* No point in nullifying the memory barrier.  */
1622     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1623 
1624     cond_free(&ctx->null_cond);
1625     return DISAS_NEXT;
1626 }
1627 
1628 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1629                                 const DisasInsn *di)
1630 {
1631     unsigned rt = extract32(insn, 0, 5);
1632     TCGv tmp = dest_gpr(ctx, rt);
1633     tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1634     save_gpr(ctx, rt, tmp);
1635 
1636     cond_free(&ctx->null_cond);
1637     return DISAS_NEXT;
1638 }
1639 
1640 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1641                                 const DisasInsn *di)
1642 {
1643     unsigned rt = extract32(insn, 0, 5);
1644     TCGv tmp = dest_gpr(ctx, rt);
1645 
1646     /* ??? We don't implement space registers.  */
1647     tcg_gen_movi_tl(tmp, 0);
1648     save_gpr(ctx, rt, tmp);
1649 
1650     cond_free(&ctx->null_cond);
1651     return DISAS_NEXT;
1652 }
1653 
1654 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1655                                  const DisasInsn *di)
1656 {
1657     unsigned rt = extract32(insn, 0, 5);
1658     unsigned ctl = extract32(insn, 21, 5);
1659     TCGv tmp;
1660 
1661     switch (ctl) {
1662     case 11: /* SAR */
1663 #ifdef TARGET_HPPA64
1664         if (extract32(insn, 14, 1) == 0) {
1665             /* MFSAR without ,W masks low 5 bits.  */
1666             tmp = dest_gpr(ctx, rt);
1667             tcg_gen_andi_tl(tmp, cpu_sar, 31);
1668             save_gpr(ctx, rt, tmp);
1669             break;
1670         }
1671 #endif
1672         save_gpr(ctx, rt, cpu_sar);
1673         break;
1674     case 16: /* Interval Timer */
1675         tmp = dest_gpr(ctx, rt);
1676         tcg_gen_movi_tl(tmp, 0); /* FIXME */
1677         save_gpr(ctx, rt, tmp);
1678         break;
1679     case 26:
1680         save_gpr(ctx, rt, cpu_cr26);
1681         break;
1682     case 27:
1683         save_gpr(ctx, rt, cpu_cr27);
1684         break;
1685     default:
1686         /* All other control registers are privileged.  */
1687         return gen_illegal(ctx);
1688     }
1689 
1690     cond_free(&ctx->null_cond);
1691     return DISAS_NEXT;
1692 }
1693 
1694 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1695                                  const DisasInsn *di)
1696 {
1697     unsigned rin = extract32(insn, 16, 5);
1698     unsigned ctl = extract32(insn, 21, 5);
1699     TCGv tmp;
1700 
1701     if (ctl == 11) { /* SAR */
1702         tmp = tcg_temp_new();
1703         tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1704         save_or_nullify(ctx, cpu_sar, tmp);
1705         tcg_temp_free(tmp);
1706     } else {
1707         /* All other control registers are privileged or read-only.  */
1708         return gen_illegal(ctx);
1709     }
1710 
1711     cond_free(&ctx->null_cond);
1712     return DISAS_NEXT;
1713 }
1714 
1715 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1716                                    const DisasInsn *di)
1717 {
1718     unsigned rin = extract32(insn, 16, 5);
1719     TCGv tmp = tcg_temp_new();
1720 
1721     tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1722     tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1723     save_or_nullify(ctx, cpu_sar, tmp);
1724     tcg_temp_free(tmp);
1725 
1726     cond_free(&ctx->null_cond);
1727     return DISAS_NEXT;
1728 }
1729 
1730 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1731                                  const DisasInsn *di)
1732 {
1733     unsigned rt = extract32(insn, 0, 5);
1734     TCGv dest = dest_gpr(ctx, rt);
1735 
1736     /* Since we don't implement space registers, this returns zero.  */
1737     tcg_gen_movi_tl(dest, 0);
1738     save_gpr(ctx, rt, dest);
1739 
1740     cond_free(&ctx->null_cond);
1741     return DISAS_NEXT;
1742 }
1743 
1744 static const DisasInsn table_system[] = {
1745     { 0x00000000u, 0xfc001fe0u, trans_break },
1746     /* We don't implement space registers, so MTSP is a nop.  */
1747     { 0x00001820u, 0xffe01fffu, trans_nop },
1748     { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1749     { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1750     { 0x000014a0u, 0xffffffe0u, trans_mfia },
1751     { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1752     { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1753     { 0x00000400u, 0xffffffffu, trans_sync },
1754     { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
1755 };
1756 
1757 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1758                                         const DisasInsn *di)
1759 {
1760     unsigned rb = extract32(insn, 21, 5);
1761     unsigned rx = extract32(insn, 16, 5);
1762     TCGv dest = dest_gpr(ctx, rb);
1763     TCGv src1 = load_gpr(ctx, rb);
1764     TCGv src2 = load_gpr(ctx, rx);
1765 
1766     /* The only thing we need to do is the base register modification.  */
1767     tcg_gen_add_tl(dest, src1, src2);
1768     save_gpr(ctx, rb, dest);
1769 
1770     cond_free(&ctx->null_cond);
1771     return DISAS_NEXT;
1772 }
1773 
1774 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1775                                  const DisasInsn *di)
1776 {
1777     unsigned rt = extract32(insn, 0, 5);
1778     unsigned rb = extract32(insn, 21, 5);
1779     unsigned is_write = extract32(insn, 6, 1);
1780     TCGv dest;
1781 
1782     nullify_over(ctx);
1783 
1784     /* ??? Do something with priv level operand.  */
1785     dest = dest_gpr(ctx, rt);
1786     if (is_write) {
1787         gen_helper_probe_w(dest, load_gpr(ctx, rb));
1788     } else {
1789         gen_helper_probe_r(dest, load_gpr(ctx, rb));
1790     }
1791     save_gpr(ctx, rt, dest);
1792     return nullify_end(ctx, DISAS_NEXT);
1793 }
1794 
1795 static const DisasInsn table_mem_mgmt[] = {
1796     { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
1797     { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
1798     { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1799     { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
1800     { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1801     { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
1802     { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1803     { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
1804     { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1805     { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
1806     { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1807     { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
1808     { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1809     { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
1810     { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
1811 };
1812 
1813 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1814                                const DisasInsn *di)
1815 {
1816     unsigned r2 = extract32(insn, 21, 5);
1817     unsigned r1 = extract32(insn, 16, 5);
1818     unsigned cf = extract32(insn, 12, 4);
1819     unsigned ext = extract32(insn, 8, 4);
1820     unsigned shift = extract32(insn, 6, 2);
1821     unsigned rt = extract32(insn,  0, 5);
1822     TCGv tcg_r1, tcg_r2;
1823     bool is_c = false;
1824     bool is_l = false;
1825     bool is_tc = false;
1826     bool is_tsv = false;
1827     DisasJumpType ret;
1828 
1829     switch (ext) {
1830     case 0x6: /* ADD, SHLADD */
1831         break;
1832     case 0xa: /* ADD,L, SHLADD,L */
1833         is_l = true;
1834         break;
1835     case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1836         is_tsv = true;
1837         break;
1838     case 0x7: /* ADD,C */
1839         is_c = true;
1840         break;
1841     case 0xf: /* ADD,C,TSV */
1842         is_c = is_tsv = true;
1843         break;
1844     default:
1845         return gen_illegal(ctx);
1846     }
1847 
1848     if (cf) {
1849         nullify_over(ctx);
1850     }
1851     tcg_r1 = load_gpr(ctx, r1);
1852     tcg_r2 = load_gpr(ctx, r2);
1853     ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1854     return nullify_end(ctx, ret);
1855 }
1856 
1857 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1858                                const DisasInsn *di)
1859 {
1860     unsigned r2 = extract32(insn, 21, 5);
1861     unsigned r1 = extract32(insn, 16, 5);
1862     unsigned cf = extract32(insn, 12, 4);
1863     unsigned ext = extract32(insn, 6, 6);
1864     unsigned rt = extract32(insn,  0, 5);
1865     TCGv tcg_r1, tcg_r2;
1866     bool is_b = false;
1867     bool is_tc = false;
1868     bool is_tsv = false;
1869     DisasJumpType ret;
1870 
1871     switch (ext) {
1872     case 0x10: /* SUB */
1873         break;
1874     case 0x30: /* SUB,TSV */
1875         is_tsv = true;
1876         break;
1877     case 0x14: /* SUB,B */
1878         is_b = true;
1879         break;
1880     case 0x34: /* SUB,B,TSV */
1881         is_b = is_tsv = true;
1882         break;
1883     case 0x13: /* SUB,TC */
1884         is_tc = true;
1885         break;
1886     case 0x33: /* SUB,TSV,TC */
1887         is_tc = is_tsv = true;
1888         break;
1889     default:
1890         return gen_illegal(ctx);
1891     }
1892 
1893     if (cf) {
1894         nullify_over(ctx);
1895     }
1896     tcg_r1 = load_gpr(ctx, r1);
1897     tcg_r2 = load_gpr(ctx, r2);
1898     ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1899     return nullify_end(ctx, ret);
1900 }
1901 
1902 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1903                                const DisasInsn *di)
1904 {
1905     unsigned r2 = extract32(insn, 21, 5);
1906     unsigned r1 = extract32(insn, 16, 5);
1907     unsigned cf = extract32(insn, 12, 4);
1908     unsigned rt = extract32(insn,  0, 5);
1909     TCGv tcg_r1, tcg_r2;
1910     DisasJumpType ret;
1911 
1912     if (cf) {
1913         nullify_over(ctx);
1914     }
1915     tcg_r1 = load_gpr(ctx, r1);
1916     tcg_r2 = load_gpr(ctx, r2);
1917     ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1918     return nullify_end(ctx, ret);
1919 }
1920 
1921 /* OR r,0,t -> COPY (according to gas) */
1922 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1923                                 const DisasInsn *di)
1924 {
1925     unsigned r1 = extract32(insn, 16, 5);
1926     unsigned rt = extract32(insn,  0, 5);
1927 
1928     if (r1 == 0) {
1929         TCGv dest = dest_gpr(ctx, rt);
1930         tcg_gen_movi_tl(dest, 0);
1931         save_gpr(ctx, rt, dest);
1932     } else {
1933         save_gpr(ctx, rt, cpu_gr[r1]);
1934     }
1935     cond_free(&ctx->null_cond);
1936     return DISAS_NEXT;
1937 }
1938 
1939 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1940                                   const DisasInsn *di)
1941 {
1942     unsigned r2 = extract32(insn, 21, 5);
1943     unsigned r1 = extract32(insn, 16, 5);
1944     unsigned cf = extract32(insn, 12, 4);
1945     unsigned rt = extract32(insn,  0, 5);
1946     TCGv tcg_r1, tcg_r2;
1947     DisasJumpType ret;
1948 
1949     if (cf) {
1950         nullify_over(ctx);
1951     }
1952     tcg_r1 = load_gpr(ctx, r1);
1953     tcg_r2 = load_gpr(ctx, r2);
1954     ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1955     return nullify_end(ctx, ret);
1956 }
1957 
1958 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1959                                 const DisasInsn *di)
1960 {
1961     unsigned r2 = extract32(insn, 21, 5);
1962     unsigned r1 = extract32(insn, 16, 5);
1963     unsigned cf = extract32(insn, 12, 4);
1964     unsigned rt = extract32(insn,  0, 5);
1965     TCGv tcg_r1, tcg_r2;
1966     DisasJumpType ret;
1967 
1968     if (cf) {
1969         nullify_over(ctx);
1970     }
1971     tcg_r1 = load_gpr(ctx, r1);
1972     tcg_r2 = load_gpr(ctx, r2);
1973     ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1974     return nullify_end(ctx, ret);
1975 }
1976 
1977 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1978                                   const DisasInsn *di)
1979 {
1980     unsigned r2 = extract32(insn, 21, 5);
1981     unsigned r1 = extract32(insn, 16, 5);
1982     unsigned cf = extract32(insn, 12, 4);
1983     unsigned is_tc = extract32(insn, 6, 1);
1984     unsigned rt = extract32(insn,  0, 5);
1985     TCGv tcg_r1, tcg_r2, tmp;
1986     DisasJumpType ret;
1987 
1988     if (cf) {
1989         nullify_over(ctx);
1990     }
1991     tcg_r1 = load_gpr(ctx, r1);
1992     tcg_r2 = load_gpr(ctx, r2);
1993     tmp = get_temp(ctx);
1994     tcg_gen_not_tl(tmp, tcg_r2);
1995     ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1996     return nullify_end(ctx, ret);
1997 }
1998 
1999 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2000                                 const DisasInsn *di)
2001 {
2002     unsigned r2 = extract32(insn, 21, 5);
2003     unsigned cf = extract32(insn, 12, 4);
2004     unsigned is_i = extract32(insn, 6, 1);
2005     unsigned rt = extract32(insn,  0, 5);
2006     TCGv tmp;
2007     DisasJumpType ret;
2008 
2009     nullify_over(ctx);
2010 
2011     tmp = get_temp(ctx);
2012     tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2013     if (!is_i) {
2014         tcg_gen_not_tl(tmp, tmp);
2015     }
2016     tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2017     tcg_gen_muli_tl(tmp, tmp, 6);
2018     ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2019                   is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2020 
2021     return nullify_end(ctx, ret);
2022 }
2023 
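/* DS: divide step.  One iteration of the shift-and-subtract division
   primitive; software repeats it to build a full divide sequence.  */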
2024 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2025                               const DisasInsn *di)
2026 {
2027     unsigned r2 = extract32(insn, 21, 5);
2028     unsigned r1 = extract32(insn, 16, 5);
2029     unsigned cf = extract32(insn, 12, 4);
2030     unsigned rt = extract32(insn,  0, 5);
2031     TCGv dest, add1, add2, addc, zero, in1, in2;
2032 
2033     nullify_over(ctx);
2034 
2035     in1 = load_gpr(ctx, r1);
2036     in2 = load_gpr(ctx, r2);
2037 
2038     add1 = tcg_temp_new();
2039     add2 = tcg_temp_new();
2040     addc = tcg_temp_new();
2041     dest = tcg_temp_new();
2042     zero = tcg_const_tl(0);
2043 
2044     /* Form R1 << 1 | PSW[CB]{8}.  */
2045     tcg_gen_add_tl(add1, in1, in1);
2046     tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2047 
2048     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2049        carry{8} requires that we subtract via + ~R2 + 1, as described in
2050        the manual.  By extracting and masking V, we can produce the
2051        proper inputs to the addition without movcond.  */
2052     tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2053     tcg_gen_xor_tl(add2, in2, addc);
2054     tcg_gen_andi_tl(addc, addc, 1);
2055     /* ??? This is only correct for 32-bit.  */
2056     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2057     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2058 
2059     tcg_temp_free(addc);
2060     tcg_temp_free(zero);
2061 
2062     /* Write back the result register.  */
2063     save_gpr(ctx, rt, dest);
2064 
2065     /* Write back PSW[CB].  */
2066     tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2067     tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2068 
2069     /* Write back PSW[V] for the division step.  */
2070     tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2071     tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2072 
2073     /* Install the new nullification.  */
2074     if (cf) {
2075         TCGv sv = NULL;
2076         if (cf >> 1 == 6) {
2077             /* ??? The lshift is supposed to contribute to overflow.  */
2078             sv = do_add_sv(ctx, dest, add1, add2);
2079         }
2080         ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2081     }
2082 
2083     tcg_temp_free(add1);
2084     tcg_temp_free(add2);
2085     tcg_temp_free(dest);
2086 
2087     return nullify_end(ctx, DISAS_NEXT);
2088 }
2089 
2090 static const DisasInsn table_arith_log[] = {
2091     { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
2092     { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2093     { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
2094     { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
2095     { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
2096     { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
2097     { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2098     { 0x08000380u, 0xfc000fe0u, trans_uxor },
2099     { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2100     { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2101     { 0x08000440u, 0xfc000fe0u, trans_ds },
2102     { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2103     { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2104     { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2105     { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2106 };
2107 
2108 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2109 {
2110     target_long im = low_sextract(insn, 0, 11);
2111     unsigned e1 = extract32(insn, 11, 1);
2112     unsigned cf = extract32(insn, 12, 4);
2113     unsigned rt = extract32(insn, 16, 5);
2114     unsigned r2 = extract32(insn, 21, 5);
2115     unsigned o1 = extract32(insn, 26, 1);
2116     TCGv tcg_im, tcg_r2;
2117     DisasJumpType ret;
2118 
2119     if (cf) {
2120         nullify_over(ctx);
2121     }
2122 
2123     tcg_im = load_const(ctx, im);
2124     tcg_r2 = load_gpr(ctx, r2);
2125     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2126 
2127     return nullify_end(ctx, ret);
2128 }
2129 
2130 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2131 {
2132     target_long im = low_sextract(insn, 0, 11);
2133     unsigned e1 = extract32(insn, 11, 1);
2134     unsigned cf = extract32(insn, 12, 4);
2135     unsigned rt = extract32(insn, 16, 5);
2136     unsigned r2 = extract32(insn, 21, 5);
2137     TCGv tcg_im, tcg_r2;
2138     DisasJumpType ret;
2139 
2140     if (cf) {
2141         nullify_over(ctx);
2142     }
2143 
2144     tcg_im = load_const(ctx, im);
2145     tcg_r2 = load_gpr(ctx, r2);
2146     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2147 
2148     return nullify_end(ctx, ret);
2149 }
2150 
2151 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2152 {
2153     target_long im = low_sextract(insn, 0, 11);
2154     unsigned cf = extract32(insn, 12, 4);
2155     unsigned rt = extract32(insn, 16, 5);
2156     unsigned r2 = extract32(insn, 21, 5);
2157     TCGv tcg_im, tcg_r2;
2158     DisasJumpType ret;
2159 
2160     if (cf) {
2161         nullify_over(ctx);
2162     }
2163 
2164     tcg_im = load_const(ctx, im);
2165     tcg_r2 = load_gpr(ctx, r2);
2166     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2167 
2168     return nullify_end(ctx, ret);
2169 }
2170 
2171 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2172                                     const DisasInsn *di)
2173 {
2174     unsigned rt = extract32(insn, 0, 5);
2175     unsigned m = extract32(insn, 5, 1);
2176     unsigned sz = extract32(insn, 6, 2);
2177     unsigned a = extract32(insn, 13, 1);
2178     int disp = low_sextract(insn, 16, 5);
2179     unsigned rb = extract32(insn, 21, 5);
2180     int modify = (m ? (a ? -1 : 1) : 0);
2181     TCGMemOp mop = MO_TE | sz;
2182 
2183     return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2184 }
2185 
2186 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2187                                     const DisasInsn *di)
2188 {
2189     unsigned rt = extract32(insn, 0, 5);
2190     unsigned m = extract32(insn, 5, 1);
2191     unsigned sz = extract32(insn, 6, 2);
2192     unsigned u = extract32(insn, 13, 1);
2193     unsigned rx = extract32(insn, 16, 5);
2194     unsigned rb = extract32(insn, 21, 5);
2195     TCGMemOp mop = MO_TE | sz;
2196 
2197     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2198 }
2199 
2200 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2201                                     const DisasInsn *di)
2202 {
2203     int disp = low_sextract(insn, 0, 5);
2204     unsigned m = extract32(insn, 5, 1);
2205     unsigned sz = extract32(insn, 6, 2);
2206     unsigned a = extract32(insn, 13, 1);
2207     unsigned rr = extract32(insn, 16, 5);
2208     unsigned rb = extract32(insn, 21, 5);
2209     int modify = (m ? (a ? -1 : 1) : 0);
2210     TCGMemOp mop = MO_TE | sz;
2211 
2212     return do_store(ctx, rr, rb, disp, modify, mop);
2213 }
2214 
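/* LDCW: load and clear word, the architectural semaphore primitive,
   implemented here as an atomic exchange of the memory word with zero.  */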
2215 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2216                                 const DisasInsn *di)
2217 {
2218     unsigned rt = extract32(insn, 0, 5);
2219     unsigned m = extract32(insn, 5, 1);
2220     unsigned i = extract32(insn, 12, 1);
2221     unsigned au = extract32(insn, 13, 1);
2222     unsigned rx = extract32(insn, 16, 5);
2223     unsigned rb = extract32(insn, 21, 5);
2224     TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2225     TCGv zero, addr, base, dest;
2226     int modify, disp = 0, scale = 0;
2227 
2228     nullify_over(ctx);
2229 
2230     /* ??? Share more code with do_load and do_load_{32,64}.  */
2231 
2232     if (i) {
2233         modify = (m ? (au ? -1 : 1) : 0);
2234         disp = low_sextract(rx, 0, 5);
2235         rx = 0;
2236     } else {
2237         modify = m;
2238         if (au) {
2239             scale = mop & MO_SIZE;
2240         }
2241     }
2242     if (modify) {
2243         /* Base register modification.  Make sure that if RT == RB we
2244            still see the result of the load, not the modified base.  */
2245         dest = get_temp(ctx);
2246     } else {
2247         dest = dest_gpr(ctx, rt);
2248     }
2249 
2250     addr = tcg_temp_new();
2251     base = load_gpr(ctx, rb);
2252     if (rx) {
2253         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2254         tcg_gen_add_tl(addr, addr, base);
2255     } else {
2256         tcg_gen_addi_tl(addr, base, disp);
2257     }
2258 
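    /* With "modify before" (or no modification) the access uses the
       computed address; with "modify after" it uses the original base.  */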
2259     zero = tcg_const_tl(0);
2260     tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2261                            zero, MMU_USER_IDX, mop);
2262     if (modify) {
2263         save_gpr(ctx, rb, addr);
2264     }
2265     save_gpr(ctx, rt, dest);
2266 
2267     return nullify_end(ctx, DISAS_NEXT);
2268 }
2269 
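/* STBY: store bytes.  The helpers store only the bytes selected by the
   low bits of the address; the A bit picks the begin/end form, and the
   parallel variants are used when translating for a parallel (MTTCG)
   context.  */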
2270 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
2271                                 const DisasInsn *di)
2272 {
2273     target_long disp = low_sextract(insn, 0, 5);
2274     unsigned m = extract32(insn, 5, 1);
2275     unsigned a = extract32(insn, 13, 1);
2276     unsigned rt = extract32(insn, 16, 5);
2277     unsigned rb = extract32(insn, 21, 5);
2278     TCGv addr, val;
2279 
2280     nullify_over(ctx);
2281 
2282     addr = tcg_temp_new();
2283     if (m || disp == 0) {
2284         tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2285     } else {
2286         tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2287     }
2288     val = load_gpr(ctx, rt);
2289 
2290     if (a) {
2291         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2292             gen_helper_stby_e_parallel(cpu_env, addr, val);
2293         } else {
2294             gen_helper_stby_e(cpu_env, addr, val);
2295         }
2296     } else {
2297         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2298             gen_helper_stby_b_parallel(cpu_env, addr, val);
2299         } else {
2300             gen_helper_stby_b(cpu_env, addr, val);
2301         }
2302     }
2303 
2304     if (m) {
2305         tcg_gen_addi_tl(addr, addr, disp);
2306         tcg_gen_andi_tl(addr, addr, ~3);
2307         save_gpr(ctx, rb, addr);
2308     }
2309     tcg_temp_free(addr);
2310 
2311     return nullify_end(ctx, DISAS_NEXT);
2312 }
2313 
2314 static const DisasInsn table_index_mem[] = {
2315     { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
2316     { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
2317     { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
2318     { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
2319     { 0x0c001300u, 0xfc0013c0, trans_stby },
2320 };
2321 
2322 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2323 {
2324     unsigned rt = extract32(insn, 21, 5);
2325     target_long i = assemble_21(insn);
2326     TCGv tcg_rt = dest_gpr(ctx, rt);
2327 
2328     tcg_gen_movi_tl(tcg_rt, i);
2329     save_gpr(ctx, rt, tcg_rt);
2330     cond_free(&ctx->null_cond);
2331 
2332     return DISAS_NEXT;
2333 }
2334 
2335 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2336 {
2337     unsigned rt = extract32(insn, 21, 5);
2338     target_long i = assemble_21(insn);
2339     TCGv tcg_rt = load_gpr(ctx, rt);
2340     TCGv tcg_r1 = dest_gpr(ctx, 1);
2341 
2342     tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2343     save_gpr(ctx, 1, tcg_r1);
2344     cond_free(&ctx->null_cond);
2345 
2346     return DISAS_NEXT;
2347 }
2348 
2349 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2350 {
2351     unsigned rb = extract32(insn, 21, 5);
2352     unsigned rt = extract32(insn, 16, 5);
2353     target_long i = assemble_16(insn);
2354     TCGv tcg_rt = dest_gpr(ctx, rt);
2355 
2356     /* Special case rb == 0, for the LDI pseudo-op.
2357        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2358     if (rb == 0) {
2359         tcg_gen_movi_tl(tcg_rt, i);
2360     } else {
2361         tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2362     }
2363     save_gpr(ctx, rt, tcg_rt);
2364     cond_free(&ctx->null_cond);
2365 
2366     return DISAS_NEXT;
2367 }
2368 
2369 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2370                                 bool is_mod, TCGMemOp mop)
2371 {
2372     unsigned rb = extract32(insn, 21, 5);
2373     unsigned rt = extract32(insn, 16, 5);
2374     target_long i = assemble_16(insn);
2375 
2376     return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2377 }
2378 
2379 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
2380 {
2381     unsigned rb = extract32(insn, 21, 5);
2382     unsigned rt = extract32(insn, 16, 5);
2383     target_long i = assemble_16a(insn);
2384     unsigned ext2 = extract32(insn, 1, 2);
2385 
2386     switch (ext2) {
2387     case 0:
2388     case 1:
2389         /* FLDW without modification.  */
2390         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2391     case 2:
2392         /* LDW with modification.  Note that the sign of I selects
2393            post-dec vs pre-inc.  */
2394         return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2395     default:
2396         return gen_illegal(ctx);
2397     }
2398 }
2399 
2400 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2401 {
2402     target_long i = assemble_16a(insn);
2403     unsigned t1 = extract32(insn, 1, 1);
2404     unsigned a = extract32(insn, 2, 1);
2405     unsigned t0 = extract32(insn, 16, 5);
2406     unsigned rb = extract32(insn, 21, 5);
2407 
2408     /* FLDW with modification.  */
2409     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2410 }
2411 
2412 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2413                                  bool is_mod, TCGMemOp mop)
2414 {
2415     unsigned rb = extract32(insn, 21, 5);
2416     unsigned rt = extract32(insn, 16, 5);
2417     target_long i = assemble_16(insn);
2418 
2419     return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2420 }
2421 
2422 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
2423 {
2424     unsigned rb = extract32(insn, 21, 5);
2425     unsigned rt = extract32(insn, 16, 5);
2426     target_long i = assemble_16a(insn);
2427     unsigned ext2 = extract32(insn, 1, 2);
2428 
2429     switch (ext2) {
2430     case 0:
2431     case 1:
2432         /* FSTW without modification.  */
2433         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2434     case 2:
2435         /* STW with modification.  */
2436         return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2437     default:
2438         return gen_illegal(ctx);
2439     }
2440 }
2441 
2442 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2443 {
2444     target_long i = assemble_16a(insn);
2445     unsigned t1 = extract32(insn, 1, 1);
2446     unsigned a = extract32(insn, 2, 1);
2447     unsigned t0 = extract32(insn, 16, 5);
2448     unsigned rb = extract32(insn, 21, 5);
2449 
2450     /* FSTW with modification.  */
2451     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2452 }
2453 
2454 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2455 {
2456     unsigned t0 = extract32(insn, 0, 5);
2457     unsigned m = extract32(insn, 5, 1);
2458     unsigned t1 = extract32(insn, 6, 1);
2459     unsigned ext3 = extract32(insn, 7, 3);
2460     /* unsigned cc = extract32(insn, 10, 2); */
2461     unsigned i = extract32(insn, 12, 1);
2462     unsigned ua = extract32(insn, 13, 1);
2463     unsigned rx = extract32(insn, 16, 5);
2464     unsigned rb = extract32(insn, 21, 5);
2465     unsigned rt = t1 * 32 + t0;
2466     int modify = (m ? (ua ? -1 : 1) : 0);
2467     int disp, scale;
2468 
2469     if (i == 0) {
2470         scale = (ua ? 2 : 0);
2471         disp = 0;
2472         modify = m;
2473     } else {
2474         disp = low_sextract(rx, 0, 5);
2475         scale = 0;
2476         rx = 0;
2477         modify = (m ? (ua ? -1 : 1) : 0);
2478     }
2479 
2480     switch (ext3) {
2481     case 0: /* FLDW */
2482         return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2483     case 4: /* FSTW */
2484         return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2485     }
2486     return gen_illegal(ctx);
2487 }
2488 
2489 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2490 {
2491     unsigned rt = extract32(insn, 0, 5);
2492     unsigned m = extract32(insn, 5, 1);
2493     unsigned ext4 = extract32(insn, 6, 4);
2494     /* unsigned cc = extract32(insn, 10, 2); */
2495     unsigned i = extract32(insn, 12, 1);
2496     unsigned ua = extract32(insn, 13, 1);
2497     unsigned rx = extract32(insn, 16, 5);
2498     unsigned rb = extract32(insn, 21, 5);
2499     int modify = (m ? (ua ? -1 : 1) : 0);
2500     int disp, scale;
2501 
2502     if (i == 0) {
2503         scale = (ua ? 3 : 0);
2504         disp = 0;
2505         modify = m;
2506     } else {
2507         disp = low_sextract(rx, 0, 5);
2508         scale = 0;
2509         rx = 0;
2510         modify = (m ? (ua ? -1 : 1) : 0);
2511     }
2512 
2513     switch (ext4) {
2514     case 0: /* FLDD */
2515         return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2516     case 8: /* FSTD */
2517         return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2518     default:
2519         return gen_illegal(ctx);
2520     }
2521 }
2522 
2523 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
2524                                 bool is_true, bool is_imm, bool is_dw)
2525 {
2526     target_long disp = assemble_12(insn) * 4;
2527     unsigned n = extract32(insn, 1, 1);
2528     unsigned c = extract32(insn, 13, 3);
2529     unsigned r = extract32(insn, 21, 5);
2530     unsigned cf = c * 2 + !is_true;
2531     TCGv dest, in1, in2, sv;
2532     DisasCond cond;
2533 
2534     nullify_over(ctx);
2535 
2536     if (is_imm) {
2537         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2538     } else {
2539         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2540     }
2541     in2 = load_gpr(ctx, r);
2542     dest = get_temp(ctx);
2543 
2544     tcg_gen_sub_tl(dest, in1, in2);
2545 
2546     sv = NULL;
2547     if (c == 6) {
2548         sv = do_sub_sv(ctx, dest, in1, in2);
2549     }
2550 
2551     cond = do_sub_cond(cf, dest, in1, in2, sv);
2552     return do_cbranch(ctx, disp, n, &cond);
2553 }
2554 
2555 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
2556                                 bool is_true, bool is_imm)
2557 {
2558     target_long disp = assemble_12(insn) * 4;
2559     unsigned n = extract32(insn, 1, 1);
2560     unsigned c = extract32(insn, 13, 3);
2561     unsigned r = extract32(insn, 21, 5);
2562     unsigned cf = c * 2 + !is_true;
2563     TCGv dest, in1, in2, sv, cb_msb;
2564     DisasCond cond;
2565 
2566     nullify_over(ctx);
2567 
2568     if (is_imm) {
2569         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2570     } else {
2571         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2572     }
2573     in2 = load_gpr(ctx, r);
2574     dest = dest_gpr(ctx, r);
2575     sv = NULL;
2576     cb_msb = NULL;
2577 
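    /* Conditions 4 and 5 need the carry out of the addition, so compute
       the sum with an explicit carry-out; condition 6 needs the signed
       overflow instead.  */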
2578     switch (c) {
2579     default:
2580         tcg_gen_add_tl(dest, in1, in2);
2581         break;
2582     case 4: case 5:
2583         cb_msb = get_temp(ctx);
2584         tcg_gen_movi_tl(cb_msb, 0);
2585         tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2586         break;
2587     case 6:
2588         tcg_gen_add_tl(dest, in1, in2);
2589         sv = do_add_sv(ctx, dest, in1, in2);
2590         break;
2591     }
2592 
2593     cond = do_cond(cf, dest, cb_msb, sv);
2594     return do_cbranch(ctx, disp, n, &cond);
2595 }
2596 
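/* BB: branch on bit.  Shift the selected bit (fixed position P, or
   variable via SAR) up to the sign position and test its sign.  */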
2597 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
2598 {
2599     target_long disp = assemble_12(insn) * 4;
2600     unsigned n = extract32(insn, 1, 1);
2601     unsigned c = extract32(insn, 15, 1);
2602     unsigned r = extract32(insn, 16, 5);
2603     unsigned p = extract32(insn, 21, 5);
2604     unsigned i = extract32(insn, 26, 1);
2605     TCGv tmp, tcg_r;
2606     DisasCond cond;
2607 
2608     nullify_over(ctx);
2609 
2610     tmp = tcg_temp_new();
2611     tcg_r = load_gpr(ctx, r);
2612     if (i) {
2613         tcg_gen_shli_tl(tmp, tcg_r, p);
2614     } else {
2615         tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2616     }
2617 
2618     cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2619     tcg_temp_free(tmp);
2620     return do_cbranch(ctx, disp, n, &cond);
2621 }
2622 
2623 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2624 {
2625     target_long disp = assemble_12(insn) * 4;
2626     unsigned n = extract32(insn, 1, 1);
2627     unsigned c = extract32(insn, 13, 3);
2628     unsigned t = extract32(insn, 16, 5);
2629     unsigned r = extract32(insn, 21, 5);
2630     TCGv dest;
2631     DisasCond cond;
2632 
2633     nullify_over(ctx);
2634 
2635     dest = dest_gpr(ctx, r);
2636     if (is_imm) {
2637         tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2638     } else if (t == 0) {
2639         tcg_gen_movi_tl(dest, 0);
2640     } else {
2641         tcg_gen_mov_tl(dest, cpu_gr[t]);
2642     }
2643 
2644     cond = do_sed_cond(c, dest);
2645     return do_cbranch(ctx, disp, n, &cond);
2646 }
2647 
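/* SHRPW with a variable (SAR) shift amount: shift the 64-bit pair R1:R2
   right and keep the low 32 bits.  R1 == 0 (plain shift) and R1 == R2
   (rotate) get cheaper special cases.  */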
2648 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2649                                     const DisasInsn *di)
2650 {
2651     unsigned rt = extract32(insn, 0, 5);
2652     unsigned c = extract32(insn, 13, 3);
2653     unsigned r1 = extract32(insn, 16, 5);
2654     unsigned r2 = extract32(insn, 21, 5);
2655     TCGv dest;
2656 
2657     if (c) {
2658         nullify_over(ctx);
2659     }
2660 
2661     dest = dest_gpr(ctx, rt);
2662     if (r1 == 0) {
2663         tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2664         tcg_gen_shr_tl(dest, dest, cpu_sar);
2665     } else if (r1 == r2) {
2666         TCGv_i32 t32 = tcg_temp_new_i32();
2667         tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2668         tcg_gen_rotr_i32(t32, t32, cpu_sar);
2669         tcg_gen_extu_i32_tl(dest, t32);
2670         tcg_temp_free_i32(t32);
2671     } else {
2672         TCGv_i64 t = tcg_temp_new_i64();
2673         TCGv_i64 s = tcg_temp_new_i64();
2674 
2675         tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2676         tcg_gen_extu_tl_i64(s, cpu_sar);
2677         tcg_gen_shr_i64(t, t, s);
2678         tcg_gen_trunc_i64_tl(dest, t);
2679 
2680         tcg_temp_free_i64(t);
2681         tcg_temp_free_i64(s);
2682     }
2683     save_gpr(ctx, rt, dest);
2684 
2685     /* Install the new nullification.  */
2686     cond_free(&ctx->null_cond);
2687     if (c) {
2688         ctx->null_cond = do_sed_cond(c, dest);
2689     }
2690     return nullify_end(ctx, DISAS_NEXT);
2691 }
2692 
2693 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2694                                      const DisasInsn *di)
2695 {
2696     unsigned rt = extract32(insn, 0, 5);
2697     unsigned cpos = extract32(insn, 5, 5);
2698     unsigned c = extract32(insn, 13, 3);
2699     unsigned r1 = extract32(insn, 16, 5);
2700     unsigned r2 = extract32(insn, 21, 5);
2701     unsigned sa = 31 - cpos;
2702     TCGv dest, t2;
2703 
2704     if (c) {
2705         nullify_over(ctx);
2706     }
2707 
2708     dest = dest_gpr(ctx, rt);
2709     t2 = load_gpr(ctx, r2);
2710     if (r1 == r2) {
2711         TCGv_i32 t32 = tcg_temp_new_i32();
2712         tcg_gen_trunc_tl_i32(t32, t2);
2713         tcg_gen_rotri_i32(t32, t32, sa);
2714         tcg_gen_extu_i32_tl(dest, t32);
2715         tcg_temp_free_i32(t32);
2716     } else if (r1 == 0) {
2717         tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2718     } else {
2719         TCGv t0 = tcg_temp_new();
2720         tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2721         tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2722         tcg_temp_free(t0);
2723     }
2724     save_gpr(ctx, rt, dest);
2725 
2726     /* Install the new nullification.  */
2727     cond_free(&ctx->null_cond);
2728     if (c) {
2729         ctx->null_cond = do_sed_cond(c, dest);
2730     }
2731     return nullify_end(ctx, DISAS_NEXT);
2732 }
2733 
2734 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2735                                      const DisasInsn *di)
2736 {
2737     unsigned clen = extract32(insn, 0, 5);
2738     unsigned is_se = extract32(insn, 10, 1);
2739     unsigned c = extract32(insn, 13, 3);
2740     unsigned rt = extract32(insn, 16, 5);
2741     unsigned rr = extract32(insn, 21, 5);
2742     unsigned len = 32 - clen;
2743     TCGv dest, src, tmp;
2744 
2745     if (c) {
2746         nullify_over(ctx);
2747     }
2748 
2749     dest = dest_gpr(ctx, rt);
2750     src = load_gpr(ctx, rr);
2751     tmp = tcg_temp_new();
2752 
2753     /* Recall that SAR uses big-endian bit numbering.  */
2754     tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2755     if (is_se) {
2756         tcg_gen_sar_tl(dest, src, tmp);
2757         tcg_gen_sextract_tl(dest, dest, 0, len);
2758     } else {
2759         tcg_gen_shr_tl(dest, src, tmp);
2760         tcg_gen_extract_tl(dest, dest, 0, len);
2761     }
2762     tcg_temp_free(tmp);
2763     save_gpr(ctx, rt, dest);
2764 
2765     /* Install the new nullification.  */
2766     cond_free(&ctx->null_cond);
2767     if (c) {
2768         ctx->null_cond = do_sed_cond(c, dest);
2769     }
2770     return nullify_end(ctx, DISAS_NEXT);
2771 }
2772 
2773 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2774                                      const DisasInsn *di)
2775 {
2776     unsigned clen = extract32(insn, 0, 5);
2777     unsigned pos = extract32(insn, 5, 5);
2778     unsigned is_se = extract32(insn, 10, 1);
2779     unsigned c = extract32(insn, 13, 3);
2780     unsigned rt = extract32(insn, 16, 5);
2781     unsigned rr = extract32(insn, 21, 5);
2782     unsigned len = 32 - clen;
2783     unsigned cpos = 31 - pos;
2784     TCGv dest, src;
2785 
2786     if (c) {
2787         nullify_over(ctx);
2788     }
2789 
2790     dest = dest_gpr(ctx, rt);
2791     src = load_gpr(ctx, rr);
2792     if (is_se) {
2793         tcg_gen_sextract_tl(dest, src, cpos, len);
2794     } else {
2795         tcg_gen_extract_tl(dest, src, cpos, len);
2796     }
2797     save_gpr(ctx, rt, dest);
2798 
2799     /* Install the new nullification.  */
2800     cond_free(&ctx->null_cond);
2801     if (c) {
2802         ctx->null_cond = do_sed_cond(c, dest);
2803     }
2804     return nullify_end(ctx, DISAS_NEXT);
2805 }
2806 
2807 static const DisasInsn table_sh_ex[] = {
2808     { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2809     { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2810     { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2811     { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2812 };
2813 
2814 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2815                                       const DisasInsn *di)
2816 {
2817     unsigned clen = extract32(insn, 0, 5);
2818     unsigned cpos = extract32(insn, 5, 5);
2819     unsigned nz = extract32(insn, 10, 1);
2820     unsigned c = extract32(insn, 13, 3);
2821     target_long val = low_sextract(insn, 16, 5);
2822     unsigned rt = extract32(insn, 21, 5);
2823     unsigned len = 32 - clen;
2824     target_long mask0, mask1;
2825     TCGv dest;
2826 
2827     if (c) {
2828         nullify_over(ctx);
2829     }
2830     if (cpos + len > 32) {
2831         len = 32 - cpos;
2832     }
2833 
2834     dest = dest_gpr(ctx, rt);
2835     mask0 = deposit64(0, cpos, len, val);
2836     mask1 = deposit64(-1, cpos, len, val);
2837 
2838     if (nz) {
2839         TCGv src = load_gpr(ctx, rt);
2840         if (mask1 != -1) {
2841             tcg_gen_andi_tl(dest, src, mask1);
2842             src = dest;
2843         }
2844         tcg_gen_ori_tl(dest, src, mask0);
2845     } else {
2846         tcg_gen_movi_tl(dest, mask0);
2847     }
2848     save_gpr(ctx, rt, dest);
2849 
2850     /* Install the new nullification.  */
2851     cond_free(&ctx->null_cond);
2852     if (c) {
2853         ctx->null_cond = do_sed_cond(c, dest);
2854     }
2855     return nullify_end(ctx, DISAS_NEXT);
2856 }
2857 
2858 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
2859                                     const DisasInsn *di)
2860 {
2861     unsigned clen = extract32(insn, 0, 5);
2862     unsigned cpos = extract32(insn, 5, 5);
2863     unsigned nz = extract32(insn, 10, 1);
2864     unsigned c = extract32(insn, 13, 3);
2865     unsigned rr = extract32(insn, 16, 5);
2866     unsigned rt = extract32(insn, 21, 5);
2867     unsigned rs = nz ? rt : 0;
2868     unsigned len = 32 - clen;
2869     TCGv dest, val;
2870 
2871     if (c) {
2872         nullify_over(ctx);
2873     }
2874     if (cpos + len > 32) {
2875         len = 32 - cpos;
2876     }
2877 
2878     dest = dest_gpr(ctx, rt);
2879     val = load_gpr(ctx, rr);
2880     if (rs == 0) {
2881         tcg_gen_deposit_z_tl(dest, val, cpos, len);
2882     } else {
2883         tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2884     }
2885     save_gpr(ctx, rt, dest);
2886 
2887     /* Install the new nullification.  */
2888     cond_free(&ctx->null_cond);
2889     if (c) {
2890         ctx->null_cond = do_sed_cond(c, dest);
2891     }
2892     return nullify_end(ctx, DISAS_NEXT);
2893 }
2894 
2895 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
2896                                     const DisasInsn *di)
2897 {
2898     unsigned clen = extract32(insn, 0, 5);
2899     unsigned nz = extract32(insn, 10, 1);
2900     unsigned i = extract32(insn, 12, 1);
2901     unsigned c = extract32(insn, 13, 3);
2902     unsigned rt = extract32(insn, 21, 5);
2903     unsigned rs = nz ? rt : 0;
2904     unsigned len = 32 - clen;
2905     TCGv val, mask, tmp, shift, dest;
2906     unsigned msb = 1U << (len - 1);
2907 
2908     if (c) {
2909         nullify_over(ctx);
2910     }
2911 
2912     if (i) {
2913         val = load_const(ctx, low_sextract(insn, 16, 5));
2914     } else {
2915         val = load_gpr(ctx, extract32(insn, 16, 5));
2916     }
2917     dest = dest_gpr(ctx, rt);
2918     shift = tcg_temp_new();
2919     tmp = tcg_temp_new();
2920 
2921     /* Convert big-endian bit numbering in SAR to left-shift.  */
2922     tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2923 
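    /* msb + (msb - 1) is a field of LEN low one bits covering the value
       to be deposited.  */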
2924     mask = tcg_const_tl(msb + (msb - 1));
2925     tcg_gen_and_tl(tmp, val, mask);
2926     if (rs) {
2927         tcg_gen_shl_tl(mask, mask, shift);
2928         tcg_gen_shl_tl(tmp, tmp, shift);
2929         tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2930         tcg_gen_or_tl(dest, dest, tmp);
2931     } else {
2932         tcg_gen_shl_tl(dest, tmp, shift);
2933     }
2934     tcg_temp_free(shift);
2935     tcg_temp_free(mask);
2936     tcg_temp_free(tmp);
2937     save_gpr(ctx, rt, dest);
2938 
2939     /* Install the new nullification.  */
2940     cond_free(&ctx->null_cond);
2941     if (c) {
2942         ctx->null_cond = do_sed_cond(c, dest);
2943     }
2944     return nullify_end(ctx, DISAS_NEXT);
2945 }
2946 
2947 static const DisasInsn table_depw[] = {
2948     { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2949     { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2950     { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2951 };
2952 
2953 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2954 {
2955     unsigned n = extract32(insn, 1, 1);
2956     unsigned b = extract32(insn, 21, 5);
2957     target_long disp = assemble_17(insn);
2958 
2959     /* unsigned s = low_uextract(insn, 13, 3); */
2960     /* ??? It seems like there should be a good way of using
2961        "be disp(sr2, r0)", the canonical gateway entry mechanism
2962        to our advantage.  But that appears to be inconvenient to
2963        manage alongside branch delay slots.  Therefore we handle
2964        entry into the gateway page via absolute address.  */
2965 
2966     /* Since we don't implement spaces, just branch.  Do notice the special
2967        case of "be disp(*,r0)" using a direct branch to disp, so that we can
2968        goto_tb to the TB containing the syscall.  */
2969     if (b == 0) {
2970         return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2971     } else {
2972         TCGv tmp = get_temp(ctx);
2973         tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2974         return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2975     }
2976 }
2977 
2978 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2979                               const DisasInsn *di)
2980 {
2981     unsigned n = extract32(insn, 1, 1);
2982     unsigned link = extract32(insn, 21, 5);
2983     target_long disp = assemble_17(insn);
2984 
2985     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2986 }
2987 
2988 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2989                                    const DisasInsn *di)
2990 {
2991     unsigned n = extract32(insn, 1, 1);
2992     target_long disp = assemble_22(insn);
2993 
2994     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2995 }
2996 
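/* BLR: branch and link register.  The target is the current instruction
   address plus 8, plus 8 * GR[x].  */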
2997 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
2998                                const DisasInsn *di)
2999 {
3000     unsigned n = extract32(insn, 1, 1);
3001     unsigned rx = extract32(insn, 16, 5);
3002     unsigned link = extract32(insn, 21, 5);
3003     TCGv tmp = get_temp(ctx);
3004 
3005     tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3006     tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3007     return do_ibranch(ctx, tmp, link, n);
3008 }
3009 
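/* BV: branch vectored.  The target is GR[b] + 8 * GR[x]; with x == 0
   this is a plain indirect branch through GR[b].  */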
3010 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3011                               const DisasInsn *di)
3012 {
3013     unsigned n = extract32(insn, 1, 1);
3014     unsigned rx = extract32(insn, 16, 5);
3015     unsigned rb = extract32(insn, 21, 5);
3016     TCGv dest;
3017 
3018     if (rx == 0) {
3019         dest = load_gpr(ctx, rb);
3020     } else {
3021         dest = get_temp(ctx);
3022         tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3023         tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3024     }
3025     return do_ibranch(ctx, dest, 0, n);
3026 }
3027 
3028 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3029                                const DisasInsn *di)
3030 {
3031     unsigned n = extract32(insn, 1, 1);
3032     unsigned rb = extract32(insn, 21, 5);
3033     unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3034 
3035     return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3036 }
3037 
3038 static const DisasInsn table_branch[] = {
3039     { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3040     { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3041     { 0xe8004000u, 0xfc00fffdu, trans_blr },
3042     { 0xe800c000u, 0xfc00fffdu, trans_bv },
3043     { 0xe800d000u, 0xfc00dffcu, trans_bve },
3044 };
3045 
3046 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3047                                       const DisasInsn *di)
3048 {
3049     unsigned rt = extract32(insn, 0, 5);
3050     unsigned ra = extract32(insn, 21, 5);
3051     return do_fop_wew(ctx, rt, ra, di->f.wew);
3052 }
3053 
3054 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3055                                       const DisasInsn *di)
3056 {
3057     unsigned rt = assemble_rt64(insn);
3058     unsigned ra = assemble_ra64(insn);
3059     return do_fop_wew(ctx, rt, ra, di->f.wew);
3060 }
3061 
3062 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3063                                    const DisasInsn *di)
3064 {
3065     unsigned rt = extract32(insn, 0, 5);
3066     unsigned ra = extract32(insn, 21, 5);
3067     return do_fop_ded(ctx, rt, ra, di->f.ded);
3068 }
3069 
3070 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3071                                       const DisasInsn *di)
3072 {
3073     unsigned rt = extract32(insn, 0, 5);
3074     unsigned ra = extract32(insn, 21, 5);
3075     return do_fop_wed(ctx, rt, ra, di->f.wed);
3076 }
3077 
3078 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3079                                       const DisasInsn *di)
3080 {
3081     unsigned rt = assemble_rt64(insn);
3082     unsigned ra = extract32(insn, 21, 5);
3083     return do_fop_wed(ctx, rt, ra, di->f.wed);
3084 }
3085 
3086 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3087                                       const DisasInsn *di)
3088 {
3089     unsigned rt = extract32(insn, 0, 5);
3090     unsigned ra = extract32(insn, 21, 5);
3091     return do_fop_dew(ctx, rt, ra, di->f.dew);
3092 }
3093 
3094 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3095                                       const DisasInsn *di)
3096 {
3097     unsigned rt = extract32(insn, 0, 5);
3098     unsigned ra = assemble_ra64(insn);
3099     return do_fop_dew(ctx, rt, ra, di->f.dew);
3100 }
3101 
3102 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3103                                        const DisasInsn *di)
3104 {
3105     unsigned rt = extract32(insn, 0, 5);
3106     unsigned rb = extract32(insn, 16, 5);
3107     unsigned ra = extract32(insn, 21, 5);
3108     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3109 }
3110 
3111 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3112                                        const DisasInsn *di)
3113 {
3114     unsigned rt = assemble_rt64(insn);
3115     unsigned rb = assemble_rb64(insn);
3116     unsigned ra = assemble_ra64(insn);
3117     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3118 }
3119 
3120 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3121                                     const DisasInsn *di)
3122 {
3123     unsigned rt = extract32(insn, 0, 5);
3124     unsigned rb = extract32(insn, 16, 5);
3125     unsigned ra = extract32(insn, 21, 5);
3126     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3127 }
3128 
3129 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3130 {
3131     tcg_gen_mov_i32(dst, src);
3132 }
3133 
3134 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3135 {
3136     tcg_gen_mov_i64(dst, src);
3137 }
3138 
3139 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3140 {
3141     tcg_gen_andi_i32(dst, src, INT32_MAX);
3142 }
3143 
3144 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3145 {
3146     tcg_gen_andi_i64(dst, src, INT64_MAX);
3147 }
3148 
3149 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3150 {
3151     tcg_gen_xori_i32(dst, src, INT32_MIN);
3152 }
3153 
3154 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3155 {
3156     tcg_gen_xori_i64(dst, src, INT64_MIN);
3157 }
3158 
3159 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3160 {
3161     tcg_gen_ori_i32(dst, src, INT32_MIN);
3162 }
3163 
3164 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3165 {
3166     tcg_gen_ori_i64(dst, src, INT64_MIN);
3167 }
3168 
3169 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3170                                unsigned y, unsigned c)
3171 {
3172     TCGv_i32 ta, tb, tc, ty;
3173 
3174     nullify_over(ctx);
3175 
3176     ta = load_frw0_i32(ra);
3177     tb = load_frw0_i32(rb);
3178     ty = tcg_const_i32(y);
3179     tc = tcg_const_i32(c);
3180 
3181     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3182 
3183     tcg_temp_free_i32(ta);
3184     tcg_temp_free_i32(tb);
3185     tcg_temp_free_i32(ty);
3186     tcg_temp_free_i32(tc);
3187 
3188     return nullify_end(ctx, DISAS_NEXT);
3189 }
3190 
3191 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3192                                      const DisasInsn *di)
3193 {
3194     unsigned c = extract32(insn, 0, 5);
3195     unsigned y = extract32(insn, 13, 3);
3196     unsigned rb = extract32(insn, 16, 5);
3197     unsigned ra = extract32(insn, 21, 5);
3198     return do_fcmp_s(ctx, ra, rb, y, c);
3199 }
3200 
3201 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3202                                      const DisasInsn *di)
3203 {
3204     unsigned c = extract32(insn, 0, 5);
3205     unsigned y = extract32(insn, 13, 3);
3206     unsigned rb = assemble_rb64(insn);
3207     unsigned ra = assemble_ra64(insn);
3208     return do_fcmp_s(ctx, ra, rb, y, c);
3209 }
3210 
3211 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3212                                   const DisasInsn *di)
3213 {
3214     unsigned c = extract32(insn, 0, 5);
3215     unsigned y = extract32(insn, 13, 3);
3216     unsigned rb = extract32(insn, 16, 5);
3217     unsigned ra = extract32(insn, 21, 5);
3218     TCGv_i64 ta, tb;
3219     TCGv_i32 tc, ty;
3220 
3221     nullify_over(ctx);
3222 
3223     ta = load_frd0(ra);
3224     tb = load_frd0(rb);
3225     ty = tcg_const_i32(y);
3226     tc = tcg_const_i32(c);
3227 
3228     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3229 
3230     tcg_temp_free_i64(ta);
3231     tcg_temp_free_i64(tb);
3232     tcg_temp_free_i32(ty);
3233     tcg_temp_free_i32(tc);
3234 
3235     return nullify_end(ctx, DISAS_NEXT);
3236 }
3237 
3238 static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
3239                                    const DisasInsn *di)
3240 {
3241     unsigned y = extract32(insn, 13, 3);
3242     unsigned cbit = (y ^ 1) - 1;
3243     TCGv t;
3244 
3245     nullify_over(ctx);
3246 
3247     t = tcg_temp_new();
3248     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3249     tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3250     ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3251     tcg_temp_free(t);
3252 
3253     return nullify_end(ctx, DISAS_NEXT);
3254 }
3255 
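/* FTEST with a queue condition: test the comparison results shadowed in
   fr0_shadow against a per-condition acceptance (or, inverted,
   rejection) mask.  */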
3256 static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
3257                                    const DisasInsn *di)
3258 {
3259     unsigned c = extract32(insn, 0, 5);
3260     int mask;
3261     bool inv = false;
3262     TCGv t;
3263 
3264     nullify_over(ctx);
3265 
3266     t = tcg_temp_new();
3267     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3268 
3269     switch (c) {
3270     case 0: /* simple */
3271         tcg_gen_andi_tl(t, t, 0x4000000);
3272         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3273         goto done;
3274     case 2: /* rej */
3275         inv = true;
3276         /* fallthru */
3277     case 1: /* acc */
3278         mask = 0x43ff800;
3279         break;
3280     case 6: /* rej8 */
3281         inv = true;
3282         /* fallthru */
3283     case 5: /* acc8 */
3284         mask = 0x43f8000;
3285         break;
3286     case 9: /* acc6 */
3287         mask = 0x43e0000;
3288         break;
3289     case 13: /* acc4 */
3290         mask = 0x4380000;
3291         break;
3292     case 17: /* acc2 */
3293         mask = 0x4200000;
3294         break;
3295     default:
3296         return gen_illegal(ctx);
3297     }
3298     if (inv) {
3299         TCGv c = load_const(ctx, mask);
3300         tcg_gen_or_tl(t, t, c);
3301         ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3302     } else {
3303         tcg_gen_andi_tl(t, t, mask);
3304         ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3305     }
3306  done:
3307     return nullify_end(ctx, DISAS_NEXT);
3308 }
3309 
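/* XMPYU: unsigned 32x32 -> 64-bit fixed-point multiply performed in the
   floating-point register file.  */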
3310 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
3311                                  const DisasInsn *di)
3312 {
3313     unsigned rt = extract32(insn, 0, 5);
3314     unsigned rb = assemble_rb64(insn);
3315     unsigned ra = assemble_ra64(insn);
3316     TCGv_i64 a, b;
3317 
3318     nullify_over(ctx);
3319 
3320     a = load_frw0_i64(ra);
3321     b = load_frw0_i64(rb);
3322     tcg_gen_mul_i64(a, a, b);
3323     save_frd(rt, a);
3324     tcg_temp_free_i64(a);
3325     tcg_temp_free_i64(b);
3326 
3327     return nullify_end(ctx, DISAS_NEXT);
3328 }
3329 
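/* Each FOP_* macro pairs a translator with the matching union member, so
   that an entry such as "FOP_WEW = fn" in the tables below expands to
   "trans_fop_wew_0c, .f.wew = fn" via a designated initializer (the 0e
   variants are redefined further down).  */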
3330 #define FOP_DED  trans_fop_ded, .f.ded
3331 #define FOP_DEDD trans_fop_dedd, .f.dedd
3332 
3333 #define FOP_WEW  trans_fop_wew_0c, .f.wew
3334 #define FOP_DEW  trans_fop_dew_0c, .f.dew
3335 #define FOP_WED  trans_fop_wed_0c, .f.wed
3336 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3337 
3338 static const DisasInsn table_float_0c[] = {
3339     /* floating point class zero */
3340     { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3341     { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3342     { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3343     { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3344     { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3345     { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3346 
3347     { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3348     { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3349     { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3350     { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3351     { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3352     { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3353 
3354     /* floating point class three */
3355     { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3356     { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3357     { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3358     { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3359 
3360     { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3361     { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3362     { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3363     { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3364 
3365     /* floating point class one */
3366     /* float/float */
3367     { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3368     { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3369     /* int/float */
3370     { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3371     { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3372     { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3373     { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3374     /* float/int */
3375     { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3376     { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3377     { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3378     { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3379     /* float/int truncate */
3380     { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3381     { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3382     { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3383     { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3384     /* uint/float */
3385     { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3386     { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3387     { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3388     { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3389     /* float/uint */
3390     { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3391     { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3392     { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3393     { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3394     /* float/uint truncate */
3395     { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3396     { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3397     { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3398     { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3399 
3400     /* floating point class two */
3401     { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3402     { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3403     { 0x30002420, 0xffffffe0, trans_ftest_q },
3404     { 0x30000420, 0xffff1fff, trans_ftest_t },
3405 
3406     /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3407        This is machine/revision == 0, which is reserved for the simulator.  */
3408     { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3409 };
3410 
3411 #undef FOP_WEW
3412 #undef FOP_DEW
3413 #undef FOP_WED
3414 #undef FOP_WEWW
3415 #define FOP_WEW  trans_fop_wew_0e, .f.wew
3416 #define FOP_DEW  trans_fop_dew_0e, .f.dew
3417 #define FOP_WED  trans_fop_wed_0e, .f.wed
3418 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3419 
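/* The 0x0E versions of these operations carry extra bits, decoded by the
   assemble_r*64 helpers, that select the left or right word of a register
   for single-precision operands; hence the looser masks below.  */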
3420 static const DisasInsn table_float_0e[] = {
3421     /* floating point class zero */
3422     { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3423     { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3424     { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3425     { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3426     { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3427     { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3428 
3429     { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3430     { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3431     { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3432     { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3433     { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3434     { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3435 
3436     /* floating point class three */
3437     { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3438     { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3439     { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3440     { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3441 
3442     { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3443     { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3444     { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3445     { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3446 
3447     { 0x38004700, 0xfc00ef60, trans_xmpyu },
3448 
3449     /* floating point class one */
3450     /* float/float */
3451     { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3452     { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3453     /* int/float */
3454     { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3455     { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3456     { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3457     { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3458     /* float/int */
3459     { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3460     { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3461     { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3462     { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3463     /* float/int truncate */
3464     { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3465     { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3466     { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3467     { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3468     /* uint/float */
3469     { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3470     { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3471     { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3472     { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3473     /* float/uint */
3474     { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3475     { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3476     { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3477     { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3478     /* float/uint truncate */
3479     { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3480     { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3481     { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3482     { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3483 
3484     /* floating point class two */
3485     { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3486     { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3487 };
3488 
3489 #undef FOP_WEW
3490 #undef FOP_DEW
3491 #undef FOP_WED
3492 #undef FOP_WEWW
3493 #undef FOP_DED
3494 #undef FOP_DEDD
3495 
3496 /* Convert the fmpyadd single-precision register encodings to standard.  */
3497 static inline int fmpyadd_s_reg(unsigned r)
3498 {
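    /* The 5-bit field can only name fr16-fr31: values 0-15 map to 16-31
       and 16-31 map to 48-63 in the flat single-word numbering, with
       bit 4 selecting the second word of the register.  */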
3499     return (r & 16) * 2 + 16 + (r & 15);
3500 }
3501 
3502 static DisasJumpType trans_fmpyadd(DisasContext *ctx,
3503                                    uint32_t insn, bool is_sub)
3504 {
3505     unsigned tm = extract32(insn, 0, 5);
3506     unsigned f = extract32(insn, 5, 1);
3507     unsigned ra = extract32(insn, 6, 5);
3508     unsigned ta = extract32(insn, 11, 5);
3509     unsigned rm2 = extract32(insn, 16, 5);
3510     unsigned rm1 = extract32(insn, 21, 5);
3511 
3512     nullify_over(ctx);
3513 
3514     /* Independent multiply & add/sub, with undefined behaviour
3515        if outputs overlap inputs.  */
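    /* f is the format bit: 0 selects single precision, which uses the
       fmpyadd-specific register numbering; 1 selects double precision.  */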
3516     if (f == 0) {
3517         tm = fmpyadd_s_reg(tm);
3518         ra = fmpyadd_s_reg(ra);
3519         ta = fmpyadd_s_reg(ta);
3520         rm2 = fmpyadd_s_reg(rm2);
3521         rm1 = fmpyadd_s_reg(rm1);
3522         do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3523         do_fop_weww(ctx, ta, ta, ra,
3524                     is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3525     } else {
3526         do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3527         do_fop_dedd(ctx, ta, ta, ra,
3528                     is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3529     }
3530 
3531     return nullify_end(ctx, DISAS_NEXT);
3532 }
3533 
3534 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3535                                       const DisasInsn *di)
3536 {
3537     unsigned rt = assemble_rt64(insn);
3538     unsigned neg = extract32(insn, 5, 1);
3539     unsigned rm1 = assemble_ra64(insn);
3540     unsigned rm2 = assemble_rb64(insn);
3541     unsigned ra3 = assemble_rc64(insn);
3542     TCGv_i32 a, b, c;
3543 
3544     nullify_over(ctx);
3545     a = load_frw0_i32(rm1);
3546     b = load_frw0_i32(rm2);
3547     c = load_frw0_i32(ra3);
3548 
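    /* The neg bit selects FMPYNFADD, which negates the product before
       adding the third operand.  */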
3549     if (neg) {
3550         gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3551     } else {
3552         gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3553     }
3554 
3555     tcg_temp_free_i32(b);
3556     tcg_temp_free_i32(c);
3557     save_frw_i32(rt, a);
3558     tcg_temp_free_i32(a);
3559     return nullify_end(ctx, DISAS_NEXT);
3560 }
3561 
3562 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3563                                       const DisasInsn *di)
3564 {
3565     unsigned rt = extract32(insn, 0, 5);
3566     unsigned neg = extract32(insn, 5, 1);
3567     unsigned rm1 = extract32(insn, 21, 5);
3568     unsigned rm2 = extract32(insn, 16, 5);
3569     unsigned ra3 = assemble_rc64(insn);
3570     TCGv_i64 a, b, c;
3571 
3572     nullify_over(ctx);
3573     a = load_frd0(rm1);
3574     b = load_frd0(rm2);
3575     c = load_frd0(ra3);
3576 
3577     if (neg) {
3578         gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3579     } else {
3580         gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3581     }
3582 
3583     tcg_temp_free_i64(b);
3584     tcg_temp_free_i64(c);
3585     save_frd(rt, a);
3586     tcg_temp_free_i64(a);
3587     return nullify_end(ctx, DISAS_NEXT);
3588 }
3589 
3590 static const DisasInsn table_fp_fused[] = {
3591     { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3592     { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
3593 };
3594 
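/* Scan a decode table for the first entry whose masked bits match INSN;
   anything left unmatched decodes as an illegal instruction.  */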
3595 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
3596                                          const DisasInsn table[], size_t n)
3597 {
3598     size_t i;
3599     for (i = 0; i < n; ++i) {
3600         if ((insn & table[i].mask) == table[i].insn) {
3601             return table[i].trans(ctx, insn, &table[i]);
3602         }
3603     }
3604     return gen_illegal(ctx);
3605 }
3606 
3607 #define translate_table(ctx, insn, table) \
3608     translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3609 
3610 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
3611 {
3612     uint32_t opc = extract32(insn, 26, 6);
3613 
3614     switch (opc) {
3615     case 0x00: /* system op */
3616         return translate_table(ctx, insn, table_system);
3617     case 0x01:
3618         return translate_table(ctx, insn, table_mem_mgmt);
3619     case 0x02:
3620         return translate_table(ctx, insn, table_arith_log);
3621     case 0x03:
3622         return translate_table(ctx, insn, table_index_mem);
3623     case 0x06:
3624         return trans_fmpyadd(ctx, insn, false);
3625     case 0x08:
3626         return trans_ldil(ctx, insn);
3627     case 0x09:
3628         return trans_copr_w(ctx, insn);
3629     case 0x0A:
3630         return trans_addil(ctx, insn);
3631     case 0x0B:
3632         return trans_copr_dw(ctx, insn);
3633     case 0x0C:
3634         return translate_table(ctx, insn, table_float_0c);
3635     case 0x0D:
3636         return trans_ldo(ctx, insn);
3637     case 0x0E:
3638         return translate_table(ctx, insn, table_float_0e);
3639 
3640     case 0x10:
3641         return trans_load(ctx, insn, false, MO_UB);
3642     case 0x11:
3643         return trans_load(ctx, insn, false, MO_TEUW);
3644     case 0x12:
3645         return trans_load(ctx, insn, false, MO_TEUL);
3646     case 0x13:
3647         return trans_load(ctx, insn, true, MO_TEUL);
3648     case 0x16:
3649         return trans_fload_mod(ctx, insn);
3650     case 0x17:
3651         return trans_load_w(ctx, insn);
3652     case 0x18:
3653         return trans_store(ctx, insn, false, MO_UB);
3654     case 0x19:
3655         return trans_store(ctx, insn, false, MO_TEUW);
3656     case 0x1A:
3657         return trans_store(ctx, insn, false, MO_TEUL);
3658     case 0x1B:
3659         return trans_store(ctx, insn, true, MO_TEUL);
3660     case 0x1E:
3661         return trans_fstore_mod(ctx, insn);
3662     case 0x1F:
3663         return trans_store_w(ctx, insn);
3664 
3665     case 0x20:
3666         return trans_cmpb(ctx, insn, true, false, false);
3667     case 0x21:
3668         return trans_cmpb(ctx, insn, true, true, false);
3669     case 0x22:
3670         return trans_cmpb(ctx, insn, false, false, false);
3671     case 0x23:
3672         return trans_cmpb(ctx, insn, false, true, false);
3673     case 0x24:
3674         return trans_cmpiclr(ctx, insn);
3675     case 0x25:
3676         return trans_subi(ctx, insn);
3677     case 0x26:
3678         return trans_fmpyadd(ctx, insn, true);
3679     case 0x27:
3680         return trans_cmpb(ctx, insn, true, false, true);
3681     case 0x28:
3682         return trans_addb(ctx, insn, true, false);
3683     case 0x29:
3684         return trans_addb(ctx, insn, true, true);
3685     case 0x2A:
3686         return trans_addb(ctx, insn, false, false);
3687     case 0x2B:
3688         return trans_addb(ctx, insn, false, true);
3689     case 0x2C:
3690     case 0x2D:
3691         return trans_addi(ctx, insn);
3692     case 0x2E:
3693         return translate_table(ctx, insn, table_fp_fused);
3694     case 0x2F:
3695         return trans_cmpb(ctx, insn, false, false, true);
3696 
3697     case 0x30:
3698     case 0x31:
3699         return trans_bb(ctx, insn);
3700     case 0x32:
3701         return trans_movb(ctx, insn, false);
3702     case 0x33:
3703         return trans_movb(ctx, insn, true);
3704     case 0x34:
3705         return translate_table(ctx, insn, table_sh_ex);
3706     case 0x35:
3707         return translate_table(ctx, insn, table_depw);
3708     case 0x38:
3709         return trans_be(ctx, insn, false);
3710     case 0x39:
3711         return trans_be(ctx, insn, true);
3712     case 0x3A:
3713         return translate_table(ctx, insn, table_branch);
3714 
3715     case 0x04: /* spopn */
3716     case 0x05: /* diag */
3717     case 0x0F: /* product specific */
3718         break;
3719 
3720     case 0x07: /* unassigned */
3721     case 0x15: /* unassigned */
3722     case 0x1D: /* unassigned */
3723     case 0x37: /* unassigned */
3724     case 0x3F: /* unassigned */
3725     default:
3726         break;
3727     }
3728     return gen_illegal(ctx);
3729 }
3730 
3731 static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
3732                                       CPUState *cs, int max_insns)
3733 {
3734     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3735     TranslationBlock *tb = ctx->base.tb;
3736     int bound;
3737 
3738     ctx->cs = cs;
3739     ctx->iaoq_f = tb->pc;
3740     ctx->iaoq_b = tb->cs_base;
3741     ctx->iaoq_n = -1;
3742     ctx->iaoq_n_var = NULL;
3743 
3744     ctx->ntemps = 0;
3745     memset(ctx->temps, 0, sizeof(ctx->temps));
3746 
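    /* Bound the number of insns by the number remaining on the current
       page, at four bytes per instruction.  */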
3747     bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
3748     return MIN(max_insns, bound);
3749 }
3750 
3751 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3752 {
3753     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3754 
3755     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
3756     ctx->null_cond = cond_make_f();
3757     ctx->psw_n_nonzero = false;
3758     if (ctx->base.tb->flags & 1) {
3759         ctx->null_cond.c = TCG_COND_ALWAYS;
3760         ctx->psw_n_nonzero = true;
3761     }
3762     ctx->null_lab = NULL;
3763 }
3764 
3765 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3766 {
3767     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3768 
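    /* Record both words of the IA queue; restore_state_to_opc reads them
       back as data[0] and data[1].  */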
3769     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3770 }
3771 
3772 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3773                                       const CPUBreakpoint *bp)
3774 {
3775     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3776 
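    /* Generate the debug exception in place of the insn, and advance
       pc_next so the TB reports a non-zero size covering it.  */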
3777     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3778     ctx->base.pc_next = ctx->iaoq_f + 4;
3779     return true;
3780 }
3781 
3782 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3783 {
3784     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3785     CPUHPPAState *env = cs->env_ptr;
3786     DisasJumpType ret;
3787     int i, n;
3788 
3789     /* Execute one insn.  */
3790     if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
3791         ret = do_page_zero(ctx);
3792         assert(ret != DISAS_NEXT);
3793     } else {
3794         /* Always fetch the insn, even if nullified, so that we check
3795            the page permissions for execute.  */
3796         uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3797 
3798         /* Set up the IA queue for the next insn.
3799            This will be overwritten by a branch.  */
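        /* An iaoq_b of -1 means the back of the queue is only known at
           run time (held in cpu_iaoq_b), so the next address must be
           computed dynamically as well.  */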
3800         if (ctx->iaoq_b == -1) {
3801             ctx->iaoq_n = -1;
3802             ctx->iaoq_n_var = get_temp(ctx);
3803             tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3804         } else {
3805             ctx->iaoq_n = ctx->iaoq_b + 4;
3806             ctx->iaoq_n_var = NULL;
3807         }
3808 
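        /* If the insn is statically known to be nullified, skip it
           entirely and reset the state so the next insn executes.  */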
3809         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3810             ctx->null_cond.c = TCG_COND_NEVER;
3811             ret = DISAS_NEXT;
3812         } else {
3813             ret = translate_one(ctx, insn);
3814             assert(ctx->null_lab == NULL);
3815         }
3816     }
3817 
3818     /* Free any temporaries allocated.  */
3819     for (i = 0, n = ctx->ntemps; i < n; ++i) {
3820         tcg_temp_free(ctx->temps[i]);
3821         ctx->temps[i] = NULL;
3822     }
3823     ctx->ntemps = 0;
3824 
3825     /* Advance the insn queue.  */
3826     /* ??? The non-linear instruction restriction is purely due to
3827        the debugging dump.  Otherwise we *could* follow unconditional
3828        branches within the same page.  */
3829     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3830         if (ctx->null_cond.c == TCG_COND_NEVER
3831             || ctx->null_cond.c == TCG_COND_ALWAYS) {
3832             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3833             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3834             ret = DISAS_NORETURN;
3835         } else {
3836             ret = DISAS_IAQ_N_STALE;
3837         }
3838     }
3839     ctx->iaoq_f = ctx->iaoq_b;
3840     ctx->iaoq_b = ctx->iaoq_n;
3841     ctx->base.is_jmp = ret;
3842 
3843     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3844         return;
3845     }
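    /* If the front of the queue is now only known at run time, the TB
       must end: advance the queue in the CPU globals and note that the
       IAQ has already been updated.  */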
3846     if (ctx->iaoq_f == -1) {
3847         tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3848         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3849         nullify_save(ctx);
3850         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3851     } else if (ctx->iaoq_b == -1) {
3852         tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3853     }
3854 }
3855 
3856 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
3857 {
3858     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3859 
3860     switch (ctx->base.is_jmp) {
3861     case DISAS_NORETURN:
3862         break;
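    /* In these two cases the IA queue has not yet been written back to
       the CPU globals; do that now, along with PSW[N].  */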
3863     case DISAS_TOO_MANY:
3864     case DISAS_IAQ_N_STALE:
3865         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
3866         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
3867         nullify_save(ctx);
3868         /* FALLTHRU */
3869     case DISAS_IAQ_N_UPDATED:
3870         if (ctx->base.singlestep_enabled) {
3871             gen_excp_1(EXCP_DEBUG);
3872         } else {
3873             tcg_gen_lookup_and_goto_ptr();
3874         }
3875         break;
3876     default:
3877         g_assert_not_reached();
3878     }
3879 
3880     /* We don't actually use pc_next during normal translation, but the
3881        generic translator loop expects it to be kept up to date.  */
3882     ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
3883 }
3884 
3885 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3886 {
3887     TranslationBlock *tb = dcbase->tb;
3888 
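    /* The page-zero entry points are emulated by do_page_zero rather than
       translated from guest code, so log a symbolic name instead of
       disassembling.  */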
3889     switch (tb->pc) {
3890     case 0x00:
3891         qemu_log("IN:\n0x00000000:  (null)\n");
3892         break;
3893     case 0xb0:
3894         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
3895         break;
3896     case 0xe0:
3897         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
3898         break;
3899     case 0x100:
3900         qemu_log("IN:\n0x00000100:  syscall\n");
3901         break;
3902     default:
3903         qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3904         log_target_disas(cs, tb->pc, tb->size);
3905         break;
3906     }
3907 }
3908 
3909 static const TranslatorOps hppa_tr_ops = {
3910     .init_disas_context = hppa_tr_init_disas_context,
3911     .tb_start           = hppa_tr_tb_start,
3912     .insn_start         = hppa_tr_insn_start,
3913     .breakpoint_check   = hppa_tr_breakpoint_check,
3914     .translate_insn     = hppa_tr_translate_insn,
3915     .tb_stop            = hppa_tr_tb_stop,
3916     .disas_log          = hppa_tr_disas_log,
3917 };
3918 
3919 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3921 {
3922     DisasContext ctx;
3923     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3924 }
3925 
3926 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3927                           target_ulong *data)
3928 {
3929     env->iaoq_f = data[0];
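    /* A recorded iaoq_b of -1 means the back of the queue was only known
       at run time; env->iaoq_b already holds the correct value then.  */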
3930     if (data[1] != -1) {
3931         env->iaoq_b = data[1];
3932     }
3933     /* Since we were executing the instruction at IAOQ_F, and took some
3934        sort of action that provoked the cpu_restore_state, we can infer
3935        that the instruction was not nullified.  */
3936     env->psw_n = 0;
3937 }
3938