xref: /openbmc/qemu/target/hppa/translate.c (revision f9f46db4)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
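/* A DisasCond is a (possibly lazy) nullification condition: it holds
   when "A0 <c> A1" is true.  A0_IS_N means A0 aliases the cpu_psw_n
   global rather than a private temporary; A1_IS_0 means A1 is logically
   the constant zero and has not yet been materialized as a TCG value
   (cond_prep does that on demand).  */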
33 typedef struct DisasCond {
34     TCGCond c;
35     TCGv a0, a1;
36     bool a0_is_n;
37     bool a1_is_0;
38 } DisasCond;
39 
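/* Per-TB translation state.  IAOQ_F and IAOQ_B mirror the architectural
   front and back of the instruction address offset queue (the current
   insn and its successor); IAOQ_N is the address after that.  A value of
   -1 means the address is only known at run time, in which case it lives
   in IAOQ_N_VAR.  PSW_N_NONZERO tracks whether PSW[N] may currently be
   set, so that redundant clears of it can be elided.  */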
40 typedef struct DisasContext {
41     DisasContextBase base;
42     CPUState *cs;
43 
44     target_ulong iaoq_f;
45     target_ulong iaoq_b;
46     target_ulong iaoq_n;
47     TCGv iaoq_n_var;
48 
49     int ntemps;
50     TCGv temps[8];
51 
52     DisasCond null_cond;
53     TCGLabel *null_lab;
54 
55     bool psw_n_nonzero;
56 } DisasContext;
57 
58 /* Target-specific return values from translate_one, indicating the
59    state of the TB.  Note that DISAS_NEXT indicates that we are not
60    exiting the TB.  */
61 
62 /* We are not using a goto_tb (for whatever reason), but have already
63    updated the iaq, so don't do it again on exit.  */
64 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
65 
66 /* We are exiting the TB, but have neither emitted a goto_tb, nor
67    updated the iaq for the next instruction to be executed.  */
68 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
69 
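/* One entry of the decode tables used by translate_one: an entry is
   meant to match when (raw_insn & MASK) == INSN, and TRANS then emits
   code for it.  The union member names encode the operand types of the
   helper they carry: t = TCGv (target_ulong), w = TCGv_i32 word,
   d = TCGv_i64 doubleword, e = cpu_env; the first letter names the
   destination.  */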
70 typedef struct DisasInsn {
71     uint32_t insn, mask;
72     DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
73                            const struct DisasInsn *f);
74     union {
75         void (*ttt)(TCGv, TCGv, TCGv);
76         void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
77         void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
78         void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
79         void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
80         void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
81         void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
82     } f;
83 } DisasInsn;
84 
85 /* global register indexes */
86 static TCGv_env cpu_env;
87 static TCGv cpu_gr[32];
88 static TCGv cpu_iaoq_f;
89 static TCGv cpu_iaoq_b;
90 static TCGv cpu_sar;
91 static TCGv cpu_psw_n;
92 static TCGv cpu_psw_v;
93 static TCGv cpu_psw_cb;
94 static TCGv cpu_psw_cb_msb;
95 static TCGv cpu_cr26;
96 static TCGv cpu_cr27;
97 
98 #include "exec/gen-icount.h"
99 
100 void hppa_translate_init(void)
101 {
102 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
103 
104     typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
105     static const GlobalVar vars[] = {
106         DEF_VAR(sar),
107         DEF_VAR(cr26),
108         DEF_VAR(cr27),
109         DEF_VAR(psw_n),
110         DEF_VAR(psw_v),
111         DEF_VAR(psw_cb),
112         DEF_VAR(psw_cb_msb),
113         DEF_VAR(iaoq_f),
114         DEF_VAR(iaoq_b),
115     };
116 
117 #undef DEF_VAR
118 
119     /* Use the symbolic register names that match the disassembler.  */
120     static const char gr_names[32][4] = {
121         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
122         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
123         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
124         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
125     };
126 
127     int i;
128 
129     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
130     tcg_ctx.tcg_env = cpu_env;
131 
132     TCGV_UNUSED(cpu_gr[0]);
133     for (i = 1; i < 32; i++) {
134         cpu_gr[i] = tcg_global_mem_new(cpu_env,
135                                        offsetof(CPUHPPAState, gr[i]),
136                                        gr_names[i]);
137     }
138 
139     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
140         const GlobalVar *v = &vars[i];
141         *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
142     }
143 }
144 
145 static DisasCond cond_make_f(void)
146 {
147     DisasCond r = { .c = TCG_COND_NEVER };
148     TCGV_UNUSED(r.a0);
149     TCGV_UNUSED(r.a1);
150     return r;
151 }
152 
153 static DisasCond cond_make_n(void)
154 {
155     DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
156     r.a0 = cpu_psw_n;
157     TCGV_UNUSED(r.a1);
158     return r;
159 }
160 
161 static DisasCond cond_make_0(TCGCond c, TCGv a0)
162 {
163     DisasCond r = { .c = c, .a1_is_0 = true };
164 
165     assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
166     r.a0 = tcg_temp_new();
167     tcg_gen_mov_tl(r.a0, a0);
168     TCGV_UNUSED(r.a1);
169 
170     return r;
171 }
172 
173 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
174 {
175     DisasCond r = { .c = c };
176 
177     assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
178     r.a0 = tcg_temp_new();
179     tcg_gen_mov_tl(r.a0, a0);
180     r.a1 = tcg_temp_new();
181     tcg_gen_mov_tl(r.a1, a1);
182 
183     return r;
184 }
185 
186 static void cond_prep(DisasCond *cond)
187 {
188     if (cond->a1_is_0) {
189         cond->a1_is_0 = false;
190         cond->a1 = tcg_const_tl(0);
191     }
192 }
193 
194 static void cond_free(DisasCond *cond)
195 {
196     switch (cond->c) {
197     default:
198         if (!cond->a0_is_n) {
199             tcg_temp_free(cond->a0);
200         }
201         if (!cond->a1_is_0) {
202             tcg_temp_free(cond->a1);
203         }
204         cond->a0_is_n = false;
205         cond->a1_is_0 = false;
206         TCGV_UNUSED(cond->a0);
207         TCGV_UNUSED(cond->a1);
208         /* fallthru */
209     case TCG_COND_ALWAYS:
210         cond->c = TCG_COND_NEVER;
211         break;
212     case TCG_COND_NEVER:
213         break;
214     }
215 }
216 
217 static TCGv get_temp(DisasContext *ctx)
218 {
219     unsigned i = ctx->ntemps++;
220     g_assert(i < ARRAY_SIZE(ctx->temps));
221     return ctx->temps[i] = tcg_temp_new();
222 }
223 
224 static TCGv load_const(DisasContext *ctx, target_long v)
225 {
226     TCGv t = get_temp(ctx);
227     tcg_gen_movi_tl(t, v);
228     return t;
229 }
230 
231 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
232 {
233     if (reg == 0) {
234         TCGv t = get_temp(ctx);
235         tcg_gen_movi_tl(t, 0);
236         return t;
237     } else {
238         return cpu_gr[reg];
239     }
240 }
241 
242 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
243 {
244     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
245         return get_temp(ctx);
246     } else {
247         return cpu_gr[reg];
248     }
249 }
250 
251 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
252 {
253     if (ctx->null_cond.c != TCG_COND_NEVER) {
254         cond_prep(&ctx->null_cond);
255         tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
256                            ctx->null_cond.a1, dest, t);
257     } else {
258         tcg_gen_mov_tl(dest, t);
259     }
260 }
261 
262 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
263 {
264     if (reg != 0) {
265         save_or_nullify(ctx, cpu_gr[reg], t);
266     }
267 }
268 
269 #ifdef HOST_WORDS_BIGENDIAN
270 # define HI_OFS  0
271 # define LO_OFS  4
272 #else
273 # define HI_OFS  4
274 # define LO_OFS  0
275 #endif
276 
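/* The floating point registers are 64 bits wide, but are also addressed
   as 64 single-precision halves: numbers 0-31 select the most significant
   (left) word of fr[n & 31] and numbers 32-63 the least significant
   (right) word, hence the "rt & 32" tests below.  HI_OFS/LO_OFS locate
   the wanted half within the in-memory uint64_t for the host endianness.  */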
277 static TCGv_i32 load_frw_i32(unsigned rt)
278 {
279     TCGv_i32 ret = tcg_temp_new_i32();
280     tcg_gen_ld_i32(ret, cpu_env,
281                    offsetof(CPUHPPAState, fr[rt & 31])
282                    + (rt & 32 ? LO_OFS : HI_OFS));
283     return ret;
284 }
285 
286 static TCGv_i32 load_frw0_i32(unsigned rt)
287 {
288     if (rt == 0) {
289         return tcg_const_i32(0);
290     } else {
291         return load_frw_i32(rt);
292     }
293 }
294 
295 static TCGv_i64 load_frw0_i64(unsigned rt)
296 {
297     if (rt == 0) {
298         return tcg_const_i64(0);
299     } else {
300         TCGv_i64 ret = tcg_temp_new_i64();
301         tcg_gen_ld32u_i64(ret, cpu_env,
302                           offsetof(CPUHPPAState, fr[rt & 31])
303                           + (rt & 32 ? LO_OFS : HI_OFS));
304         return ret;
305     }
306 }
307 
308 static void save_frw_i32(unsigned rt, TCGv_i32 val)
309 {
310     tcg_gen_st_i32(val, cpu_env,
311                    offsetof(CPUHPPAState, fr[rt & 31])
312                    + (rt & 32 ? LO_OFS : HI_OFS));
313 }
314 
315 #undef HI_OFS
316 #undef LO_OFS
317 
318 static TCGv_i64 load_frd(unsigned rt)
319 {
320     TCGv_i64 ret = tcg_temp_new_i64();
321     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
322     return ret;
323 }
324 
325 static TCGv_i64 load_frd0(unsigned rt)
326 {
327     if (rt == 0) {
328         return tcg_const_i64(0);
329     } else {
330         return load_frd(rt);
331     }
332 }
333 
334 static void save_frd(unsigned rt, TCGv_i64 val)
335 {
336     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
337 }
338 
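/* Nullification handling.  Throughout translation, ctx->null_cond holds
   the condition under which the insn currently being emitted is itself
   nullified (skipped).  Cheap insns fold that condition into their
   writeback with save_or_nullify/movcond; insns too complex for that
   bracket their implementation with nullify_over/nullify_end, which
   branch around the emitted code instead.  */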
339 /* Skip over the implementation of an insn that has been nullified.
340    Use this when the insn is too complex for a conditional move.  */
341 static void nullify_over(DisasContext *ctx)
342 {
343     if (ctx->null_cond.c != TCG_COND_NEVER) {
344         /* The always condition should have been handled in the main loop.  */
345         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
346 
347         ctx->null_lab = gen_new_label();
348         cond_prep(&ctx->null_cond);
349 
350         /* If we're using PSW[N], copy it to a temp because... */
351         if (ctx->null_cond.a0_is_n) {
352             ctx->null_cond.a0_is_n = false;
353             ctx->null_cond.a0 = tcg_temp_new();
354             tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
355         }
356         /* ... we clear it before branching over the implementation,
357            so that (1) it's clear after nullifying this insn and
358            (2) if this insn nullifies the next, PSW[N] is valid.  */
359         if (ctx->psw_n_nonzero) {
360             ctx->psw_n_nonzero = false;
361             tcg_gen_movi_tl(cpu_psw_n, 0);
362         }
363 
364         tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
365                           ctx->null_cond.a1, ctx->null_lab);
366         cond_free(&ctx->null_cond);
367     }
368 }
369 
370 /* Save the current nullification state to PSW[N].  */
371 static void nullify_save(DisasContext *ctx)
372 {
373     if (ctx->null_cond.c == TCG_COND_NEVER) {
374         if (ctx->psw_n_nonzero) {
375             tcg_gen_movi_tl(cpu_psw_n, 0);
376         }
377         return;
378     }
379     if (!ctx->null_cond.a0_is_n) {
380         cond_prep(&ctx->null_cond);
381         tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
382                            ctx->null_cond.a0, ctx->null_cond.a1);
383         ctx->psw_n_nonzero = true;
384     }
385     cond_free(&ctx->null_cond);
386 }
387 
388 /* Set PSW[N] to X.  The intention is that this is used immediately
389    before a goto_tb/exit_tb, so that there is no fallthru path to other
390    code within the TB.  Therefore we do not update psw_n_nonzero.  */
391 static void nullify_set(DisasContext *ctx, bool x)
392 {
393     if (ctx->psw_n_nonzero || x) {
394         tcg_gen_movi_tl(cpu_psw_n, x);
395     }
396 }
397 
398 /* Mark the end of an instruction that may have been nullified.
399    This is the pair to nullify_over.  */
400 static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
401 {
402     TCGLabel *null_lab = ctx->null_lab;
403 
404     if (likely(null_lab == NULL)) {
405         /* The current insn wasn't conditional, or handled the condition
406            applied to it without a branch, so the (new) setting of
407            NULL_COND can be applied directly to the next insn.  */
408         return status;
409     }
410     ctx->null_lab = NULL;
411 
412     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
413         /* The next instruction will be unconditional,
414            and NULL_COND already reflects that.  */
415         gen_set_label(null_lab);
416     } else {
417         /* The insn that we just executed is itself nullifying the next
418            instruction.  Store the condition in the PSW[N] global.
419            We asserted PSW[N] = 0 in nullify_over, so that after the
420            label we have the proper value in place.  */
421         nullify_save(ctx);
422         gen_set_label(null_lab);
423         ctx->null_cond = cond_make_n();
424     }
425 
426     assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
427     if (status == DISAS_NORETURN) {
428         status = DISAS_NEXT;
429     }
430     return status;
431 }
432 
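/* Copy one IAOQ slot: IVAL == -1 is the sentinel for "only known at run
   time", in which case the value is taken from VVAL instead.  */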
433 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
434 {
435     if (unlikely(ival == -1)) {
436         tcg_gen_mov_tl(dest, vval);
437     } else {
438         tcg_gen_movi_tl(dest, ival);
439     }
440 }
441 
442 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
443 {
444     return ctx->iaoq_f + disp + 8;
445 }
446 
447 static void gen_excp_1(int exception)
448 {
449     TCGv_i32 t = tcg_const_i32(exception);
450     gen_helper_excp(cpu_env, t);
451     tcg_temp_free_i32(t);
452 }
453 
454 static DisasJumpType gen_excp(DisasContext *ctx, int exception)
455 {
456     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
457     copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
458     nullify_save(ctx);
459     gen_excp_1(exception);
460     return DISAS_NORETURN;
461 }
462 
463 static DisasJumpType gen_illegal(DisasContext *ctx)
464 {
465     nullify_over(ctx);
466     return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
467 }
468 
469 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
470 {
471     /* Suppress goto_tb in the case of single-stepping and I/O.  */
472     if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
473         return false;
474     }
475     return true;
476 }
477 
478 /* If the next insn is to be nullified, and it's on the same page,
479    and we're not attempting to set a breakpoint on it, then we can
480    totally skip the nullified insn.  This avoids creating and
481    executing a TB that merely branches to the next TB.  */
482 static bool use_nullify_skip(DisasContext *ctx)
483 {
484     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
485             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
486 }
487 
488 static void gen_goto_tb(DisasContext *ctx, int which,
489                         target_ulong f, target_ulong b)
490 {
491     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
492         tcg_gen_goto_tb(which);
493         tcg_gen_movi_tl(cpu_iaoq_f, f);
494         tcg_gen_movi_tl(cpu_iaoq_b, b);
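        /* Exiting with (tb + which) lets the main loop patch this jump
           slot so the two TBs can be chained directly.  */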
495         tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
496     } else {
497         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
498         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
499         if (ctx->base.singlestep_enabled) {
500             gen_excp_1(EXCP_DEBUG);
501         } else {
502             tcg_gen_lookup_and_goto_ptr();
503         }
504     }
505 }
506 
507 /* PA has a habit of taking the LSB of a field and using that as the sign,
508    with the rest of the field becoming the least significant bits.  */
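/* E.g. with pos = 0 and len = 5, an encoding with bit 0 (the sign) set
   and bits 4:1 equal to 0b0001 decodes to (-1 << 4) | 1 = -15.  */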
509 static target_long low_sextract(uint32_t val, int pos, int len)
510 {
511     target_ulong x = -(target_ulong)extract32(val, pos, 1);
512     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
513     return x;
514 }
515 
516 static unsigned assemble_rt64(uint32_t insn)
517 {
518     unsigned r1 = extract32(insn, 6, 1);
519     unsigned r0 = extract32(insn, 0, 5);
520     return r1 * 32 + r0;
521 }
522 
523 static unsigned assemble_ra64(uint32_t insn)
524 {
525     unsigned r1 = extract32(insn, 7, 1);
526     unsigned r0 = extract32(insn, 21, 5);
527     return r1 * 32 + r0;
528 }
529 
530 static unsigned assemble_rb64(uint32_t insn)
531 {
532     unsigned r1 = extract32(insn, 12, 1);
533     unsigned r0 = extract32(insn, 16, 5);
534     return r1 * 32 + r0;
535 }
536 
537 static unsigned assemble_rc64(uint32_t insn)
538 {
539     unsigned r2 = extract32(insn, 8, 1);
540     unsigned r1 = extract32(insn, 13, 3);
541     unsigned r0 = extract32(insn, 9, 2);
542     return r2 * 32 + r1 * 4 + r0;
543 }
544 
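/* The assemble_<n> helpers below reconstruct branch displacements and
   immediates whose bits are scattered across the instruction word; as
   described for low_sextract above, bit 0 of the word supplies the sign.  */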
545 static target_long assemble_12(uint32_t insn)
546 {
547     target_ulong x = -(target_ulong)(insn & 1);
548     x = (x <<  1) | extract32(insn, 2, 1);
549     x = (x << 10) | extract32(insn, 3, 10);
550     return x;
551 }
552 
553 static target_long assemble_16(uint32_t insn)
554 {
555     /* Take the name from PA2.0, which produces a 16-bit number
556        only with wide mode; otherwise a 14-bit number.  Since we don't
557        implement wide mode, this is always the 14-bit number.  */
558     return low_sextract(insn, 0, 14);
559 }
560 
561 static target_long assemble_16a(uint32_t insn)
562 {
563     /* Take the name from PA2.0, which produces a 14-bit shifted number
564        only with wide mode; otherwise a 12-bit shifted number.  Since we
565        don't implement wide mode, this is always the 12-bit number.  */
566     target_ulong x = -(target_ulong)(insn & 1);
567     x = (x << 11) | extract32(insn, 2, 11);
568     return x << 2;
569 }
570 
571 static target_long assemble_17(uint32_t insn)
572 {
573     target_ulong x = -(target_ulong)(insn & 1);
574     x = (x <<  5) | extract32(insn, 16, 5);
575     x = (x <<  1) | extract32(insn, 2, 1);
576     x = (x << 10) | extract32(insn, 3, 10);
577     return x << 2;
578 }
579 
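/* The 21-bit immediate is assembled, from most to least significant,
   out of insn bit 0 (sign), bits 11:1, 15:14, 20:16 and 13:12, and is
   then shifted left by 11 to supply the upper bits of a constant.  */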
580 static target_long assemble_21(uint32_t insn)
581 {
582     target_ulong x = -(target_ulong)(insn & 1);
583     x = (x << 11) | extract32(insn, 1, 11);
584     x = (x <<  2) | extract32(insn, 14, 2);
585     x = (x <<  5) | extract32(insn, 16, 5);
586     x = (x <<  2) | extract32(insn, 12, 2);
587     return x << 11;
588 }
589 
590 static target_long assemble_22(uint32_t insn)
591 {
592     target_ulong x = -(target_ulong)(insn & 1);
593     x = (x << 10) | extract32(insn, 16, 10);
594     x = (x <<  1) | extract32(insn, 2, 1);
595     x = (x << 10) | extract32(insn, 3, 10);
596     return x << 2;
597 }
598 
599 /* The parisc documentation describes only the general interpretation of
600    the conditions, without describing their exact implementation.  The
601    interpretations do not stand up well when considering ADD,C and SUB,B.
602    However, considering the Addition, Subtraction and Logical conditions
603    as a whole it would appear that these relations are similar to what
604    a traditional NZCV set of flags would produce.  */
605 
606 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
607 {
608     DisasCond cond;
609     TCGv tmp;
610 
611     switch (cf >> 1) {
612     case 0: /* Never / TR */
613         cond = cond_make_f();
614         break;
615     case 1: /* = / <>        (Z / !Z) */
616         cond = cond_make_0(TCG_COND_EQ, res);
617         break;
618     case 2: /* < / >=        (N / !N) */
619         cond = cond_make_0(TCG_COND_LT, res);
620         break;
621     case 3: /* <= / >        (N | Z / !N & !Z) */
622         cond = cond_make_0(TCG_COND_LE, res);
623         break;
624     case 4: /* NUV / UV      (!C / C) */
625         cond = cond_make_0(TCG_COND_EQ, cb_msb);
626         break;
627     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
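        /* TMP = RES & -CB_MSB is zero iff there was no carry out of the
           MSB or the result is zero, i.e. !C | Z.  */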
628         tmp = tcg_temp_new();
629         tcg_gen_neg_tl(tmp, cb_msb);
630         tcg_gen_and_tl(tmp, tmp, res);
631         cond = cond_make_0(TCG_COND_EQ, tmp);
632         tcg_temp_free(tmp);
633         break;
634     case 6: /* SV / NSV      (V / !V) */
635         cond = cond_make_0(TCG_COND_LT, sv);
636         break;
637     case 7: /* OD / EV */
638         tmp = tcg_temp_new();
639         tcg_gen_andi_tl(tmp, res, 1);
640         cond = cond_make_0(TCG_COND_NE, tmp);
641         tcg_temp_free(tmp);
642         break;
643     default:
644         g_assert_not_reached();
645     }
646     if (cf & 1) {
647         cond.c = tcg_invert_cond(cond.c);
648     }
649 
650     return cond;
651 }
652 
653 /* Similar, but for the special case of subtraction without borrow, we
654    can use the inputs directly.  This can allow other computation to be
655    deleted as unused.  */
656 
657 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
658 {
659     DisasCond cond;
660 
661     switch (cf >> 1) {
662     case 1: /* = / <> */
663         cond = cond_make(TCG_COND_EQ, in1, in2);
664         break;
665     case 2: /* < / >= */
666         cond = cond_make(TCG_COND_LT, in1, in2);
667         break;
668     case 3: /* <= / > */
669         cond = cond_make(TCG_COND_LE, in1, in2);
670         break;
671     case 4: /* << / >>= */
672         cond = cond_make(TCG_COND_LTU, in1, in2);
673         break;
674     case 5: /* <<= / >> */
675         cond = cond_make(TCG_COND_LEU, in1, in2);
676         break;
677     default:
678         return do_cond(cf, res, sv, sv);
679     }
680     if (cf & 1) {
681         cond.c = tcg_invert_cond(cond.c);
682     }
683 
684     return cond;
685 }
686 
687 /* Similar, but for logicals, where the carry and overflow bits are not
688    computed, and use of them is undefined.  */
689 
690 static DisasCond do_log_cond(unsigned cf, TCGv res)
691 {
692     switch (cf >> 1) {
693     case 4: case 5: case 6:
694         cf &= 1;
695         break;
696     }
697     return do_cond(cf, res, res, res);
698 }
699 
700 /* Similar, but for shift/extract/deposit conditions.  */
701 
702 static DisasCond do_sed_cond(unsigned orig, TCGv res)
703 {
704     unsigned c, f;
705 
706     /* Convert the compressed condition codes to standard.
707        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
708        4-7 are the reverse of 0-3.  */
709     c = orig & 3;
710     if (c == 3) {
711         c = 7;
712     }
713     f = (orig & 4) / 4;
714 
715     return do_log_cond(c * 2 + f, res);
716 }
717 
718 /* Similar, but for unit conditions.  */
719 
720 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
721 {
722     DisasCond cond;
723     TCGv tmp, cb;
724 
725     TCGV_UNUSED(cb);
726     if (cf & 8) {
727         /* Since we want to test lots of carry-out bits all at once, do not
728          * do our normal thing and compute carry-in of bit B+1 since that
729          * leaves us with carry bits spread across two words.
730          */
731         cb = tcg_temp_new();
732         tmp = tcg_temp_new();
733         tcg_gen_or_tl(cb, in1, in2);
734         tcg_gen_and_tl(tmp, in1, in2);
735         tcg_gen_andc_tl(cb, cb, res);
736         tcg_gen_or_tl(cb, cb, tmp);
737         tcg_temp_free(tmp);
738     }
739 
740     switch (cf >> 1) {
741     case 0: /* never / TR */
742     case 1: /* undefined */
743     case 5: /* undefined */
744         cond = cond_make_f();
745         break;
746 
747     case 2: /* SBZ / NBZ */
748         /* See hasless(v,1) from
749          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
750          */
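        /* E.g. RES = 0x12003456 contains a zero byte, and
           (RES - 0x01010101) & ~RES & 0x80808080 == 0x00800000 != 0.  */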
751         tmp = tcg_temp_new();
752         tcg_gen_subi_tl(tmp, res, 0x01010101u);
753         tcg_gen_andc_tl(tmp, tmp, res);
754         tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
755         cond = cond_make_0(TCG_COND_NE, tmp);
756         tcg_temp_free(tmp);
757         break;
758 
759     case 3: /* SHZ / NHZ */
760         tmp = tcg_temp_new();
761         tcg_gen_subi_tl(tmp, res, 0x00010001u);
762         tcg_gen_andc_tl(tmp, tmp, res);
763         tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
764         cond = cond_make_0(TCG_COND_NE, tmp);
765         tcg_temp_free(tmp);
766         break;
767 
768     case 4: /* SDC / NDC */
769         tcg_gen_andi_tl(cb, cb, 0x88888888u);
770         cond = cond_make_0(TCG_COND_NE, cb);
771         break;
772 
773     case 6: /* SBC / NBC */
774         tcg_gen_andi_tl(cb, cb, 0x80808080u);
775         cond = cond_make_0(TCG_COND_NE, cb);
776         break;
777 
778     case 7: /* SHC / NHC */
779         tcg_gen_andi_tl(cb, cb, 0x80008000u);
780         cond = cond_make_0(TCG_COND_NE, cb);
781         break;
782 
783     default:
784         g_assert_not_reached();
785     }
786     if (cf & 8) {
787         tcg_temp_free(cb);
788     }
789     if (cf & 1) {
790         cond.c = tcg_invert_cond(cond.c);
791     }
792 
793     return cond;
794 }
795 
796 /* Compute signed overflow for addition.  */
797 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
798 {
799     TCGv sv = get_temp(ctx);
800     TCGv tmp = tcg_temp_new();
801 
802     tcg_gen_xor_tl(sv, res, in1);
803     tcg_gen_xor_tl(tmp, in1, in2);
804     tcg_gen_andc_tl(sv, sv, tmp);
805     tcg_temp_free(tmp);
806 
807     return sv;
808 }
809 
810 /* Compute signed overflow for subtraction.  */
811 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
812 {
813     TCGv sv = get_temp(ctx);
814     TCGv tmp = tcg_temp_new();
815 
816     tcg_gen_xor_tl(sv, res, in1);
817     tcg_gen_xor_tl(tmp, in1, in2);
818     tcg_gen_and_tl(sv, sv, tmp);
819     tcg_temp_free(tmp);
820 
821     return sv;
822 }
823 
824 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
825                             unsigned shift, bool is_l, bool is_tsv, bool is_tc,
826                             bool is_c, unsigned cf)
827 {
828     TCGv dest, cb, cb_msb, sv, tmp;
829     unsigned c = cf >> 1;
830     DisasCond cond;
831 
832     dest = tcg_temp_new();
833     TCGV_UNUSED(cb);
834     TCGV_UNUSED(cb_msb);
835 
836     if (shift) {
837         tmp = get_temp(ctx);
838         tcg_gen_shli_tl(tmp, in1, shift);
839         in1 = tmp;
840     }
841 
842     if (!is_l || c == 4 || c == 5) {
843         TCGv zero = tcg_const_tl(0);
844         cb_msb = get_temp(ctx);
845         tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
846         if (is_c) {
847             tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
848         }
849         tcg_temp_free(zero);
850         if (!is_l) {
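            /* CB = IN1 ^ IN2 ^ DEST recovers the carry into each bit of
               the sum; CB_MSB already holds the carry out of the MSB.  */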
851             cb = get_temp(ctx);
852             tcg_gen_xor_tl(cb, in1, in2);
853             tcg_gen_xor_tl(cb, cb, dest);
854         }
855     } else {
856         tcg_gen_add_tl(dest, in1, in2);
857         if (is_c) {
858             tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
859         }
860     }
861 
862     /* Compute signed overflow if required.  */
863     TCGV_UNUSED(sv);
864     if (is_tsv || c == 6) {
865         sv = do_add_sv(ctx, dest, in1, in2);
866         if (is_tsv) {
867             /* ??? Need to include overflow from shift.  */
868             gen_helper_tsv(cpu_env, sv);
869         }
870     }
871 
872     /* Emit any conditional trap before any writeback.  */
873     cond = do_cond(cf, dest, cb_msb, sv);
874     if (is_tc) {
875         cond_prep(&cond);
876         tmp = tcg_temp_new();
877         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
878         gen_helper_tcond(cpu_env, tmp);
879         tcg_temp_free(tmp);
880     }
881 
882     /* Write back the result.  */
883     if (!is_l) {
884         save_or_nullify(ctx, cpu_psw_cb, cb);
885         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
886     }
887     save_gpr(ctx, rt, dest);
888     tcg_temp_free(dest);
889 
890     /* Install the new nullification.  */
891     cond_free(&ctx->null_cond);
892     ctx->null_cond = cond;
893     return DISAS_NEXT;
894 }
895 
896 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
897                             bool is_tsv, bool is_b, bool is_tc, unsigned cf)
898 {
899     TCGv dest, sv, cb, cb_msb, zero, tmp;
900     unsigned c = cf >> 1;
901     DisasCond cond;
902 
903     dest = tcg_temp_new();
904     cb = tcg_temp_new();
905     cb_msb = tcg_temp_new();
906 
907     zero = tcg_const_tl(0);
908     if (is_b) {
909         /* DEST,C = IN1 + ~IN2 + C.  */
910         tcg_gen_not_tl(cb, in2);
911         tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
912         tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
913         tcg_gen_xor_tl(cb, cb, in1);
914         tcg_gen_xor_tl(cb, cb, dest);
915     } else {
916         /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
917            operations by seeding the high word with 1 and subtracting.  */
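        /* (1:IN1) - (0:IN2) leaves IN1 - IN2 in the low word and
           1 - borrow, i.e. the carry out of IN1 + ~IN2 + 1, in the high
           word.  CB likewise recovers the per-bit carries, since
           ~(IN1 ^ IN2) ^ DEST == IN1 ^ ~IN2 ^ DEST.  */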
918         tcg_gen_movi_tl(cb_msb, 1);
919         tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
920         tcg_gen_eqv_tl(cb, in1, in2);
921         tcg_gen_xor_tl(cb, cb, dest);
922     }
923     tcg_temp_free(zero);
924 
925     /* Compute signed overflow if required.  */
926     TCGV_UNUSED(sv);
927     if (is_tsv || c == 6) {
928         sv = do_sub_sv(ctx, dest, in1, in2);
929         if (is_tsv) {
930             gen_helper_tsv(cpu_env, sv);
931         }
932     }
933 
934     /* Compute the condition.  We cannot use the special case for borrow.  */
935     if (!is_b) {
936         cond = do_sub_cond(cf, dest, in1, in2, sv);
937     } else {
938         cond = do_cond(cf, dest, cb_msb, sv);
939     }
940 
941     /* Emit any conditional trap before any writeback.  */
942     if (is_tc) {
943         cond_prep(&cond);
944         tmp = tcg_temp_new();
945         tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
946         gen_helper_tcond(cpu_env, tmp);
947         tcg_temp_free(tmp);
948     }
949 
950     /* Write back the result.  */
951     save_or_nullify(ctx, cpu_psw_cb, cb);
952     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
953     save_gpr(ctx, rt, dest);
954     tcg_temp_free(dest);
955 
956     /* Install the new nullification.  */
957     cond_free(&ctx->null_cond);
958     ctx->null_cond = cond;
959     return DISAS_NEXT;
960 }
961 
962 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
963                                TCGv in2, unsigned cf)
964 {
965     TCGv dest, sv;
966     DisasCond cond;
967 
968     dest = tcg_temp_new();
969     tcg_gen_sub_tl(dest, in1, in2);
970 
971     /* Compute signed overflow if required.  */
972     TCGV_UNUSED(sv);
973     if ((cf >> 1) == 6) {
974         sv = do_sub_sv(ctx, dest, in1, in2);
975     }
976 
977     /* Form the condition for the compare.  */
978     cond = do_sub_cond(cf, dest, in1, in2, sv);
979 
980     /* Clear.  */
981     tcg_gen_movi_tl(dest, 0);
982     save_gpr(ctx, rt, dest);
983     tcg_temp_free(dest);
984 
985     /* Install the new nullification.  */
986     cond_free(&ctx->null_cond);
987     ctx->null_cond = cond;
988     return DISAS_NEXT;
989 }
990 
991 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
992                             unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
993 {
994     TCGv dest = dest_gpr(ctx, rt);
995 
996     /* Perform the operation, and writeback.  */
997     fn(dest, in1, in2);
998     save_gpr(ctx, rt, dest);
999 
1000     /* Install the new nullification.  */
1001     cond_free(&ctx->null_cond);
1002     if (cf) {
1003         ctx->null_cond = do_log_cond(cf, dest);
1004     }
1005     return DISAS_NEXT;
1006 }
1007 
1008 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1009                              TCGv in2, unsigned cf, bool is_tc,
1010                              void (*fn)(TCGv, TCGv, TCGv))
1011 {
1012     TCGv dest;
1013     DisasCond cond;
1014 
1015     if (cf == 0) {
1016         dest = dest_gpr(ctx, rt);
1017         fn(dest, in1, in2);
1018         save_gpr(ctx, rt, dest);
1019         cond_free(&ctx->null_cond);
1020     } else {
1021         dest = tcg_temp_new();
1022         fn(dest, in1, in2);
1023 
1024         cond = do_unit_cond(cf, dest, in1, in2);
1025 
1026         if (is_tc) {
1027             TCGv tmp = tcg_temp_new();
1028             cond_prep(&cond);
1029             tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1030             gen_helper_tcond(cpu_env, tmp);
1031             tcg_temp_free(tmp);
1032         }
1033         save_gpr(ctx, rt, dest);
1034 
1035         cond_free(&ctx->null_cond);
1036         ctx->null_cond = cond;
1037     }
1038     return DISAS_NEXT;
1039 }
1040 
1041 /* Emit a memory load.  The modify parameter should be
1042  * < 0 for pre-modify,
1043  * > 0 for post-modify,
1044  * = 0 for no base register update.
1045  */
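/* For both the load and store helpers below, a pre-modify access uses the
   newly computed address while a post-modify access uses the original base;
   in either case the computed address is written back to RB.  */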
1046 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1047                        unsigned rx, int scale, target_long disp,
1048                        int modify, TCGMemOp mop)
1049 {
1050     TCGv addr, base;
1051 
1052     /* Caller uses nullify_over/nullify_end.  */
1053     assert(ctx->null_cond.c == TCG_COND_NEVER);
1054 
1055     addr = tcg_temp_new();
1056     base = load_gpr(ctx, rb);
1057 
1058     /* Note that RX is mutually exclusive with DISP.  */
1059     if (rx) {
1060         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1061         tcg_gen_add_tl(addr, addr, base);
1062     } else {
1063         tcg_gen_addi_tl(addr, base, disp);
1064     }
1065 
1066     if (modify == 0) {
1067         tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1068     } else {
1069         tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1070                             MMU_USER_IDX, mop);
1071         save_gpr(ctx, rb, addr);
1072     }
1073     tcg_temp_free(addr);
1074 }
1075 
1076 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1077                        unsigned rx, int scale, target_long disp,
1078                        int modify, TCGMemOp mop)
1079 {
1080     TCGv addr, base;
1081 
1082     /* Caller uses nullify_over/nullify_end.  */
1083     assert(ctx->null_cond.c == TCG_COND_NEVER);
1084 
1085     addr = tcg_temp_new();
1086     base = load_gpr(ctx, rb);
1087 
1088     /* Note that RX is mutually exclusive with DISP.  */
1089     if (rx) {
1090         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1091         tcg_gen_add_tl(addr, addr, base);
1092     } else {
1093         tcg_gen_addi_tl(addr, base, disp);
1094     }
1095 
1096     if (modify == 0) {
1097         tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1098     } else {
1099         tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1100                             MMU_USER_IDX, mop);
1101         save_gpr(ctx, rb, addr);
1102     }
1103     tcg_temp_free(addr);
1104 }
1105 
1106 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1107                         unsigned rx, int scale, target_long disp,
1108                         int modify, TCGMemOp mop)
1109 {
1110     TCGv addr, base;
1111 
1112     /* Caller uses nullify_over/nullify_end.  */
1113     assert(ctx->null_cond.c == TCG_COND_NEVER);
1114 
1115     addr = tcg_temp_new();
1116     base = load_gpr(ctx, rb);
1117 
1118     /* Note that RX is mutually exclusive with DISP.  */
1119     if (rx) {
1120         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1121         tcg_gen_add_tl(addr, addr, base);
1122     } else {
1123         tcg_gen_addi_tl(addr, base, disp);
1124     }
1125 
1126     tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1127 
1128     if (modify != 0) {
1129         save_gpr(ctx, rb, addr);
1130     }
1131     tcg_temp_free(addr);
1132 }
1133 
1134 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1135                         unsigned rx, int scale, target_long disp,
1136                         int modify, TCGMemOp mop)
1137 {
1138     TCGv addr, base;
1139 
1140     /* Caller uses nullify_over/nullify_end.  */
1141     assert(ctx->null_cond.c == TCG_COND_NEVER);
1142 
1143     addr = tcg_temp_new();
1144     base = load_gpr(ctx, rb);
1145 
1146     /* Note that RX is mutually exclusive with DISP.  */
1147     if (rx) {
1148         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1149         tcg_gen_add_tl(addr, addr, base);
1150     } else {
1151         tcg_gen_addi_tl(addr, base, disp);
1152     }
1153 
1154     tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1155 
1156     if (modify != 0) {
1157         save_gpr(ctx, rb, addr);
1158     }
1159     tcg_temp_free(addr);
1160 }
1161 
1162 #if TARGET_LONG_BITS == 64
1163 #define do_load_tl  do_load_64
1164 #define do_store_tl do_store_64
1165 #else
1166 #define do_load_tl  do_load_32
1167 #define do_store_tl do_store_32
1168 #endif
1169 
1170 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1171                              unsigned rx, int scale, target_long disp,
1172                              int modify, TCGMemOp mop)
1173 {
1174     TCGv dest;
1175 
1176     nullify_over(ctx);
1177 
1178     if (modify == 0) {
1179         /* No base register update.  */
1180         dest = dest_gpr(ctx, rt);
1181     } else {
1182         /* Make sure that if RT == RB, we see the result of the load.  */
1183         dest = get_temp(ctx);
1184     }
1185     do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1186     save_gpr(ctx, rt, dest);
1187 
1188     return nullify_end(ctx, DISAS_NEXT);
1189 }
1190 
1191 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1192                                unsigned rx, int scale, target_long disp,
1193                                int modify)
1194 {
1195     TCGv_i32 tmp;
1196 
1197     nullify_over(ctx);
1198 
1199     tmp = tcg_temp_new_i32();
1200     do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1201     save_frw_i32(rt, tmp);
1202     tcg_temp_free_i32(tmp);
1203 
1204     if (rt == 0) {
1205         gen_helper_loaded_fr0(cpu_env);
1206     }
1207 
1208     return nullify_end(ctx, DISAS_NEXT);
1209 }
1210 
1211 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1212                                unsigned rx, int scale, target_long disp,
1213                                int modify)
1214 {
1215     TCGv_i64 tmp;
1216 
1217     nullify_over(ctx);
1218 
1219     tmp = tcg_temp_new_i64();
1220     do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1221     save_frd(rt, tmp);
1222     tcg_temp_free_i64(tmp);
1223 
1224     if (rt == 0) {
1225         gen_helper_loaded_fr0(cpu_env);
1226     }
1227 
1228     return nullify_end(ctx, DISAS_NEXT);
1229 }
1230 
1231 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1232                               target_long disp, int modify, TCGMemOp mop)
1233 {
1234     nullify_over(ctx);
1235     do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1236     return nullify_end(ctx, DISAS_NEXT);
1237 }
1238 
1239 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1240                                 unsigned rx, int scale, target_long disp,
1241                                 int modify)
1242 {
1243     TCGv_i32 tmp;
1244 
1245     nullify_over(ctx);
1246 
1247     tmp = load_frw_i32(rt);
1248     do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1249     tcg_temp_free_i32(tmp);
1250 
1251     return nullify_end(ctx, DISAS_NEXT);
1252 }
1253 
1254 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1255                                 unsigned rx, int scale, target_long disp,
1256                                 int modify)
1257 {
1258     TCGv_i64 tmp;
1259 
1260     nullify_over(ctx);
1261 
1262     tmp = load_frd(rt);
1263     do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1264     tcg_temp_free_i64(tmp);
1265 
1266     return nullify_end(ctx, DISAS_NEXT);
1267 }
1268 
1269 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1270                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1271 {
1272     TCGv_i32 tmp;
1273 
1274     nullify_over(ctx);
1275     tmp = load_frw0_i32(ra);
1276 
1277     func(tmp, cpu_env, tmp);
1278 
1279     save_frw_i32(rt, tmp);
1280     tcg_temp_free_i32(tmp);
1281     return nullify_end(ctx, DISAS_NEXT);
1282 }
1283 
1284 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1285                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1286 {
1287     TCGv_i32 dst;
1288     TCGv_i64 src;
1289 
1290     nullify_over(ctx);
1291     src = load_frd(ra);
1292     dst = tcg_temp_new_i32();
1293 
1294     func(dst, cpu_env, src);
1295 
1296     tcg_temp_free_i64(src);
1297     save_frw_i32(rt, dst);
1298     tcg_temp_free_i32(dst);
1299     return nullify_end(ctx, DISAS_NEXT);
1300 }
1301 
1302 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1303                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1304 {
1305     TCGv_i64 tmp;
1306 
1307     nullify_over(ctx);
1308     tmp = load_frd0(ra);
1309 
1310     func(tmp, cpu_env, tmp);
1311 
1312     save_frd(rt, tmp);
1313     tcg_temp_free_i64(tmp);
1314     return nullify_end(ctx, DISAS_NEXT);
1315 }
1316 
1317 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1318                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1319 {
1320     TCGv_i32 src;
1321     TCGv_i64 dst;
1322 
1323     nullify_over(ctx);
1324     src = load_frw0_i32(ra);
1325     dst = tcg_temp_new_i64();
1326 
1327     func(dst, cpu_env, src);
1328 
1329     tcg_temp_free_i32(src);
1330     save_frd(rt, dst);
1331     tcg_temp_free_i64(dst);
1332     return nullify_end(ctx, DISAS_NEXT);
1333 }
1334 
1335 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1336                                  unsigned ra, unsigned rb,
1337                                  void (*func)(TCGv_i32, TCGv_env,
1338                                               TCGv_i32, TCGv_i32))
1339 {
1340     TCGv_i32 a, b;
1341 
1342     nullify_over(ctx);
1343     a = load_frw0_i32(ra);
1344     b = load_frw0_i32(rb);
1345 
1346     func(a, cpu_env, a, b);
1347 
1348     tcg_temp_free_i32(b);
1349     save_frw_i32(rt, a);
1350     tcg_temp_free_i32(a);
1351     return nullify_end(ctx, DISAS_NEXT);
1352 }
1353 
1354 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1355                                  unsigned ra, unsigned rb,
1356                                  void (*func)(TCGv_i64, TCGv_env,
1357                                               TCGv_i64, TCGv_i64))
1358 {
1359     TCGv_i64 a, b;
1360 
1361     nullify_over(ctx);
1362     a = load_frd0(ra);
1363     b = load_frd0(rb);
1364 
1365     func(a, cpu_env, a, b);
1366 
1367     tcg_temp_free_i64(b);
1368     save_frd(rt, a);
1369     tcg_temp_free_i64(a);
1370     return nullify_end(ctx, DISAS_NEXT);
1371 }
1372 
1373 /* Emit an unconditional branch to a direct target, which may or may not
1374    have already had nullification handled.  */
1375 static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
1376                                 unsigned link, bool is_n)
1377 {
1378     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1379         if (link != 0) {
1380             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1381         }
1382         ctx->iaoq_n = dest;
1383         if (is_n) {
1384             ctx->null_cond.c = TCG_COND_ALWAYS;
1385         }
1386         return DISAS_NEXT;
1387     } else {
1388         nullify_over(ctx);
1389 
1390         if (link != 0) {
1391             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1392         }
1393 
1394         if (is_n && use_nullify_skip(ctx)) {
1395             nullify_set(ctx, 0);
1396             gen_goto_tb(ctx, 0, dest, dest + 4);
1397         } else {
1398             nullify_set(ctx, is_n);
1399             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1400         }
1401 
1402         nullify_end(ctx, DISAS_NEXT);
1403 
1404         nullify_set(ctx, 0);
1405         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1406         return DISAS_NORETURN;
1407     }
1408 }
1409 
1410 /* Emit a conditional branch to a direct target.  If the branch itself
1411    is nullified, we should have already used nullify_over.  */
1412 static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1413                                 DisasCond *cond)
1414 {
1415     target_ulong dest = iaoq_dest(ctx, disp);
1416     TCGLabel *taken = NULL;
1417     TCGCond c = cond->c;
1418     bool n;
1419 
1420     assert(ctx->null_cond.c == TCG_COND_NEVER);
1421 
1422     /* Handle TRUE and NEVER as direct branches.  */
1423     if (c == TCG_COND_ALWAYS) {
1424         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1425     }
1426     if (c == TCG_COND_NEVER) {
1427         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1428     }
1429 
1430     taken = gen_new_label();
1431     cond_prep(cond);
1432     tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1433     cond_free(cond);
1434 
1435     /* Not taken: Condition not satisfied; nullify on backward branches. */
1436     n = is_n && disp < 0;
1437     if (n && use_nullify_skip(ctx)) {
1438         nullify_set(ctx, 0);
1439         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1440     } else {
1441         if (!n && ctx->null_lab) {
1442             gen_set_label(ctx->null_lab);
1443             ctx->null_lab = NULL;
1444         }
1445         nullify_set(ctx, n);
1446         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1447     }
1448 
1449     gen_set_label(taken);
1450 
1451     /* Taken: Condition satisfied; nullify on forward branches.  */
1452     n = is_n && disp >= 0;
1453     if (n && use_nullify_skip(ctx)) {
1454         nullify_set(ctx, 0);
1455         gen_goto_tb(ctx, 1, dest, dest + 4);
1456     } else {
1457         nullify_set(ctx, n);
1458         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1459     }
1460 
1461     /* Not taken: the branch itself was nullified.  */
1462     if (ctx->null_lab) {
1463         gen_set_label(ctx->null_lab);
1464         ctx->null_lab = NULL;
1465         return DISAS_IAQ_N_STALE;
1466     } else {
1467         return DISAS_NORETURN;
1468     }
1469 }
1470 
1471 /* Emit an unconditional branch to an indirect target.  This handles
1472    nullification of the branch itself.  */
1473 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
1474                                 unsigned link, bool is_n)
1475 {
1476     TCGv a0, a1, next, tmp;
1477     TCGCond c;
1478 
1479     assert(ctx->null_lab == NULL);
1480 
1481     if (ctx->null_cond.c == TCG_COND_NEVER) {
1482         if (link != 0) {
1483             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1484         }
1485         next = get_temp(ctx);
1486         tcg_gen_mov_tl(next, dest);
1487         ctx->iaoq_n = -1;
1488         ctx->iaoq_n_var = next;
1489         if (is_n) {
1490             ctx->null_cond.c = TCG_COND_ALWAYS;
1491         }
1492     } else if (is_n && use_nullify_skip(ctx)) {
1493         /* The (conditional) branch, B, nullifies the next insn, N,
1494            and we're allowed to skip execution of N (no single-step or
1495            tracepoint in effect).  Since the goto_ptr that we must use
1496            for the indirect branch consumes no special resources, we
1497            can (conditionally) skip B and continue execution.  */
1498         /* The use_nullify_skip test implies we have a known control path.  */
1499         tcg_debug_assert(ctx->iaoq_b != -1);
1500         tcg_debug_assert(ctx->iaoq_n != -1);
1501 
1502         /* We do have to handle the non-local temporary, DEST, before
1503            branching.  Since IAOQ_F is not really live at this point, we
1504            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1505         tcg_gen_mov_tl(cpu_iaoq_f, dest);
1506         tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1507 
1508         nullify_over(ctx);
1509         if (link != 0) {
1510             tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1511         }
1512         tcg_gen_lookup_and_goto_ptr();
1513         return nullify_end(ctx, DISAS_NEXT);
1514     } else {
1515         cond_prep(&ctx->null_cond);
1516         c = ctx->null_cond.c;
1517         a0 = ctx->null_cond.a0;
1518         a1 = ctx->null_cond.a1;
1519 
1520         tmp = tcg_temp_new();
1521         next = get_temp(ctx);
1522 
1523         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1524         tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1525         ctx->iaoq_n = -1;
1526         ctx->iaoq_n_var = next;
1527 
1528         if (link != 0) {
1529             tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1530         }
1531 
1532         if (is_n) {
1533             /* The branch nullifies the next insn, which means the state of N
1534                after the branch is the inverse of the state of N that applied
1535                to the branch.  */
1536             tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1537             cond_free(&ctx->null_cond);
1538             ctx->null_cond = cond_make_n();
1539             ctx->psw_n_nonzero = true;
1540         } else {
1541             cond_free(&ctx->null_cond);
1542         }
1543     }
1544 
1545     return DISAS_NEXT;
1546 }
1547 
1548 /* On Linux, page zero is normally marked execute only + gateway.
1549    Therefore normal read or write is supposed to fail, but specific
1550    offsets have kernel code mapped to raise permissions to implement
1551    system calls.  Handling this via an explicit check here, rather
1552    than in the "be disp(sr2,r0)" instruction that probably sent us
1553    here, is the easiest way to handle the branch delay slot on the
1554    aforementioned BE.  */
1555 static DisasJumpType do_page_zero(DisasContext *ctx)
1556 {
1557     /* If by some means we get here with PSW[N]=1, that implies that
1558        the B,GATE instruction would be skipped, and we'd fault on the
1559       next insn within the privileged page.  */
1560     switch (ctx->null_cond.c) {
1561     case TCG_COND_NEVER:
1562         break;
1563     case TCG_COND_ALWAYS:
1564         tcg_gen_movi_tl(cpu_psw_n, 0);
1565         goto do_sigill;
1566     default:
1567         /* Since this is always the first (and only) insn within the
1568            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1569         g_assert_not_reached();
1570     }
1571 
1572     /* Check that we didn't arrive here via some means that allowed
1573        non-sequential instruction execution.  Normally the PSW[B] bit
1574        detects this by disallowing the B,GATE instruction to execute
1575        under such conditions.  */
1576     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1577         goto do_sigill;
1578     }
1579 
1580     switch (ctx->iaoq_f) {
1581     case 0x00: /* Null pointer call */
1582         gen_excp_1(EXCP_SIGSEGV);
1583         return DISAS_NORETURN;
1584 
1585     case 0xb0: /* LWS */
1586         gen_excp_1(EXCP_SYSCALL_LWS);
1587         return DISAS_NORETURN;
1588 
1589     case 0xe0: /* SET_THREAD_POINTER */
1590         tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1591         tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1592         tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1593         return DISAS_IAQ_N_UPDATED;
1594 
1595     case 0x100: /* SYSCALL */
1596         gen_excp_1(EXCP_SYSCALL);
1597         return DISAS_NORETURN;
1598 
1599     default:
1600     do_sigill:
1601         gen_excp_1(EXCP_SIGILL);
1602         return DISAS_NORETURN;
1603     }
1604 }
1605 
1606 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1607                                const DisasInsn *di)
1608 {
1609     cond_free(&ctx->null_cond);
1610     return DISAS_NEXT;
1611 }
1612 
1613 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1614                                  const DisasInsn *di)
1615 {
1616     nullify_over(ctx);
1617     return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1618 }
1619 
1620 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1621                                 const DisasInsn *di)
1622 {
1623     /* No point in nullifying the memory barrier.  */
1624     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1625 
1626     cond_free(&ctx->null_cond);
1627     return DISAS_NEXT;
1628 }
1629 
1630 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1631                                 const DisasInsn *di)
1632 {
1633     unsigned rt = extract32(insn, 0, 5);
1634     TCGv tmp = dest_gpr(ctx, rt);
1635     tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1636     save_gpr(ctx, rt, tmp);
1637 
1638     cond_free(&ctx->null_cond);
1639     return DISAS_NEXT;
1640 }
1641 
1642 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1643                                 const DisasInsn *di)
1644 {
1645     unsigned rt = extract32(insn, 0, 5);
1646     TCGv tmp = dest_gpr(ctx, rt);
1647 
1648     /* ??? We don't implement space registers.  */
1649     tcg_gen_movi_tl(tmp, 0);
1650     save_gpr(ctx, rt, tmp);
1651 
1652     cond_free(&ctx->null_cond);
1653     return DISAS_NEXT;
1654 }
1655 
1656 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1657                                  const DisasInsn *di)
1658 {
1659     unsigned rt = extract32(insn, 0, 5);
1660     unsigned ctl = extract32(insn, 21, 5);
1661     TCGv tmp;
1662 
1663     switch (ctl) {
1664     case 11: /* SAR */
1665 #ifdef TARGET_HPPA64
1666         if (extract32(insn, 14, 1) == 0) {
1667             /* MFSAR without ,W masks low 5 bits.  */
1668             tmp = dest_gpr(ctx, rt);
1669             tcg_gen_andi_tl(tmp, cpu_sar, 31);
1670             save_gpr(ctx, rt, tmp);
1671             break;
1672         }
1673 #endif
1674         save_gpr(ctx, rt, cpu_sar);
1675         break;
1676     case 16: /* Interval Timer */
1677         tmp = dest_gpr(ctx, rt);
1678         tcg_gen_movi_tl(tmp, 0); /* FIXME */
1679         save_gpr(ctx, rt, tmp);
1680         break;
1681     case 26:
1682         save_gpr(ctx, rt, cpu_cr26);
1683         break;
1684     case 27:
1685         save_gpr(ctx, rt, cpu_cr27);
1686         break;
1687     default:
1688         /* All other control registers are privileged.  */
1689         return gen_illegal(ctx);
1690     }
1691 
1692     cond_free(&ctx->null_cond);
1693     return DISAS_NEXT;
1694 }
1695 
1696 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1697                                  const DisasInsn *di)
1698 {
1699     unsigned rin = extract32(insn, 16, 5);
1700     unsigned ctl = extract32(insn, 21, 5);
1701     TCGv tmp;
1702 
1703     if (ctl == 11) { /* SAR */
1704         tmp = tcg_temp_new();
1705         tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1706         save_or_nullify(ctx, cpu_sar, tmp);
1707         tcg_temp_free(tmp);
1708     } else {
1709         /* All other control registers are privileged or read-only.  */
1710         return gen_illegal(ctx);
1711     }
1712 
1713     cond_free(&ctx->null_cond);
1714     return DISAS_NEXT;
1715 }
1716 
1717 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1718                                    const DisasInsn *di)
1719 {
1720     unsigned rin = extract32(insn, 16, 5);
1721     TCGv tmp = tcg_temp_new();
1722 
1723     tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1724     tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1725     save_or_nullify(ctx, cpu_sar, tmp);
1726     tcg_temp_free(tmp);
1727 
1728     cond_free(&ctx->null_cond);
1729     return DISAS_NEXT;
1730 }
1731 
1732 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1733                                  const DisasInsn *di)
1734 {
1735     unsigned rt = extract32(insn, 0, 5);
1736     TCGv dest = dest_gpr(ctx, rt);
1737 
1738     /* Since we don't implement space registers, this returns zero.  */
1739     tcg_gen_movi_tl(dest, 0);
1740     save_gpr(ctx, rt, dest);
1741 
1742     cond_free(&ctx->null_cond);
1743     return DISAS_NEXT;
1744 }
1745 
1746 static const DisasInsn table_system[] = {
1747     { 0x00000000u, 0xfc001fe0u, trans_break },
1748     /* We don't implement space registers, so MTSP is a nop.  */
1749     { 0x00001820u, 0xffe01fffu, trans_nop },
1750     { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1751     { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1752     { 0x000014a0u, 0xffffffe0u, trans_mfia },
1753     { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1754     { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1755     { 0x00000400u, 0xffffffffu, trans_sync },
1756     { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
1757 };
1758 
1759 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1760                                         const DisasInsn *di)
1761 {
1762     unsigned rb = extract32(insn, 21, 5);
1763     unsigned rx = extract32(insn, 16, 5);
1764     TCGv dest = dest_gpr(ctx, rb);
1765     TCGv src1 = load_gpr(ctx, rb);
1766     TCGv src2 = load_gpr(ctx, rx);
1767 
1768     /* The only thing we need to do is the base register modification.  */
1769     tcg_gen_add_tl(dest, src1, src2);
1770     save_gpr(ctx, rb, dest);
1771 
1772     cond_free(&ctx->null_cond);
1773     return DISAS_NEXT;
1774 }
1775 
1776 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1777                                  const DisasInsn *di)
1778 {
1779     unsigned rt = extract32(insn, 0, 5);
1780     unsigned rb = extract32(insn, 21, 5);
1781     unsigned is_write = extract32(insn, 6, 1);
1782     TCGv dest;
1783 
1784     nullify_over(ctx);
1785 
1786     /* ??? Do something with priv level operand.  */
1787     dest = dest_gpr(ctx, rt);
1788     if (is_write) {
1789         gen_helper_probe_w(dest, load_gpr(ctx, rb));
1790     } else {
1791         gen_helper_probe_r(dest, load_gpr(ctx, rb));
1792     }
1793     save_gpr(ctx, rt, dest);
1794     return nullify_end(ctx, DISAS_NEXT);
1795 }
1796 
1797 static const DisasInsn table_mem_mgmt[] = {
1798     { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
1799     { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
1800     { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1801     { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
1802     { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1803     { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
1804     { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1805     { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
1806     { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1807     { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
1808     { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1809     { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
1810     { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1811     { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
1812     { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
1813 };
1814 
1815 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1816                                const DisasInsn *di)
1817 {
1818     unsigned r2 = extract32(insn, 21, 5);
1819     unsigned r1 = extract32(insn, 16, 5);
1820     unsigned cf = extract32(insn, 12, 4);
1821     unsigned ext = extract32(insn, 8, 4);
1822     unsigned shift = extract32(insn, 6, 2);
1823     unsigned rt = extract32(insn,  0, 5);
1824     TCGv tcg_r1, tcg_r2;
1825     bool is_c = false;
1826     bool is_l = false;
1827     bool is_tc = false;
1828     bool is_tsv = false;
1829     DisasJumpType ret;
1830 
1831     switch (ext) {
1832     case 0x6: /* ADD, SHLADD */
1833         break;
1834     case 0xa: /* ADD,L, SHLADD,L */
1835         is_l = true;
1836         break;
1837     case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1838         is_tsv = true;
1839         break;
1840     case 0x7: /* ADD,C */
1841         is_c = true;
1842         break;
1843     case 0xf: /* ADD,C,TSV */
1844         is_c = is_tsv = true;
1845         break;
1846     default:
1847         return gen_illegal(ctx);
1848     }
1849 
1850     if (cf) {
1851         nullify_over(ctx);
1852     }
1853     tcg_r1 = load_gpr(ctx, r1);
1854     tcg_r2 = load_gpr(ctx, r2);
1855     ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1856     return nullify_end(ctx, ret);
1857 }
1858 
1859 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1860                                const DisasInsn *di)
1861 {
1862     unsigned r2 = extract32(insn, 21, 5);
1863     unsigned r1 = extract32(insn, 16, 5);
1864     unsigned cf = extract32(insn, 12, 4);
1865     unsigned ext = extract32(insn, 6, 6);
1866     unsigned rt = extract32(insn,  0, 5);
1867     TCGv tcg_r1, tcg_r2;
1868     bool is_b = false;
1869     bool is_tc = false;
1870     bool is_tsv = false;
1871     DisasJumpType ret;
1872 
1873     switch (ext) {
1874     case 0x10: /* SUB */
1875         break;
1876     case 0x30: /* SUB,TSV */
1877         is_tsv = true;
1878         break;
1879     case 0x14: /* SUB,B */
1880         is_b = true;
1881         break;
1882     case 0x34: /* SUB,B,TSV */
1883         is_b = is_tsv = true;
1884         break;
1885     case 0x13: /* SUB,TC */
1886         is_tc = true;
1887         break;
1888     case 0x33: /* SUB,TSV,TC */
1889         is_tc = is_tsv = true;
1890         break;
1891     default:
1892         return gen_illegal(ctx);
1893     }
1894 
1895     if (cf) {
1896         nullify_over(ctx);
1897     }
1898     tcg_r1 = load_gpr(ctx, r1);
1899     tcg_r2 = load_gpr(ctx, r2);
1900     ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1901     return nullify_end(ctx, ret);
1902 }
1903 
1904 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1905                                const DisasInsn *di)
1906 {
1907     unsigned r2 = extract32(insn, 21, 5);
1908     unsigned r1 = extract32(insn, 16, 5);
1909     unsigned cf = extract32(insn, 12, 4);
1910     unsigned rt = extract32(insn,  0, 5);
1911     TCGv tcg_r1, tcg_r2;
1912     DisasJumpType ret;
1913 
1914     if (cf) {
1915         nullify_over(ctx);
1916     }
1917     tcg_r1 = load_gpr(ctx, r1);
1918     tcg_r2 = load_gpr(ctx, r2);
1919     ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1920     return nullify_end(ctx, ret);
1921 }
1922 
1923 /* OR r,0,t -> COPY (according to gas) */
1924 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1925                                 const DisasInsn *di)
1926 {
1927     unsigned r1 = extract32(insn, 16, 5);
1928     unsigned rt = extract32(insn,  0, 5);
1929 
1930     if (r1 == 0) {
1931         TCGv dest = dest_gpr(ctx, rt);
1932         tcg_gen_movi_tl(dest, 0);
1933         save_gpr(ctx, rt, dest);
1934     } else {
1935         save_gpr(ctx, rt, cpu_gr[r1]);
1936     }
1937     cond_free(&ctx->null_cond);
1938     return DISAS_NEXT;
1939 }
1940 
1941 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1942                                   const DisasInsn *di)
1943 {
1944     unsigned r2 = extract32(insn, 21, 5);
1945     unsigned r1 = extract32(insn, 16, 5);
1946     unsigned cf = extract32(insn, 12, 4);
1947     unsigned rt = extract32(insn,  0, 5);
1948     TCGv tcg_r1, tcg_r2;
1949     DisasJumpType ret;
1950 
1951     if (cf) {
1952         nullify_over(ctx);
1953     }
1954     tcg_r1 = load_gpr(ctx, r1);
1955     tcg_r2 = load_gpr(ctx, r2);
1956     ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1957     return nullify_end(ctx, ret);
1958 }
1959 
1960 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1961                                 const DisasInsn *di)
1962 {
1963     unsigned r2 = extract32(insn, 21, 5);
1964     unsigned r1 = extract32(insn, 16, 5);
1965     unsigned cf = extract32(insn, 12, 4);
1966     unsigned rt = extract32(insn,  0, 5);
1967     TCGv tcg_r1, tcg_r2;
1968     DisasJumpType ret;
1969 
1970     if (cf) {
1971         nullify_over(ctx);
1972     }
1973     tcg_r1 = load_gpr(ctx, r1);
1974     tcg_r2 = load_gpr(ctx, r2);
1975     ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1976     return nullify_end(ctx, ret);
1977 }
1978 
1979 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1980                                   const DisasInsn *di)
1981 {
1982     unsigned r2 = extract32(insn, 21, 5);
1983     unsigned r1 = extract32(insn, 16, 5);
1984     unsigned cf = extract32(insn, 12, 4);
1985     unsigned is_tc = extract32(insn, 6, 1);
1986     unsigned rt = extract32(insn,  0, 5);
1987     TCGv tcg_r1, tcg_r2, tmp;
1988     DisasJumpType ret;
1989 
1990     if (cf) {
1991         nullify_over(ctx);
1992     }
1993     tcg_r1 = load_gpr(ctx, r1);
1994     tcg_r2 = load_gpr(ctx, r2);
1995     tmp = get_temp(ctx);
1996     tcg_gen_not_tl(tmp, tcg_r2);
1997     ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1998     return nullify_end(ctx, ret);
1999 }
2000 
2001 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2002                                 const DisasInsn *di)
2003 {
2004     unsigned r2 = extract32(insn, 21, 5);
2005     unsigned cf = extract32(insn, 12, 4);
2006     unsigned is_i = extract32(insn, 6, 1);
2007     unsigned rt = extract32(insn,  0, 5);
2008     TCGv tmp;
2009     DisasJumpType ret;
2010 
2011     nullify_over(ctx);
2012 
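         /* Decimal correct: derive a 0-or-6 correction value for each nibble
            from the carry/borrow bits in PSW[CB], then add or subtract it
            via do_unit below.  */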
2013     tmp = get_temp(ctx);
2014     tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2015     if (!is_i) {
2016         tcg_gen_not_tl(tmp, tmp);
2017     }
2018     tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2019     tcg_gen_muli_tl(tmp, tmp, 6);
2020     ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2021                   is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2022 
2023     return nullify_end(ctx, ret);
2024 }
2025 
2026 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2027                               const DisasInsn *di)
2028 {
2029     unsigned r2 = extract32(insn, 21, 5);
2030     unsigned r1 = extract32(insn, 16, 5);
2031     unsigned cf = extract32(insn, 12, 4);
2032     unsigned rt = extract32(insn,  0, 5);
2033     TCGv dest, add1, add2, addc, zero, in1, in2;
2034 
2035     nullify_over(ctx);
2036 
2037     in1 = load_gpr(ctx, r1);
2038     in2 = load_gpr(ctx, r2);
2039 
2040     add1 = tcg_temp_new();
2041     add2 = tcg_temp_new();
2042     addc = tcg_temp_new();
2043     dest = tcg_temp_new();
2044     zero = tcg_const_tl(0);
2045 
2046     /* Form R1 << 1 | PSW[CB]{8}.  */
2047     tcg_gen_add_tl(add1, in1, in1);
2048     tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2049 
2050     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2051        carry{8} requires that we subtract via + ~R2 + 1, as described in
2052        the manual.  By extracting and masking V, we can produce the
2053        proper inputs to the addition without movcond.  */
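         /* That is: when the MSB of PSW[V] is set, add2 = ~R2 and addc = 1
            (a subtraction); otherwise add2 = R2 and addc = 0.  */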
2054     tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2055     tcg_gen_xor_tl(add2, in2, addc);
2056     tcg_gen_andi_tl(addc, addc, 1);
2057     /* ??? This is only correct for 32-bit.  */
2058     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2059     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2060 
2061     tcg_temp_free(addc);
2062     tcg_temp_free(zero);
2063 
2064     /* Write back the result register.  */
2065     save_gpr(ctx, rt, dest);
2066 
2067     /* Write back PSW[CB].  */
2068     tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2069     tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2070 
2071     /* Write back PSW[V] for the division step.  */
2072     tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2073     tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2074 
2075     /* Install the new nullification.  */
2076     if (cf) {
2077         TCGv sv;
2078         TCGV_UNUSED(sv);
2079         if (cf >> 1 == 6) {
2080             /* ??? The lshift is supposed to contribute to overflow.  */
2081             sv = do_add_sv(ctx, dest, add1, add2);
2082         }
2083         ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2084     }
2085 
2086     tcg_temp_free(add1);
2087     tcg_temp_free(add2);
2088     tcg_temp_free(dest);
2089 
2090     return nullify_end(ctx, DISAS_NEXT);
2091 }
2092 
2093 static const DisasInsn table_arith_log[] = {
2094     { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
2095     { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2096     { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
2097     { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
2098     { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
2099     { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
2100     { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2101     { 0x08000380u, 0xfc000fe0u, trans_uxor },
2102     { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2103     { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2104     { 0x08000440u, 0xfc000fe0u, trans_ds },
2105     { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2106     { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2107     { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2108     { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2109 };
2110 
2111 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2112 {
2113     target_long im = low_sextract(insn, 0, 11);
2114     unsigned e1 = extract32(insn, 11, 1);
2115     unsigned cf = extract32(insn, 12, 4);
2116     unsigned rt = extract32(insn, 16, 5);
2117     unsigned r2 = extract32(insn, 21, 5);
2118     unsigned o1 = extract32(insn, 26, 1);
2119     TCGv tcg_im, tcg_r2;
2120     DisasJumpType ret;
2121 
2122     if (cf) {
2123         nullify_over(ctx);
2124     }
2125 
2126     tcg_im = load_const(ctx, im);
2127     tcg_r2 = load_gpr(ctx, r2);
2128     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2129 
2130     return nullify_end(ctx, ret);
2131 }
2132 
2133 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2134 {
2135     target_long im = low_sextract(insn, 0, 11);
2136     unsigned e1 = extract32(insn, 11, 1);
2137     unsigned cf = extract32(insn, 12, 4);
2138     unsigned rt = extract32(insn, 16, 5);
2139     unsigned r2 = extract32(insn, 21, 5);
2140     TCGv tcg_im, tcg_r2;
2141     DisasJumpType ret;
2142 
2143     if (cf) {
2144         nullify_over(ctx);
2145     }
2146 
2147     tcg_im = load_const(ctx, im);
2148     tcg_r2 = load_gpr(ctx, r2);
2149     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2150 
2151     return nullify_end(ctx, ret);
2152 }
2153 
2154 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2155 {
2156     target_long im = low_sextract(insn, 0, 11);
2157     unsigned cf = extract32(insn, 12, 4);
2158     unsigned rt = extract32(insn, 16, 5);
2159     unsigned r2 = extract32(insn, 21, 5);
2160     TCGv tcg_im, tcg_r2;
2161     DisasJumpType ret;
2162 
2163     if (cf) {
2164         nullify_over(ctx);
2165     }
2166 
2167     tcg_im = load_const(ctx, im);
2168     tcg_r2 = load_gpr(ctx, r2);
2169     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2170 
2171     return nullify_end(ctx, ret);
2172 }
2173 
2174 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2175                                     const DisasInsn *di)
2176 {
2177     unsigned rt = extract32(insn, 0, 5);
2178     unsigned m = extract32(insn, 5, 1);
2179     unsigned sz = extract32(insn, 6, 2);
2180     unsigned a = extract32(insn, 13, 1);
2181     int disp = low_sextract(insn, 16, 5);
2182     unsigned rb = extract32(insn, 21, 5);
2183     int modify = (m ? (a ? -1 : 1) : 0);
2184     TCGMemOp mop = MO_TE | sz;
2185 
2186     return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2187 }
2188 
2189 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2190                                     const DisasInsn *di)
2191 {
2192     unsigned rt = extract32(insn, 0, 5);
2193     unsigned m = extract32(insn, 5, 1);
2194     unsigned sz = extract32(insn, 6, 2);
2195     unsigned u = extract32(insn, 13, 1);
2196     unsigned rx = extract32(insn, 16, 5);
2197     unsigned rb = extract32(insn, 21, 5);
2198     TCGMemOp mop = MO_TE | sz;
2199 
2200     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2201 }
2202 
2203 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2204                                     const DisasInsn *di)
2205 {
2206     int disp = low_sextract(insn, 0, 5);
2207     unsigned m = extract32(insn, 5, 1);
2208     unsigned sz = extract32(insn, 6, 2);
2209     unsigned a = extract32(insn, 13, 1);
2210     unsigned rr = extract32(insn, 16, 5);
2211     unsigned rb = extract32(insn, 21, 5);
2212     int modify = (m ? (a ? -1 : 1) : 0);
2213     TCGMemOp mop = MO_TE | sz;
2214 
2215     return do_store(ctx, rr, rb, disp, modify, mop);
2216 }
2217 
2218 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2219                                 const DisasInsn *di)
2220 {
2221     unsigned rt = extract32(insn, 0, 5);
2222     unsigned m = extract32(insn, 5, 1);
2223     unsigned i = extract32(insn, 12, 1);
2224     unsigned au = extract32(insn, 13, 1);
2225     unsigned rx = extract32(insn, 16, 5);
2226     unsigned rb = extract32(insn, 21, 5);
2227     TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2228     TCGv zero, addr, base, dest;
2229     int modify, disp = 0, scale = 0;
2230 
2231     nullify_over(ctx);
2232 
2233     /* ??? Share more code with do_load and do_load_{32,64}.  */
2234 
2235     if (i) {
2236         modify = (m ? (au ? -1 : 1) : 0);
2237         disp = low_sextract(rx, 0, 5);
2238         rx = 0;
2239     } else {
2240         modify = m;
2241         if (au) {
2242             scale = mop & MO_SIZE;
2243         }
2244     }
2245     if (modify) {
2246         /* Base register modification.  Make sure if RT == RB, we see
2247            the result of the load.  */
2248         dest = get_temp(ctx);
2249     } else {
2250         dest = dest_gpr(ctx, rt);
2251     }
2252 
2253     addr = tcg_temp_new();
2254     base = load_gpr(ctx, rb);
2255     if (rx) {
2256         tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2257         tcg_gen_add_tl(addr, addr, base);
2258     } else {
2259         tcg_gen_addi_tl(addr, base, disp);
2260     }
2261 
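         /* LDCW atomically loads the word and stores zero back, which maps
            directly onto an atomic exchange with zero.  */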
2262     zero = tcg_const_tl(0);
2263     tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2264                            zero, MMU_USER_IDX, mop);
2265     if (modify) {
2266         save_gpr(ctx, rb, addr);
2267     }
2268     save_gpr(ctx, rt, dest);
2269 
2270     return nullify_end(ctx, DISAS_NEXT);
2271 }
2272 
2273 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
2274                                 const DisasInsn *di)
2275 {
2276     target_long disp = low_sextract(insn, 0, 5);
2277     unsigned m = extract32(insn, 5, 1);
2278     unsigned a = extract32(insn, 13, 1);
2279     unsigned rt = extract32(insn, 16, 5);
2280     unsigned rb = extract32(insn, 21, 5);
2281     TCGv addr, val;
2282 
2283     nullify_over(ctx);
2284 
2285     addr = tcg_temp_new();
2286     if (m || disp == 0) {
2287         tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2288     } else {
2289         tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2290     }
2291     val = load_gpr(ctx, rt);
2292 
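         /* STBY stores only the bytes of the word selected by the low address
            bits; the begin/end (,b/,e) forms, and their parallel variants,
            are implemented in helpers.  */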
2293     if (a) {
2294         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2295             gen_helper_stby_e_parallel(cpu_env, addr, val);
2296         } else {
2297             gen_helper_stby_e(cpu_env, addr, val);
2298         }
2299     } else {
2300         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2301             gen_helper_stby_b_parallel(cpu_env, addr, val);
2302         } else {
2303             gen_helper_stby_b(cpu_env, addr, val);
2304         }
2305     }
2306 
2307     if (m) {
2308         tcg_gen_addi_tl(addr, addr, disp);
2309         tcg_gen_andi_tl(addr, addr, ~3);
2310         save_gpr(ctx, rb, addr);
2311     }
2312     tcg_temp_free(addr);
2313 
2314     return nullify_end(ctx, DISAS_NEXT);
2315 }
2316 
2317 static const DisasInsn table_index_mem[] = {
2318     { 0x0c001000u, 0xfc001300u, trans_ld_idx_i }, /* LD[BHWD], im */
2319     { 0x0c000000u, 0xfc001300u, trans_ld_idx_x }, /* LD[BHWD], rx */
2320     { 0x0c001200u, 0xfc001300u, trans_st_idx_i }, /* ST[BHWD] */
2321     { 0x0c0001c0u, 0xfc0003c0u, trans_ldcw },
2322     { 0x0c001300u, 0xfc0013c0u, trans_stby },
2323 };
2324 
2325 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2326 {
2327     unsigned rt = extract32(insn, 21, 5);
2328     target_long i = assemble_21(insn);
2329     TCGv tcg_rt = dest_gpr(ctx, rt);
2330 
2331     tcg_gen_movi_tl(tcg_rt, i);
2332     save_gpr(ctx, rt, tcg_rt);
2333     cond_free(&ctx->null_cond);
2334 
2335     return DISAS_NEXT;
2336 }
2337 
2338 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2339 {
2340     unsigned rt = extract32(insn, 21, 5);
2341     target_long i = assemble_21(insn);
2342     TCGv tcg_rt = load_gpr(ctx, rt);
2343     TCGv tcg_r1 = dest_gpr(ctx, 1);
2344 
2345     tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2346     save_gpr(ctx, 1, tcg_r1);
2347     cond_free(&ctx->null_cond);
2348 
2349     return DISAS_NEXT;
2350 }
2351 
2352 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2353 {
2354     unsigned rb = extract32(insn, 21, 5);
2355     unsigned rt = extract32(insn, 16, 5);
2356     target_long i = assemble_16(insn);
2357     TCGv tcg_rt = dest_gpr(ctx, rt);
2358 
2359     /* Special case rb == 0, for the LDI pseudo-op.
2360        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2361     if (rb == 0) {
2362         tcg_gen_movi_tl(tcg_rt, i);
2363     } else {
2364         tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2365     }
2366     save_gpr(ctx, rt, tcg_rt);
2367     cond_free(&ctx->null_cond);
2368 
2369     return DISAS_NEXT;
2370 }
2371 
2372 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2373                                 bool is_mod, TCGMemOp mop)
2374 {
2375     unsigned rb = extract32(insn, 21, 5);
2376     unsigned rt = extract32(insn, 16, 5);
2377     target_long i = assemble_16(insn);
2378 
2379     return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2380 }
2381 
2382 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
2383 {
2384     unsigned rb = extract32(insn, 21, 5);
2385     unsigned rt = extract32(insn, 16, 5);
2386     target_long i = assemble_16a(insn);
2387     unsigned ext2 = extract32(insn, 1, 2);
2388 
2389     switch (ext2) {
2390     case 0:
2391     case 1:
2392         /* FLDW without modification.  */
2393         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2394     case 2:
2395         /* LDW with modification.  Note that the sign of I selects
2396            post-dec vs pre-inc.  */
2397         return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2398     default:
2399         return gen_illegal(ctx);
2400     }
2401 }
2402 
2403 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2404 {
2405     target_long i = assemble_16a(insn);
2406     unsigned t1 = extract32(insn, 1, 1);
2407     unsigned a = extract32(insn, 2, 1);
2408     unsigned t0 = extract32(insn, 16, 5);
2409     unsigned rb = extract32(insn, 21, 5);
2410 
2411     /* FLDW with modification.  */
2412     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2413 }
2414 
2415 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2416                                  bool is_mod, TCGMemOp mop)
2417 {
2418     unsigned rb = extract32(insn, 21, 5);
2419     unsigned rt = extract32(insn, 16, 5);
2420     target_long i = assemble_16(insn);
2421 
2422     return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2423 }
2424 
2425 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
2426 {
2427     unsigned rb = extract32(insn, 21, 5);
2428     unsigned rt = extract32(insn, 16, 5);
2429     target_long i = assemble_16a(insn);
2430     unsigned ext2 = extract32(insn, 1, 2);
2431 
2432     switch (ext2) {
2433     case 0:
2434     case 1:
2435         /* FSTW without modification.  */
2436         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2437     case 2:
2438         /* STW with modification.  */
2439         return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2440     default:
2441         return gen_illegal(ctx);
2442     }
2443 }
2444 
2445 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2446 {
2447     target_long i = assemble_16a(insn);
2448     unsigned t1 = extract32(insn, 1, 1);
2449     unsigned a = extract32(insn, 2, 1);
2450     unsigned t0 = extract32(insn, 16, 5);
2451     unsigned rb = extract32(insn, 21, 5);
2452 
2453     /* FSTW with modification.  */
2454     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2455 }
2456 
2457 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2458 {
2459     unsigned t0 = extract32(insn, 0, 5);
2460     unsigned m = extract32(insn, 5, 1);
2461     unsigned t1 = extract32(insn, 6, 1);
2462     unsigned ext3 = extract32(insn, 7, 3);
2463     /* unsigned cc = extract32(insn, 10, 2); */
2464     unsigned i = extract32(insn, 12, 1);
2465     unsigned ua = extract32(insn, 13, 1);
2466     unsigned rx = extract32(insn, 16, 5);
2467     unsigned rb = extract32(insn, 21, 5);
2468     unsigned rt = t1 * 32 + t0;
2469     int modify = (m ? (ua ? -1 : 1) : 0);
2470     int disp, scale;
2471 
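         /* The i bit selects the indexed form (rx is an index register,
            scaled when ua is set) versus the short displacement form, in
            which the rx field holds a 5-bit signed displacement.  */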
2472     if (i == 0) {
2473         scale = (ua ? 2 : 0);
2474         disp = 0;
2475         modify = m;
2476     } else {
2477         disp = low_sextract(rx, 0, 5);
2478         scale = 0;
2479         rx = 0;
2480         modify = (m ? (ua ? -1 : 1) : 0);
2481     }
2482 
2483     switch (ext3) {
2484     case 0: /* FLDW */
2485         return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2486     case 4: /* FSTW */
2487         return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2488     }
2489     return gen_illegal(ctx);
2490 }
2491 
2492 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2493 {
2494     unsigned rt = extract32(insn, 0, 5);
2495     unsigned m = extract32(insn, 5, 1);
2496     unsigned ext4 = extract32(insn, 6, 4);
2497     /* unsigned cc = extract32(insn, 10, 2); */
2498     unsigned i = extract32(insn, 12, 1);
2499     unsigned ua = extract32(insn, 13, 1);
2500     unsigned rx = extract32(insn, 16, 5);
2501     unsigned rb = extract32(insn, 21, 5);
2502     int modify = (m ? (ua ? -1 : 1) : 0);
2503     int disp, scale;
2504 
2505     if (i == 0) {
2506         scale = (ua ? 3 : 0);
2507         disp = 0;
2508         modify = m;
2509     } else {
2510         disp = low_sextract(rx, 0, 5);
2511         scale = 0;
2512         rx = 0;
2513         modify = (m ? (ua ? -1 : 1) : 0);
2514     }
2515 
2516     switch (ext4) {
2517     case 0: /* FLDD */
2518         return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2519     case 8: /* FSTD */
2520         return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2521     default:
2522         return gen_illegal(ctx);
2523     }
2524 }
2525 
2526 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
2527                                 bool is_true, bool is_imm, bool is_dw)
2528 {
2529     target_long disp = assemble_12(insn) * 4;
2530     unsigned n = extract32(insn, 1, 1);
2531     unsigned c = extract32(insn, 13, 3);
2532     unsigned r = extract32(insn, 21, 5);
2533     unsigned cf = c * 2 + !is_true;
2534     TCGv dest, in1, in2, sv;
2535     DisasCond cond;
2536 
2537     nullify_over(ctx);
2538 
2539     if (is_imm) {
2540         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2541     } else {
2542         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2543     }
2544     in2 = load_gpr(ctx, r);
2545     dest = get_temp(ctx);
2546 
2547     tcg_gen_sub_tl(dest, in1, in2);
2548 
2549     TCGV_UNUSED(sv);
2550     if (c == 6) {
2551         sv = do_sub_sv(ctx, dest, in1, in2);
2552     }
2553 
2554     cond = do_sub_cond(cf, dest, in1, in2, sv);
2555     return do_cbranch(ctx, disp, n, &cond);
2556 }
2557 
2558 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
2559                                 bool is_true, bool is_imm)
2560 {
2561     target_long disp = assemble_12(insn) * 4;
2562     unsigned n = extract32(insn, 1, 1);
2563     unsigned c = extract32(insn, 13, 3);
2564     unsigned r = extract32(insn, 21, 5);
2565     unsigned cf = c * 2 + !is_true;
2566     TCGv dest, in1, in2, sv, cb_msb;
2567     DisasCond cond;
2568 
2569     nullify_over(ctx);
2570 
2571     if (is_imm) {
2572         in1 = load_const(ctx, low_sextract(insn, 16, 5));
2573     } else {
2574         in1 = load_gpr(ctx, extract32(insn, 16, 5));
2575     }
2576     in2 = load_gpr(ctx, r);
2577     dest = dest_gpr(ctx, r);
2578     TCGV_UNUSED(sv);
2579     TCGV_UNUSED(cb_msb);
2580 
2581     switch (c) {
2582     default:
2583         tcg_gen_add_tl(dest, in1, in2);
2584         break;
2585     case 4: case 5:
2586         cb_msb = get_temp(ctx);
2587         tcg_gen_movi_tl(cb_msb, 0);
2588         tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2589         break;
2590     case 6:
2591         tcg_gen_add_tl(dest, in1, in2);
2592         sv = do_add_sv(ctx, dest, in1, in2);
2593         break;
2594     }
2595 
2596     cond = do_cond(cf, dest, cb_msb, sv);
2597     return do_cbranch(ctx, disp, n, &cond);
2598 }
2599 
2600 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
2601 {
2602     target_long disp = assemble_12(insn) * 4;
2603     unsigned n = extract32(insn, 1, 1);
2604     unsigned c = extract32(insn, 15, 1);
2605     unsigned r = extract32(insn, 16, 5);
2606     unsigned p = extract32(insn, 21, 5);
2607     unsigned i = extract32(insn, 26, 1);
2608     TCGv tmp, tcg_r;
2609     DisasCond cond;
2610 
2611     nullify_over(ctx);
2612 
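         /* Shift the bit to be tested (big-endian position P, or SAR for the
            variable form) up into the MSB, so that the signed comparison with
            zero below tests exactly that bit.  */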
2613     tmp = tcg_temp_new();
2614     tcg_r = load_gpr(ctx, r);
2615     if (i) {
2616         tcg_gen_shli_tl(tmp, tcg_r, p);
2617     } else {
2618         tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2619     }
2620 
2621     cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2622     tcg_temp_free(tmp);
2623     return do_cbranch(ctx, disp, n, &cond);
2624 }
2625 
2626 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2627 {
2628     target_long disp = assemble_12(insn) * 4;
2629     unsigned n = extract32(insn, 1, 1);
2630     unsigned c = extract32(insn, 13, 3);
2631     unsigned t = extract32(insn, 16, 5);
2632     unsigned r = extract32(insn, 21, 5);
2633     TCGv dest;
2634     DisasCond cond;
2635 
2636     nullify_over(ctx);
2637 
2638     dest = dest_gpr(ctx, r);
2639     if (is_imm) {
2640         tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2641     } else if (t == 0) {
2642         tcg_gen_movi_tl(dest, 0);
2643     } else {
2644         tcg_gen_mov_tl(dest, cpu_gr[t]);
2645     }
2646 
2647     cond = do_sed_cond(c, dest);
2648     return do_cbranch(ctx, disp, n, &cond);
2649 }
2650 
2651 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2652                                     const DisasInsn *di)
2653 {
2654     unsigned rt = extract32(insn, 0, 5);
2655     unsigned c = extract32(insn, 13, 3);
2656     unsigned r1 = extract32(insn, 16, 5);
2657     unsigned r2 = extract32(insn, 21, 5);
2658     TCGv dest;
2659 
2660     if (c) {
2661         nullify_over(ctx);
2662     }
2663 
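         /* SHRPW shifts the 64-bit pair {R1,R2} right by SAR.  R1 == 0
            reduces to a plain shift of R2, and R1 == R2 to a 32-bit rotate.  */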
2664     dest = dest_gpr(ctx, rt);
2665     if (r1 == 0) {
2666         tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2667         tcg_gen_shr_tl(dest, dest, cpu_sar);
2668     } else if (r1 == r2) {
2669         TCGv_i32 t32 = tcg_temp_new_i32();
2670         tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2671         tcg_gen_rotr_i32(t32, t32, cpu_sar);
2672         tcg_gen_extu_i32_tl(dest, t32);
2673         tcg_temp_free_i32(t32);
2674     } else {
2675         TCGv_i64 t = tcg_temp_new_i64();
2676         TCGv_i64 s = tcg_temp_new_i64();
2677 
2678         tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2679         tcg_gen_extu_tl_i64(s, cpu_sar);
2680         tcg_gen_shr_i64(t, t, s);
2681         tcg_gen_trunc_i64_tl(dest, t);
2682 
2683         tcg_temp_free_i64(t);
2684         tcg_temp_free_i64(s);
2685     }
2686     save_gpr(ctx, rt, dest);
2687 
2688     /* Install the new nullification.  */
2689     cond_free(&ctx->null_cond);
2690     if (c) {
2691         ctx->null_cond = do_sed_cond(c, dest);
2692     }
2693     return nullify_end(ctx, DISAS_NEXT);
2694 }
2695 
2696 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2697                                      const DisasInsn *di)
2698 {
2699     unsigned rt = extract32(insn, 0, 5);
2700     unsigned cpos = extract32(insn, 5, 5);
2701     unsigned c = extract32(insn, 13, 3);
2702     unsigned r1 = extract32(insn, 16, 5);
2703     unsigned r2 = extract32(insn, 21, 5);
2704     unsigned sa = 31 - cpos;
2705     TCGv dest, t2;
2706 
2707     if (c) {
2708         nullify_over(ctx);
2709     }
2710 
2711     dest = dest_gpr(ctx, rt);
2712     t2 = load_gpr(ctx, r2);
2713     if (r1 == r2) {
2714         TCGv_i32 t32 = tcg_temp_new_i32();
2715         tcg_gen_trunc_tl_i32(t32, t2);
2716         tcg_gen_rotri_i32(t32, t32, sa);
2717         tcg_gen_extu_i32_tl(dest, t32);
2718         tcg_temp_free_i32(t32);
2719     } else if (r1 == 0) {
2720         tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2721     } else {
2722         TCGv t0 = tcg_temp_new();
2723         tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2724         tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2725         tcg_temp_free(t0);
2726     }
2727     save_gpr(ctx, rt, dest);
2728 
2729     /* Install the new nullification.  */
2730     cond_free(&ctx->null_cond);
2731     if (c) {
2732         ctx->null_cond = do_sed_cond(c, dest);
2733     }
2734     return nullify_end(ctx, DISAS_NEXT);
2735 }
2736 
2737 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2738                                      const DisasInsn *di)
2739 {
2740     unsigned clen = extract32(insn, 0, 5);
2741     unsigned is_se = extract32(insn, 10, 1);
2742     unsigned c = extract32(insn, 13, 3);
2743     unsigned rt = extract32(insn, 16, 5);
2744     unsigned rr = extract32(insn, 21, 5);
2745     unsigned len = 32 - clen;
2746     TCGv dest, src, tmp;
2747 
2748     if (c) {
2749         nullify_over(ctx);
2750     }
2751 
2752     dest = dest_gpr(ctx, rt);
2753     src = load_gpr(ctx, rr);
2754     tmp = tcg_temp_new();
2755 
2756     /* Recall that SAR is using big-endian bit numbering.  */
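         /* XOR with TARGET_LONG_BITS - 1 converts SAR's big-endian bit
            position into the equivalent right-shift count (31 - SAR for
            the 32-bit case).  */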
2757     tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2758     if (is_se) {
2759         tcg_gen_sar_tl(dest, src, tmp);
2760         tcg_gen_sextract_tl(dest, dest, 0, len);
2761     } else {
2762         tcg_gen_shr_tl(dest, src, tmp);
2763         tcg_gen_extract_tl(dest, dest, 0, len);
2764     }
2765     tcg_temp_free(tmp);
2766     save_gpr(ctx, rt, dest);
2767 
2768     /* Install the new nullification.  */
2769     cond_free(&ctx->null_cond);
2770     if (c) {
2771         ctx->null_cond = do_sed_cond(c, dest);
2772     }
2773     return nullify_end(ctx, DISAS_NEXT);
2774 }
2775 
2776 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2777                                      const DisasInsn *di)
2778 {
2779     unsigned clen = extract32(insn, 0, 5);
2780     unsigned pos = extract32(insn, 5, 5);
2781     unsigned is_se = extract32(insn, 10, 1);
2782     unsigned c = extract32(insn, 13, 3);
2783     unsigned rt = extract32(insn, 16, 5);
2784     unsigned rr = extract32(insn, 21, 5);
2785     unsigned len = 32 - clen;
2786     unsigned cpos = 31 - pos;
2787     TCGv dest, src;
2788 
2789     if (c) {
2790         nullify_over(ctx);
2791     }
2792 
2793     dest = dest_gpr(ctx, rt);
2794     src = load_gpr(ctx, rr);
2795     if (is_se) {
2796         tcg_gen_sextract_tl(dest, src, cpos, len);
2797     } else {
2798         tcg_gen_extract_tl(dest, src, cpos, len);
2799     }
2800     save_gpr(ctx, rt, dest);
2801 
2802     /* Install the new nullification.  */
2803     cond_free(&ctx->null_cond);
2804     if (c) {
2805         ctx->null_cond = do_sed_cond(c, dest);
2806     }
2807     return nullify_end(ctx, DISAS_NEXT);
2808 }
2809 
2810 static const DisasInsn table_sh_ex[] = {
2811     { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2812     { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2813     { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2814     { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2815 };
2816 
2817 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2818                                       const DisasInsn *di)
2819 {
2820     unsigned clen = extract32(insn, 0, 5);
2821     unsigned cpos = extract32(insn, 5, 5);
2822     unsigned nz = extract32(insn, 10, 1);
2823     unsigned c = extract32(insn, 13, 3);
2824     target_long val = low_sextract(insn, 16, 5);
2825     unsigned rt = extract32(insn, 21, 5);
2826     unsigned len = 32 - clen;
2827     target_long mask0, mask1;
2828     TCGv dest;
2829 
2830     if (c) {
2831         nullify_over(ctx);
2832     }
2833     if (cpos + len > 32) {
2834         len = 32 - cpos;
2835     }
2836 
2837     dest = dest_gpr(ctx, rt);
2838     mask0 = deposit64(0, cpos, len, val);
2839     mask1 = deposit64(-1, cpos, len, val);
2840 
2841     if (nz) {
2842         TCGv src = load_gpr(ctx, rt);
2843         if (mask1 != -1) {
2844             tcg_gen_andi_tl(dest, src, mask1);
2845             src = dest;
2846         }
2847         tcg_gen_ori_tl(dest, src, mask0);
2848     } else {
2849         tcg_gen_movi_tl(dest, mask0);
2850     }
2851     save_gpr(ctx, rt, dest);
2852 
2853     /* Install the new nullification.  */
2854     cond_free(&ctx->null_cond);
2855     if (c) {
2856         ctx->null_cond = do_sed_cond(c, dest);
2857     }
2858     return nullify_end(ctx, DISAS_NEXT);
2859 }
2860 
2861 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
2862                                     const DisasInsn *di)
2863 {
2864     unsigned clen = extract32(insn, 0, 5);
2865     unsigned cpos = extract32(insn, 5, 5);
2866     unsigned nz = extract32(insn, 10, 1);
2867     unsigned c = extract32(insn, 13, 3);
2868     unsigned rr = extract32(insn, 16, 5);
2869     unsigned rt = extract32(insn, 21, 5);
2870     unsigned rs = nz ? rt : 0;
2871     unsigned len = 32 - clen;
2872     TCGv dest, val;
2873 
2874     if (c) {
2875         nullify_over(ctx);
2876     }
2877     if (cpos + len > 32) {
2878         len = 32 - cpos;
2879     }
2880 
2881     dest = dest_gpr(ctx, rt);
2882     val = load_gpr(ctx, rr);
2883     if (rs == 0) {
2884         tcg_gen_deposit_z_tl(dest, val, cpos, len);
2885     } else {
2886         tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2887     }
2888     save_gpr(ctx, rt, dest);
2889 
2890     /* Install the new nullification.  */
2891     cond_free(&ctx->null_cond);
2892     if (c) {
2893         ctx->null_cond = do_sed_cond(c, dest);
2894     }
2895     return nullify_end(ctx, DISAS_NEXT);
2896 }
2897 
2898 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
2899                                     const DisasInsn *di)
2900 {
2901     unsigned clen = extract32(insn, 0, 5);
2902     unsigned nz = extract32(insn, 10, 1);
2903     unsigned i = extract32(insn, 12, 1);
2904     unsigned c = extract32(insn, 13, 3);
2905     unsigned rt = extract32(insn, 21, 5);
2906     unsigned rs = nz ? rt : 0;
2907     unsigned len = 32 - clen;
2908     TCGv val, mask, tmp, shift, dest;
2909     unsigned msb = 1U << (len - 1);
2910 
2911     if (c) {
2912         nullify_over(ctx);
2913     }
2914 
2915     if (i) {
2916         val = load_const(ctx, low_sextract(insn, 16, 5));
2917     } else {
2918         val = load_gpr(ctx, extract32(insn, 16, 5));
2919     }
2920     dest = dest_gpr(ctx, rt);
2921     shift = tcg_temp_new();
2922     tmp = tcg_temp_new();
2923 
2924     /* Convert big-endian bit numbering in SAR to left-shift.  */
2925     tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2926 
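         /* msb + (msb - 1) is the low len-bit mask, (1 << len) - 1, computed
            without a shift by len (which would be 32 when len == 32).  */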
2927     mask = tcg_const_tl(msb + (msb - 1));
2928     tcg_gen_and_tl(tmp, val, mask);
2929     if (rs) {
2930         tcg_gen_shl_tl(mask, mask, shift);
2931         tcg_gen_shl_tl(tmp, tmp, shift);
2932         tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2933         tcg_gen_or_tl(dest, dest, tmp);
2934     } else {
2935         tcg_gen_shl_tl(dest, tmp, shift);
2936     }
2937     tcg_temp_free(shift);
2938     tcg_temp_free(mask);
2939     tcg_temp_free(tmp);
2940     save_gpr(ctx, rt, dest);
2941 
2942     /* Install the new nullification.  */
2943     cond_free(&ctx->null_cond);
2944     if (c) {
2945         ctx->null_cond = do_sed_cond(c, dest);
2946     }
2947     return nullify_end(ctx, DISAS_NEXT);
2948 }
2949 
2950 static const DisasInsn table_depw[] = {
2951     { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2952     { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2953     { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2954 };
2955 
2956 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2957 {
2958     unsigned n = extract32(insn, 1, 1);
2959     unsigned b = extract32(insn, 21, 5);
2960     target_long disp = assemble_17(insn);
2961 
2962     /* unsigned s = low_uextract(insn, 13, 3); */
2963     /* ??? It seems like there should be a good way of using
2964        "be disp(sr2, r0)", the canonical gateway entry mechanism
2965        to our advantage.  But that appears to be inconvenient to
2966        manage alongside branch delay slots.  Therefore we handle
2967        entry into the gateway page via absolute address.  */
2968 
2969     /* Since we don't implement spaces, just branch.  Do notice the special
2970        case of "be disp(*,r0)" using a direct branch to disp, so that we can
2971        goto_tb to the TB containing the syscall.  */
2972     if (b == 0) {
2973         return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2974     } else {
2975         TCGv tmp = get_temp(ctx);
2976         tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2977         return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2978     }
2979 }
2980 
2981 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2982                               const DisasInsn *di)
2983 {
2984     unsigned n = extract32(insn, 1, 1);
2985     unsigned link = extract32(insn, 21, 5);
2986     target_long disp = assemble_17(insn);
2987 
2988     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2989 }
2990 
2991 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2992                                    const DisasInsn *di)
2993 {
2994     unsigned n = extract32(insn, 1, 1);
2995     target_long disp = assemble_22(insn);
2996 
2997     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2998 }
2999 
3000 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
3001                                const DisasInsn *di)
3002 {
3003     unsigned n = extract32(insn, 1, 1);
3004     unsigned rx = extract32(insn, 16, 5);
3005     unsigned link = extract32(insn, 21, 5);
3006     TCGv tmp = get_temp(ctx);
3007 
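         /* The BLR target is IAOQ_Front + 8 + 8 * GR[rx].  */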
3008     tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3009     tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3010     return do_ibranch(ctx, tmp, link, n);
3011 }
3012 
3013 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3014                               const DisasInsn *di)
3015 {
3016     unsigned n = extract32(insn, 1, 1);
3017     unsigned rx = extract32(insn, 16, 5);
3018     unsigned rb = extract32(insn, 21, 5);
3019     TCGv dest;
3020 
3021     if (rx == 0) {
3022         dest = load_gpr(ctx, rb);
3023     } else {
3024         dest = get_temp(ctx);
3025         tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3026         tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3027     }
3028     return do_ibranch(ctx, dest, 0, n);
3029 }
3030 
3031 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3032                                const DisasInsn *di)
3033 {
3034     unsigned n = extract32(insn, 1, 1);
3035     unsigned rb = extract32(insn, 21, 5);
3036     unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3037 
3038     return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3039 }
3040 
3041 static const DisasInsn table_branch[] = {
3042     { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3043     { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3044     { 0xe8004000u, 0xfc00fffdu, trans_blr },
3045     { 0xe800c000u, 0xfc00fffdu, trans_bv },
3046     { 0xe800d000u, 0xfc00dffcu, trans_bve },
3047 };
3048 
3049 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3050                                       const DisasInsn *di)
3051 {
3052     unsigned rt = extract32(insn, 0, 5);
3053     unsigned ra = extract32(insn, 21, 5);
3054     return do_fop_wew(ctx, rt, ra, di->f.wew);
3055 }
3056 
3057 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3058                                       const DisasInsn *di)
3059 {
3060     unsigned rt = assemble_rt64(insn);
3061     unsigned ra = assemble_ra64(insn);
3062     return do_fop_wew(ctx, rt, ra, di->f.wew);
3063 }
3064 
3065 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3066                                    const DisasInsn *di)
3067 {
3068     unsigned rt = extract32(insn, 0, 5);
3069     unsigned ra = extract32(insn, 21, 5);
3070     return do_fop_ded(ctx, rt, ra, di->f.ded);
3071 }
3072 
3073 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3074                                       const DisasInsn *di)
3075 {
3076     unsigned rt = extract32(insn, 0, 5);
3077     unsigned ra = extract32(insn, 21, 5);
3078     return do_fop_wed(ctx, rt, ra, di->f.wed);
3079 }
3080 
3081 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3082                                       const DisasInsn *di)
3083 {
3084     unsigned rt = assemble_rt64(insn);
3085     unsigned ra = extract32(insn, 21, 5);
3086     return do_fop_wed(ctx, rt, ra, di->f.wed);
3087 }
3088 
3089 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3090                                       const DisasInsn *di)
3091 {
3092     unsigned rt = extract32(insn, 0, 5);
3093     unsigned ra = extract32(insn, 21, 5);
3094     return do_fop_dew(ctx, rt, ra, di->f.dew);
3095 }
3096 
3097 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3098                                       const DisasInsn *di)
3099 {
3100     unsigned rt = extract32(insn, 0, 5);
3101     unsigned ra = assemble_ra64(insn);
3102     return do_fop_dew(ctx, rt, ra, di->f.dew);
3103 }
3104 
3105 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3106                                        const DisasInsn *di)
3107 {
3108     unsigned rt = extract32(insn, 0, 5);
3109     unsigned rb = extract32(insn, 16, 5);
3110     unsigned ra = extract32(insn, 21, 5);
3111     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3112 }
3113 
3114 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3115                                        const DisasInsn *di)
3116 {
3117     unsigned rt = assemble_rt64(insn);
3118     unsigned rb = assemble_rb64(insn);
3119     unsigned ra = assemble_ra64(insn);
3120     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3121 }
3122 
3123 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3124                                     const DisasInsn *di)
3125 {
3126     unsigned rt = extract32(insn, 0, 5);
3127     unsigned rb = extract32(insn, 16, 5);
3128     unsigned ra = extract32(insn, 21, 5);
3129     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3130 }
3131 
3132 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3133 {
3134     tcg_gen_mov_i32(dst, src);
3135 }
3136 
3137 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3138 {
3139     tcg_gen_mov_i64(dst, src);
3140 }
3141 
3142 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3143 {
3144     tcg_gen_andi_i32(dst, src, INT32_MAX);
3145 }
3146 
3147 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3148 {
3149     tcg_gen_andi_i64(dst, src, INT64_MAX);
3150 }
3151 
3152 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3153 {
3154     tcg_gen_xori_i32(dst, src, INT32_MIN);
3155 }
3156 
3157 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3158 {
3159     tcg_gen_xori_i64(dst, src, INT64_MIN);
3160 }
3161 
3162 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3163 {
3164     tcg_gen_ori_i32(dst, src, INT32_MIN);
3165 }
3166 
3167 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3168 {
3169     tcg_gen_ori_i64(dst, src, INT64_MIN);
3170 }
3171 
3172 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3173                                unsigned y, unsigned c)
3174 {
3175     TCGv_i32 ta, tb, tc, ty;
3176 
3177     nullify_over(ctx);
3178 
3179     ta = load_frw0_i32(ra);
3180     tb = load_frw0_i32(rb);
3181     ty = tcg_const_i32(y);
3182     tc = tcg_const_i32(c);
3183 
3184     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3185 
3186     tcg_temp_free_i32(ta);
3187     tcg_temp_free_i32(tb);
3188     tcg_temp_free_i32(ty);
3189     tcg_temp_free_i32(tc);
3190 
3191     return nullify_end(ctx, DISAS_NEXT);
3192 }
3193 
3194 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3195                                      const DisasInsn *di)
3196 {
3197     unsigned c = extract32(insn, 0, 5);
3198     unsigned y = extract32(insn, 13, 3);
3199     unsigned rb = extract32(insn, 16, 5);
3200     unsigned ra = extract32(insn, 21, 5);
3201     return do_fcmp_s(ctx, ra, rb, y, c);
3202 }
3203 
3204 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3205                                      const DisasInsn *di)
3206 {
3207     unsigned c = extract32(insn, 0, 5);
3208     unsigned y = extract32(insn, 13, 3);
3209     unsigned rb = assemble_rb64(insn);
3210     unsigned ra = assemble_ra64(insn);
3211     return do_fcmp_s(ctx, ra, rb, y, c);
3212 }
3213 
3214 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3215                                   const DisasInsn *di)
3216 {
3217     unsigned c = extract32(insn, 0, 5);
3218     unsigned y = extract32(insn, 13, 3);
3219     unsigned rb = extract32(insn, 16, 5);
3220     unsigned ra = extract32(insn, 21, 5);
3221     TCGv_i64 ta, tb;
3222     TCGv_i32 tc, ty;
3223 
3224     nullify_over(ctx);
3225 
3226     ta = load_frd0(ra);
3227     tb = load_frd0(rb);
3228     ty = tcg_const_i32(y);
3229     tc = tcg_const_i32(c);
3230 
3231     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3232 
3233     tcg_temp_free_i64(ta);
3234     tcg_temp_free_i64(tb);
3235     tcg_temp_free_i32(ty);
3236     tcg_temp_free_i32(tc);
3237 
3238     return nullify_end(ctx, DISAS_NEXT);
3239 }
3240 
3241 static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
3242                                    const DisasInsn *di)
3243 {
3244     unsigned y = extract32(insn, 13, 3);
3245     unsigned cbit = (y ^ 1) - 1;
3246     TCGv t;
3247 
3248     nullify_over(ctx);
3249 
3250     t = tcg_temp_new();
3251     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3252     tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3253     ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3254     tcg_temp_free(t);
3255 
3256     return nullify_end(ctx, DISAS_NEXT);
3257 }
3258 
3259 static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
3260                                    const DisasInsn *di)
3261 {
3262     unsigned c = extract32(insn, 0, 5);
3263     int mask;
3264     bool inv = false;
3265     TCGv t;
3266 
3267     nullify_over(ctx);
3268 
3269     t = tcg_temp_new();
3270     tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3271 
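         /* 0x4000000 is the FPSR C bit as kept in fr0_shadow; the acc/rej
            masks below also include queued compare-result bits.  */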
3272     switch (c) {
3273     case 0: /* simple */
3274         tcg_gen_andi_tl(t, t, 0x4000000);
3275         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3276         goto done;
3277     case 2: /* rej */
3278         inv = true;
3279         /* fallthru */
3280     case 1: /* acc */
3281         mask = 0x43ff800;
3282         break;
3283     case 6: /* rej8 */
3284         inv = true;
3285         /* fallthru */
3286     case 5: /* acc8 */
3287         mask = 0x43f8000;
3288         break;
3289     case 9: /* acc6 */
3290         mask = 0x43e0000;
3291         break;
3292     case 13: /* acc4 */
3293         mask = 0x4380000;
3294         break;
3295     case 17: /* acc2 */
3296         mask = 0x4200000;
3297         break;
3298     default:
3299         return gen_illegal(ctx);
3300     }
3301     if (inv) {
3302         TCGv c = load_const(ctx, mask);
3303         tcg_gen_or_tl(t, t, c);
3304         ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3305     } else {
3306         tcg_gen_andi_tl(t, t, mask);
3307         ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3308     }
3309  done:
3310     return nullify_end(ctx, DISAS_NEXT);
3311 }
3312 
3313 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
3314                                  const DisasInsn *di)
3315 {
3316     unsigned rt = extract32(insn, 0, 5);
3317     unsigned rb = assemble_rb64(insn);
3318     unsigned ra = assemble_ra64(insn);
3319     TCGv_i64 a, b;
3320 
3321     nullify_over(ctx);
3322 
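         /* XMPYU multiplies the two 32-bit register images as unsigned
            integers, producing the full 64-bit product.  */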
3323     a = load_frw0_i64(ra);
3324     b = load_frw0_i64(rb);
3325     tcg_gen_mul_i64(a, a, b);
3326     save_frd(rt, a);
3327     tcg_temp_free_i64(a);
3328     tcg_temp_free_i64(b);
3329 
3330     return nullify_end(ctx, DISAS_NEXT);
3331 }
3332 
3333 #define FOP_DED  trans_fop_ded, .f.ded
3334 #define FOP_DEDD trans_fop_dedd, .f.dedd
3335 
3336 #define FOP_WEW  trans_fop_wew_0c, .f.wew
3337 #define FOP_DEW  trans_fop_dew_0c, .f.dew
3338 #define FOP_WED  trans_fop_wed_0c, .f.wed
3339 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3340 
3341 static const DisasInsn table_float_0c[] = {
3342     /* floating point class zero */
3343     { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3344     { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3345     { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3346     { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3347     { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3348     { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3349 
3350     { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3351     { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3352     { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3353     { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3354     { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3355     { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3356 
3357     /* floating point class three */
3358     { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3359     { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3360     { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3361     { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3362 
3363     { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3364     { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3365     { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3366     { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3367 
3368     /* floating point class one */
3369     /* float/float */
3370     { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3371     { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3372     /* int/float */
3373     { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3374     { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3375     { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3376     { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3377     /* float/int */
3378     { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3379     { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3380     { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3381     { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3382     /* float/int truncate */
3383     { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3384     { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3385     { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3386     { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3387     /* uint/float */
3388     { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3389     { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3390     { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3391     { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3392     /* float/uint */
3393     { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3394     { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3395     { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3396     { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3397     /* float/uint truncate */
3398     { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3399     { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3400     { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3401     { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3402 
3403     /* floating point class two */
3404     { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3405     { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3406     { 0x30002420, 0xffffffe0, trans_ftest_q },
3407     { 0x30000420, 0xffff1fff, trans_ftest_t },
3408 
3409     /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3410        This is machine/revision == 0, which is reserved for the simulator.  */
3411     { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3412 };
3413 
3414 #undef FOP_WEW
3415 #undef FOP_DEW
3416 #undef FOP_WED
3417 #undef FOP_WEWW
3418 #define FOP_WEW  trans_fop_wew_0e, .f.wew
3419 #define FOP_DEW  trans_fop_dew_0e, .f.dew
3420 #define FOP_WED  trans_fop_wed_0e, .f.wed
3421 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3422 
3423 static const DisasInsn table_float_0e[] = {
3424     /* floating point class zero */
3425     { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3426     { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3427     { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3428     { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3429     { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3430     { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3431 
3432     { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3433     { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3434     { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3435     { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3436     { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3437     { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3438 
3439     /* floating point class three */
3440     { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3441     { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3442     { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3443     { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3444 
3445     { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3446     { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3447     { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3448     { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3449 
3450     { 0x38004700, 0xfc00ef60, trans_xmpyu },
3451 
3452     /* floating point class one */
3453     /* float/float */
3454     { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3455     { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3456     /* int/float */
3457     { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3458     { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3459     { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3460     { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3461     /* float/int */
3462     { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3463     { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3464     { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3465     { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3466     /* float/int truncate */
3467     { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3468     { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3469     { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3470     { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3471     /* uint/float */
3472     { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3473     { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3474     { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3475     { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3476     /* float/uint */
3477     { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3478     { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3479     { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3480     { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3481     /* float/uint truncate */
3482     { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3483     { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3484     { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3485     { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3486 
3487     /* floating point class two */
3488     { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3489     { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3490 };
3491 
3492 #undef FOP_WEW
3493 #undef FOP_DEW
3494 #undef FOP_WED
3495 #undef FOP_WEWW
3496 #undef FOP_DED
3497 #undef FOP_DEDD
3498 
3499 /* Convert the fmpyadd single-precision register encodings to standard.  */
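     /* That is, encodings 0-15 select registers 16-31 (16 + r), and
        encodings 16-31 select registers 48-63 (32 + r).  */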
3500 static inline int fmpyadd_s_reg(unsigned r)
3501 {
3502     return (r & 16) * 2 + 16 + (r & 15);
3503 }
3504 
3505 static DisasJumpType trans_fmpyadd(DisasContext *ctx,
3506                                    uint32_t insn, bool is_sub)
3507 {
3508     unsigned tm = extract32(insn, 0, 5);
3509     unsigned f = extract32(insn, 5, 1);
3510     unsigned ra = extract32(insn, 6, 5);
3511     unsigned ta = extract32(insn, 11, 5);
3512     unsigned rm2 = extract32(insn, 16, 5);
3513     unsigned rm1 = extract32(insn, 21, 5);
3514 
3515     nullify_over(ctx);
3516 
3517     /* Independent multiply & add/sub, with undefined behaviour
3518        if outputs overlap inputs.  */
3519     if (f == 0) {
3520         tm = fmpyadd_s_reg(tm);
3521         ra = fmpyadd_s_reg(ra);
3522         ta = fmpyadd_s_reg(ta);
3523         rm2 = fmpyadd_s_reg(rm2);
3524         rm1 = fmpyadd_s_reg(rm1);
3525         do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3526         do_fop_weww(ctx, ta, ta, ra,
3527                     is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3528     } else {
3529         do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3530         do_fop_dedd(ctx, ta, ta, ra,
3531                     is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3532     }
3533 
3534     return nullify_end(ctx, DISAS_NEXT);
3535 }
3536 
3537 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3538                                       const DisasInsn *di)
3539 {
3540     unsigned rt = assemble_rt64(insn);
3541     unsigned neg = extract32(insn, 5, 1);
3542     unsigned rm1 = assemble_ra64(insn);
3543     unsigned rm2 = assemble_rb64(insn);
3544     unsigned ra3 = assemble_rc64(insn);
3545     TCGv_i32 a, b, c;
3546 
3547     nullify_over(ctx);
3548     a = load_frw0_i32(rm1);
3549     b = load_frw0_i32(rm2);
3550     c = load_frw0_i32(ra3);
3551 
3552     if (neg) {
3553         gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3554     } else {
3555         gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3556     }
3557 
3558     tcg_temp_free_i32(b);
3559     tcg_temp_free_i32(c);
3560     save_frw_i32(rt, a);
3561     tcg_temp_free_i32(a);
3562     return nullify_end(ctx, DISAS_NEXT);
3563 }
3564 
3565 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3566                                       const DisasInsn *di)
3567 {
3568     unsigned rt = extract32(insn, 0, 5);
3569     unsigned neg = extract32(insn, 5, 1);
3570     unsigned rm1 = extract32(insn, 21, 5);
3571     unsigned rm2 = extract32(insn, 16, 5);
3572     unsigned ra3 = assemble_rc64(insn);
3573     TCGv_i64 a, b, c;
3574 
3575     nullify_over(ctx);
3576     a = load_frd0(rm1);
3577     b = load_frd0(rm2);
3578     c = load_frd0(ra3);
3579 
3580     if (neg) {
3581         gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3582     } else {
3583         gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3584     }
3585 
3586     tcg_temp_free_i64(b);
3587     tcg_temp_free_i64(c);
3588     save_frd(rt, a);
3589     tcg_temp_free_i64(a);
3590     return nullify_end(ctx, DISAS_NEXT);
3591 }
3592 
3593 static const DisasInsn table_fp_fused[] = {
3594     { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3595     { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
3596 };
3597 
3598 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
3599                                          const DisasInsn table[], size_t n)
3600 {
3601     size_t i;
3602     for (i = 0; i < n; ++i) {
3603         if ((insn & table[i].mask) == table[i].insn) {
3604             return table[i].trans(ctx, insn, &table[i]);
3605         }
3606     }
3607     return gen_illegal(ctx);
3608 }
3609 
3610 #define translate_table(ctx, insn, table) \
3611     translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
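     /* E.g. translate_table(ctx, insn, table_float_0c) expands to
        translate_table_int(ctx, insn, table_float_0c,
                            ARRAY_SIZE(table_float_0c)).  */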
3612 
3613 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
3614 {
3615     uint32_t opc = extract32(insn, 26, 6);
3616 
3617     switch (opc) {
3618     case 0x00: /* system op */
3619         return translate_table(ctx, insn, table_system);
3620     case 0x01:
3621         return translate_table(ctx, insn, table_mem_mgmt);
3622     case 0x02:
3623         return translate_table(ctx, insn, table_arith_log);
3624     case 0x03:
3625         return translate_table(ctx, insn, table_index_mem);
3626     case 0x06:
3627         return trans_fmpyadd(ctx, insn, false);
3628     case 0x08:
3629         return trans_ldil(ctx, insn);
3630     case 0x09:
3631         return trans_copr_w(ctx, insn);
3632     case 0x0A:
3633         return trans_addil(ctx, insn);
3634     case 0x0B:
3635         return trans_copr_dw(ctx, insn);
3636     case 0x0C:
3637         return translate_table(ctx, insn, table_float_0c);
3638     case 0x0D:
3639         return trans_ldo(ctx, insn);
3640     case 0x0E:
3641         return translate_table(ctx, insn, table_float_0e);
3642 
3643     case 0x10:
3644         return trans_load(ctx, insn, false, MO_UB);
3645     case 0x11:
3646         return trans_load(ctx, insn, false, MO_TEUW);
3647     case 0x12:
3648         return trans_load(ctx, insn, false, MO_TEUL);
3649     case 0x13:
3650         return trans_load(ctx, insn, true, MO_TEUL);
3651     case 0x16:
3652         return trans_fload_mod(ctx, insn);
3653     case 0x17:
3654         return trans_load_w(ctx, insn);
3655     case 0x18:
3656         return trans_store(ctx, insn, false, MO_UB);
3657     case 0x19:
3658         return trans_store(ctx, insn, false, MO_TEUW);
3659     case 0x1A:
3660         return trans_store(ctx, insn, false, MO_TEUL);
3661     case 0x1B:
3662         return trans_store(ctx, insn, true, MO_TEUL);
3663     case 0x1E:
3664         return trans_fstore_mod(ctx, insn);
3665     case 0x1F:
3666         return trans_store_w(ctx, insn);
3667 
3668     case 0x20:
3669         return trans_cmpb(ctx, insn, true, false, false);
3670     case 0x21:
3671         return trans_cmpb(ctx, insn, true, true, false);
3672     case 0x22:
3673         return trans_cmpb(ctx, insn, false, false, false);
3674     case 0x23:
3675         return trans_cmpb(ctx, insn, false, true, false);
3676     case 0x24:
3677         return trans_cmpiclr(ctx, insn);
3678     case 0x25:
3679         return trans_subi(ctx, insn);
3680     case 0x26:
3681         return trans_fmpyadd(ctx, insn, true);
3682     case 0x27:
3683         return trans_cmpb(ctx, insn, true, false, true);
3684     case 0x28:
3685         return trans_addb(ctx, insn, true, false);
3686     case 0x29:
3687         return trans_addb(ctx, insn, true, true);
3688     case 0x2A:
3689         return trans_addb(ctx, insn, false, false);
3690     case 0x2B:
3691         return trans_addb(ctx, insn, false, true);
3692     case 0x2C:
3693     case 0x2D:
3694         return trans_addi(ctx, insn);
3695     case 0x2E:
3696         return translate_table(ctx, insn, table_fp_fused);
3697     case 0x2F:
3698         return trans_cmpb(ctx, insn, false, false, true);
3699 
3700     case 0x30:
3701     case 0x31:
3702         return trans_bb(ctx, insn);
3703     case 0x32:
3704         return trans_movb(ctx, insn, false);
3705     case 0x33:
3706         return trans_movb(ctx, insn, true);
3707     case 0x34:
3708         return translate_table(ctx, insn, table_sh_ex);
3709     case 0x35:
3710         return translate_table(ctx, insn, table_depw);
3711     case 0x38:
3712         return trans_be(ctx, insn, false);
3713     case 0x39:
3714         return trans_be(ctx, insn, true);
3715     case 0x3A:
3716         return translate_table(ctx, insn, table_branch);
3717 
3718     case 0x04: /* spopn */
3719     case 0x05: /* diag */
3720     case 0x0F: /* product specific */
3721         break;
3722 
3723     case 0x07: /* unassigned */
3724     case 0x15: /* unassigned */
3725     case 0x1D: /* unassigned */
3726     case 0x37: /* unassigned */
3727     case 0x3F: /* unassigned */
3728     default:
3729         break;
3730     }
3731     return gen_illegal(ctx);
3732 }
3733 
3734 static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
3735                                       CPUState *cs, int max_insns)
3736 {
3737     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3738     TranslationBlock *tb = ctx->base.tb;
3739     int i, bound;
3740 
3741     ctx->cs = cs;
3742     ctx->iaoq_f = tb->pc;
3743     ctx->iaoq_b = tb->cs_base;
3744     ctx->iaoq_n = -1;
3745     TCGV_UNUSED(ctx->iaoq_n_var);
3746 
3747     ctx->ntemps = 0;
3748     for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
3749         TCGV_UNUSED(ctx->temps[i]);
3750     }
3751 
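         /* Do not translate past the end of the current page:
            -(tb->pc | TARGET_PAGE_MASK) is the number of bytes remaining on
            the page, e.g. 12 bytes (3 insns) for a pc at offset 0xff4 of a
            4 KiB page.  */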
3752     bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
3753     return MIN(max_insns, bound);
3754 }
3755 
3756 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3757 {
3758     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3759 
3760     /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
3761     ctx->null_cond = cond_make_f();
3762     ctx->psw_n_nonzero = false;
3763     if (ctx->base.tb->flags & 1) {
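             /* PSW[N] is set on entry, so the first insn of this TB is
                nullified.  */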
3764         ctx->null_cond.c = TCG_COND_ALWAYS;
3765         ctx->psw_n_nonzero = true;
3766     }
3767     ctx->null_lab = NULL;
3768 }
3769 
3770 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3771 {
3772     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3773 
3774     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3775 }
3776 
3777 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3778                                       const CPUBreakpoint *bp)
3779 {
3780     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3781 
3782     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3783     ctx->base.pc_next = ctx->iaoq_f + 4;
3784     return true;
3785 }
3786 
3787 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3788 {
3789     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3790     CPUHPPAState *env = cs->env_ptr;
3791     DisasJumpType ret;
3792     int i, n;
3793 
3794     /* Execute one insn.  */
3795     if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
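             /* Addresses on page zero are emulated by do_page_zero rather
                than translated; see the entry points listed in
                hppa_tr_disas_log below.  */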
3796         ret = do_page_zero(ctx);
3797         assert(ret != DISAS_NEXT);
3798     } else {
3799         /* Always fetch the insn, even if nullified, so that we check
3800            the page permissions for execute.  */
3801         uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3802 
3803         /* Set up the IA queue for the next insn.
3804            This will be overwritten by a branch.  */
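             /* An iaoq_b of -1 means the back of the queue is only known at
                run time (it lives in cpu_iaoq_b), so iaoq_n must be computed
                dynamically as well.  */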
3805         if (ctx->iaoq_b == -1) {
3806             ctx->iaoq_n = -1;
3807             ctx->iaoq_n_var = get_temp(ctx);
3808             tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3809         } else {
3810             ctx->iaoq_n = ctx->iaoq_b + 4;
3811             TCGV_UNUSED(ctx->iaoq_n_var);
3812         }
3813 
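             /* If the current insn is statically known to be nullified, skip
                it entirely and clear the condition so the next insn executes
                normally.  */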
3814         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3815             ctx->null_cond.c = TCG_COND_NEVER;
3816             ret = DISAS_NEXT;
3817         } else {
3818             ret = translate_one(ctx, insn);
3819             assert(ctx->null_lab == NULL);
3820         }
3821     }
3822 
3823     /* Free any temporaries allocated.  */
3824     for (i = 0, n = ctx->ntemps; i < n; ++i) {
3825         tcg_temp_free(ctx->temps[i]);
3826         TCGV_UNUSED(ctx->temps[i]);
3827     }
3828     ctx->ntemps = 0;
3829 
3830     /* Advance the insn queue.  */
3831     /* ??? The non-linear instruction restriction is purely due to
3832        the debugging dump.  Otherwise we *could* follow unconditional
3833        branches within the same page.  */
3834     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3835         if (ctx->null_cond.c == TCG_COND_NEVER
3836             || ctx->null_cond.c == TCG_COND_ALWAYS) {
3837             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3838             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3839             ret = DISAS_NORETURN;
3840         } else {
3841             ret = DISAS_IAQ_N_STALE;
3842         }
3843     }
3844     ctx->iaoq_f = ctx->iaoq_b;
3845     ctx->iaoq_b = ctx->iaoq_n;
3846     ctx->base.is_jmp = ret;
3847 
3848     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3849         return;
3850     }
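         /* If the front of the queue has become dynamic, shift the whole
            queue into the cpu state and end the TB with an indirect jump.  */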
3851     if (ctx->iaoq_f == -1) {
3852         tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3853         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3854         nullify_save(ctx);
3855         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3856     } else if (ctx->iaoq_b == -1) {
3857         tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3858     }
3859 }
3860 
3861 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
3862 {
3863     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3864 
3865     switch (ctx->base.is_jmp) {
3866     case DISAS_NORETURN:
3867         break;
3868     case DISAS_TOO_MANY:
3869     case DISAS_IAQ_N_STALE:
3870         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
3871         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
3872         nullify_save(ctx);
3873         /* FALLTHRU */
3874     case DISAS_IAQ_N_UPDATED:
3875         if (ctx->base.singlestep_enabled) {
3876             gen_excp_1(EXCP_DEBUG);
3877         } else {
3878             tcg_gen_lookup_and_goto_ptr();
3879         }
3880         break;
3881     default:
3882         g_assert_not_reached();
3883     }
3884 
3885     /* We don't actually use this value during normal translation,
3886        but the generic main loop expects pc_next to be kept up to date.  */
3887     ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
3888 }
3889 
3890 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3891 {
3892     TranslationBlock *tb = dcbase->tb;
3893 
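         /* The page-zero entry points are emulated by do_page_zero rather
            than translated, so print their names instead of disassembling.  */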
3894     switch (tb->pc) {
3895     case 0x00:
3896         qemu_log("IN:\n0x00000000:  (null)\n");
3897         break;
3898     case 0xb0:
3899         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
3900         break;
3901     case 0xe0:
3902         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
3903         break;
3904     case 0x100:
3905         qemu_log("IN:\n0x00000100:  syscall\n");
3906         break;
3907     default:
3908         qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3909         log_target_disas(cs, tb->pc, tb->size, 1);
3910         break;
3911     }
3912 }
3913 
3914 static const TranslatorOps hppa_tr_ops = {
3915     .init_disas_context = hppa_tr_init_disas_context,
3916     .tb_start           = hppa_tr_tb_start,
3917     .insn_start         = hppa_tr_insn_start,
3918     .breakpoint_check   = hppa_tr_breakpoint_check,
3919     .translate_insn     = hppa_tr_translate_insn,
3920     .tb_stop            = hppa_tr_tb_stop,
3921     .disas_log          = hppa_tr_disas_log,
3922 };
3923 
3924 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3926 {
3927     DisasContext ctx;
3928     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3929 }
3930 
3931 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3932                           target_ulong *data)
3933 {
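         /* data[] holds the (iaoq_f, iaoq_b) pair recorded by
            tcg_gen_insn_start; a back-queue value of -1 means it was not
            known at translation time.  */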
3934     env->iaoq_f = data[0];
3935     if (data[1] != -1) {
3936         env->iaoq_b = data[1];
3937     }
3938     /* Since we were executing the instruction at IAOQ_F, and took some
3939        sort of action that provoked the cpu_restore_state, we can infer
3940        that the instruction was not nullified.  */
3941     env->psw_n = 0;
3942 }
3943