/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
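
/*
 * Illustration: an SSM/RSM immediate with the SM-numbered E or W bit set
 * comes back with the architectural PSW_E or PSW_W bit set instead; all
 * other bits pass through unchanged.
 */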

/* The space register field is passed inverted, so that sp == 0 still
   means "infer the space from the base register", while an explicitly
   encoded sr0 arrives as ~0; see space_select.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
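
/*
 * Illustration: m:a = 00 or 01 -> 0 (no base update); m:a = 10 -> 1
 * (post-modify); m:a = 11 -> -1 (pre-modify).
 */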

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
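
/*
 * Illustration: val = 0b100000 (x = 1, y = 0) gives (32 ^ 31) + 1 = 64
 * = 32 * 1 + 32 - 0; val = 0b00101 (x = 0, y = 5) gives (5 ^ 31) + 1
 * = 27 = 32 - 5.
 */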

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}
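
/*
 * Illustration: a doubleword compare-and-branch with c = 0 encodes
 * "*<<"; returning 4 selects the unsigned "<<" comparison
 * (TCG_COND_LTU) in do_sub_cond below.
 */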


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
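
/*
 * Illustration: with PSW_W set (64-bit addressing) the offset mask is
 * 0x3fffffffffffffff (62 bits); otherwise it is 0xffffffff (32 bits).
 */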

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
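
/*
 * Illustration: orig = 6 gives c = 2, f = 1, i.e. logical condition 5
 * (">="), the negation of "<"; orig = 3 gives c = 7, f = 0, i.e.
 * logical condition 14 ("OD").
 */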

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
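        /*
         * Illustration (d=0): for res = 0x12003456, the zero byte yields
         * (res - 0x01010101) & ~res & 0x80808080
         * = 0x10ff3355 & 0xedffcba9 & 0x80808080 = 0x00800000 != 0.
         */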
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
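
/*
 * SV = (res ^ in1) & ~(in1 ^ in2): overflow iff the operands have the
 * same sign and the result differs.  E.g. INT64_MAX + INT64_MAX wraps
 * to -2; in1 ^ in2 = 0 and res ^ in1 has the sign bit set, so SV < 0.
 */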

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
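        /* cb_msb = 1 - borrow, i.e. the carry out of in1 + ~in2 + 1. */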
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
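    /* e.g. top address bits 0b10 give tmp = 020 = 2 * sizeof(uint64_t),
       indexing sr[6] relative to sr[4] below. */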
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
1783            branching.  Since IAOQ_F is not really live at this point, we
1784            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1785         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1786         next = tcg_temp_new_i64();
1787         tcg_gen_addi_i64(next, dest, 4);
1788         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1789 
1790         nullify_over(ctx);
1791         if (link != 0) {
1792             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1793         }
1794         tcg_gen_lookup_and_goto_ptr();
1795         return nullify_end(ctx);
1796     } else {
1797         c = ctx->null_cond.c;
1798         a0 = ctx->null_cond.a0;
1799         a1 = ctx->null_cond.a1;
1800 
1801         tmp = tcg_temp_new_i64();
1802         next = tcg_temp_new_i64();
1803 
1804         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1805         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1806         ctx->iaoq_n = -1;
1807         ctx->iaoq_n_var = next;
1808 
1809         if (link != 0) {
1810             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1811         }
1812 
1813         if (is_n) {
1814             /* The branch nullifies the next insn, which means the state of N
1815                after the branch is the inverse of the state of N that applied
1816                to the branch.  */
1817             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1818             cond_free(&ctx->null_cond);
1819             ctx->null_cond = cond_make_n();
1820             ctx->psw_n_nonzero = true;
1821         } else {
1822             cond_free(&ctx->null_cond);
1823         }
1824     }
1825     return true;
1826 }
1827 
1828 /* Implement
1829  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1830  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1831  *    else
1832  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1833  * which keeps the privilege level from being increased.
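      *
      * Illustrative example: at privilege 1, a target whose offset has
      * low bits 0 yields dest = offset | 1 > offset, so the movcond in
      * the general case keeps dest and the branch runs at privilege 1;
      * low bits 3 leave offset the larger value and the privilege drops
      * to 3.  The new level is effectively max(privilege, offset & 3).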
1834  */
1835 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1836 {
1837     TCGv_i64 dest;
1838     switch (ctx->privilege) {
1839     case 0:
1840         /* Privilege 0 is maximum and is allowed to decrease.  */
1841         return offset;
1842     case 3:
1843         /* Privilege 3 is minimum and is never allowed to increase.  */
1844         dest = tcg_temp_new_i64();
1845         tcg_gen_ori_i64(dest, offset, 3);
1846         break;
1847     default:
1848         dest = tcg_temp_new_i64();
1849         tcg_gen_andi_i64(dest, offset, -4);
1850         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1851         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1852         break;
1853     }
1854     return dest;
1855 }
1856 
1857 #ifdef CONFIG_USER_ONLY
1858 /* On Linux, page zero is normally marked execute only + gateway.
1859    Therefore normal read or write is supposed to fail, but specific
1860    offsets have kernel code mapped to raise permissions to implement
1861    system calls.  Handling this via an explicit check here, rather
1862    than in the "be disp(sr2,r0)" instruction that probably sent us
1863    here, is the easiest way to handle the branch delay slot on the
1864    aforementioned BE.  */
1865 static void do_page_zero(DisasContext *ctx)
1866 {
1867     TCGv_i64 tmp;
1868 
1869     /* If by some means we get here with PSW[N]=1, that implies that
1870        the B,GATE instruction would be skipped, and we'd fault on the
1871        next insn within the privileged page.  */
1872     switch (ctx->null_cond.c) {
1873     case TCG_COND_NEVER:
1874         break;
1875     case TCG_COND_ALWAYS:
1876         tcg_gen_movi_i64(cpu_psw_n, 0);
1877         goto do_sigill;
1878     default:
1879         /* Since this is always the first (and only) insn within the
1880            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1881         g_assert_not_reached();
1882     }
1883 
1884     /* Check that we didn't arrive here via some means that allowed
1885        non-sequential instruction execution.  Normally the PSW[B] bit
1886        detects this by preventing the B,GATE instruction from executing
1887        under such conditions.  */
1888     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1889         goto do_sigill;
1890     }
1891 
1892     switch (ctx->iaoq_f & -4) {
1893     case 0x00: /* Null pointer call */
1894         gen_excp_1(EXCP_IMP);
1895         ctx->base.is_jmp = DISAS_NORETURN;
1896         break;
1897 
1898     case 0xb0: /* LWS */
1899         gen_excp_1(EXCP_SYSCALL_LWS);
1900         ctx->base.is_jmp = DISAS_NORETURN;
1901         break;
1902 
1903     case 0xe0: /* SET_THREAD_POINTER */
1904         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1905         tmp = tcg_temp_new_i64();
1906         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1907         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1908         tcg_gen_addi_i64(tmp, tmp, 4);
1909         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1910         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1911         break;
1912 
1913     case 0x100: /* SYSCALL */
1914         gen_excp_1(EXCP_SYSCALL);
1915         ctx->base.is_jmp = DISAS_NORETURN;
1916         break;
1917 
1918     default:
1919     do_sigill:
1920         gen_excp_1(EXCP_ILL);
1921         ctx->base.is_jmp = DISAS_NORETURN;
1922         break;
1923     }
1924 }
1925 #endif
1926 
1927 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1928 {
1929     cond_free(&ctx->null_cond);
1930     return true;
1931 }
1932 
1933 static bool trans_break(DisasContext *ctx, arg_break *a)
1934 {
1935     return gen_excp_iir(ctx, EXCP_BREAK);
1936 }
1937 
1938 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1939 {
1940     /* No point in nullifying the memory barrier.  */
1941     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1942 
1943     cond_free(&ctx->null_cond);
1944     return true;
1945 }
1946 
1947 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1948 {
1949     unsigned rt = a->t;
1950     TCGv_i64 tmp = dest_gpr(ctx, rt);
1951     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1952     save_gpr(ctx, rt, tmp);
1953 
1954     cond_free(&ctx->null_cond);
1955     return true;
1956 }
1957 
1958 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1959 {
1960     unsigned rt = a->t;
1961     unsigned rs = a->sp;
1962     TCGv_i64 t0 = tcg_temp_new_i64();
1963 
1964     load_spr(ctx, t0, rs);
1965     tcg_gen_shri_i64(t0, t0, 32);
1966 
1967     save_gpr(ctx, rt, t0);
1968 
1969     cond_free(&ctx->null_cond);
1970     return true;
1971 }
1972 
1973 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1974 {
1975     unsigned rt = a->t;
1976     unsigned ctl = a->r;
1977     TCGv_i64 tmp;
1978 
1979     switch (ctl) {
1980     case CR_SAR:
1981         if (a->e == 0) {
1982             /* MFSAR without ,W masks low 5 bits.  */
1983             tmp = dest_gpr(ctx, rt);
1984             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1985             save_gpr(ctx, rt, tmp);
1986             goto done;
1987         }
1988         save_gpr(ctx, rt, cpu_sar);
1989         goto done;
1990     case CR_IT: /* Interval Timer */
1991         /* FIXME: Respect PSW_S bit.  */
1992         nullify_over(ctx);
1993         tmp = dest_gpr(ctx, rt);
1994         if (translator_io_start(&ctx->base)) {
1995             gen_helper_read_interval_timer(tmp);
1996             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1997         } else {
1998             gen_helper_read_interval_timer(tmp);
1999         }
2000         save_gpr(ctx, rt, tmp);
2001         return nullify_end(ctx);
2002     case 26:
2003     case 27:
2004         break;
2005     default:
2006         /* All other control registers are privileged.  */
2007         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2008         break;
2009     }
2010 
2011     tmp = tcg_temp_new_i64();
2012     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2013     save_gpr(ctx, rt, tmp);
2014 
2015  done:
2016     cond_free(&ctx->null_cond);
2017     return true;
2018 }
2019 
2020 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2021 {
2022     unsigned rr = a->r;
2023     unsigned rs = a->sp;
2024     TCGv_i64 tmp;
2025 
2026     if (rs >= 5) {
2027         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2028     }
2029     nullify_over(ctx);
2030 
2031     tmp = tcg_temp_new_i64();
2032     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2033 
2034     if (rs >= 4) {
2035         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2036         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2037     } else {
2038         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2039     }
2040 
2041     return nullify_end(ctx);
2042 }
2043 
2044 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2045 {
2046     unsigned ctl = a->t;
2047     TCGv_i64 reg;
2048     TCGv_i64 tmp;
2049 
2050     if (ctl == CR_SAR) {
2051         reg = load_gpr(ctx, a->r);
2052         tmp = tcg_temp_new_i64();
2053         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2054         save_or_nullify(ctx, cpu_sar, tmp);
2055 
2056         cond_free(&ctx->null_cond);
2057         return true;
2058     }
2059 
2060     /* All other control registers are privileged or read-only.  */
2061     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2062 
2063 #ifndef CONFIG_USER_ONLY
2064     nullify_over(ctx);
2065     reg = load_gpr(ctx, a->r);
2066 
2067     switch (ctl) {
2068     case CR_IT:
2069         gen_helper_write_interval_timer(tcg_env, reg);
2070         break;
2071     case CR_EIRR:
2072         gen_helper_write_eirr(tcg_env, reg);
2073         break;
2074     case CR_EIEM:
2075         gen_helper_write_eiem(tcg_env, reg);
2076         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2077         break;
2078 
2079     case CR_IIASQ:
2080     case CR_IIAOQ:
2081         /* FIXME: Respect PSW_Q bit */
2082         /* The write advances the queue and stores to the back element.  */
2083         tmp = tcg_temp_new_i64();
2084         tcg_gen_ld_i64(tmp, tcg_env,
2085                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2086         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2087         tcg_gen_st_i64(reg, tcg_env,
2088                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2089         break;
2090 
2091     case CR_PID1:
2092     case CR_PID2:
2093     case CR_PID3:
2094     case CR_PID4:
2095         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2096 #ifndef CONFIG_USER_ONLY
2097         gen_helper_change_prot_id(tcg_env);
2098 #endif
2099         break;
2100 
2101     default:
2102         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2103         break;
2104     }
2105     return nullify_end(ctx);
2106 #endif
2107 }
2108 
2109 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2110 {
2111     TCGv_i64 tmp = tcg_temp_new_i64();
2112 
2113     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2114     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2115     save_or_nullify(ctx, cpu_sar, tmp);
2116 
2117     cond_free(&ctx->null_cond);
2118     return true;
2119 }
2120 
2121 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2122 {
2123     TCGv_i64 dest = dest_gpr(ctx, a->t);
2124 
2125 #ifdef CONFIG_USER_ONLY
2126     /* We don't implement space registers in user mode. */
2127     tcg_gen_movi_i64(dest, 0);
2128 #else
2129     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2130     tcg_gen_shri_i64(dest, dest, 32);
2131 #endif
2132     save_gpr(ctx, a->t, dest);
2133 
2134     cond_free(&ctx->null_cond);
2135     return true;
2136 }
2137 
2138 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2139 {
2140     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2141 #ifndef CONFIG_USER_ONLY
2142     TCGv_i64 tmp;
2143 
2144     nullify_over(ctx);
2145 
2146     tmp = tcg_temp_new_i64();
2147     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2148     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2149     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2150     save_gpr(ctx, a->t, tmp);
2151 
2152     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2153     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2154     return nullify_end(ctx);
2155 #endif
2156 }
2157 
2158 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2159 {
2160     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2161 #ifndef CONFIG_USER_ONLY
2162     TCGv_i64 tmp;
2163 
2164     nullify_over(ctx);
2165 
2166     tmp = tcg_temp_new_i64();
2167     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2168     tcg_gen_ori_i64(tmp, tmp, a->i);
2169     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2170     save_gpr(ctx, a->t, tmp);
2171 
2172     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2173     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2174     return nullify_end(ctx);
2175 #endif
2176 }
2177 
2178 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2179 {
2180     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2181 #ifndef CONFIG_USER_ONLY
2182     TCGv_i64 tmp, reg;
2183     nullify_over(ctx);
2184 
2185     reg = load_gpr(ctx, a->r);
2186     tmp = tcg_temp_new_i64();
2187     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2188 
2189     /* Exit the TB to recognize new interrupts.  */
2190     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2191     return nullify_end(ctx);
2192 #endif
2193 }
2194 
2195 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2196 {
2197     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2198 #ifndef CONFIG_USER_ONLY
2199     nullify_over(ctx);
2200 
2201     if (rfi_r) {
2202         gen_helper_rfi_r(tcg_env);
2203     } else {
2204         gen_helper_rfi(tcg_env);
2205     }
2206     /* Exit the TB to recognize new interrupts.  */
2207     tcg_gen_exit_tb(NULL, 0);
2208     ctx->base.is_jmp = DISAS_NORETURN;
2209 
2210     return nullify_end(ctx);
2211 #endif
2212 }
2213 
2214 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2215 {
2216     return do_rfi(ctx, false);
2217 }
2218 
2219 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2220 {
2221     return do_rfi(ctx, true);
2222 }
2223 
2224 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2225 {
2226     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2227 #ifndef CONFIG_USER_ONLY
2228     nullify_over(ctx);
2229     gen_helper_halt(tcg_env);
2230     ctx->base.is_jmp = DISAS_NORETURN;
2231     return nullify_end(ctx);
2232 #endif
2233 }
2234 
2235 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2236 {
2237     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2238 #ifndef CONFIG_USER_ONLY
2239     nullify_over(ctx);
2240     gen_helper_reset(tcg_env);
2241     ctx->base.is_jmp = DISAS_NORETURN;
2242     return nullify_end(ctx);
2243 #endif
2244 }
2245 
2246 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2247 {
2248     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2249 #ifndef CONFIG_USER_ONLY
2250     nullify_over(ctx);
2251     gen_helper_getshadowregs(tcg_env);
2252     return nullify_end(ctx);
2253 #endif
2254 }
2255 
2256 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2257 {
2258     if (a->m) {
2259         TCGv_i64 dest = dest_gpr(ctx, a->b);
2260         TCGv_i64 src1 = load_gpr(ctx, a->b);
2261         TCGv_i64 src2 = load_gpr(ctx, a->x);
2262 
2263         /* The only thing we need to do is the base register modification.  */
2264         tcg_gen_add_i64(dest, src1, src2);
2265         save_gpr(ctx, a->b, dest);
2266     }
2267     cond_free(&ctx->null_cond);
2268     return true;
2269 }
2270 
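     /*
      * PROBE tests whether a read or write at a given privilege level
      * would succeed, setting RT to 1 or 0 rather than faulting.  The
      * immediate form encodes the level directly; the register form
      * takes it from the low two bits of a GPR.
      */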
2271 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2272 {
2273     TCGv_i64 dest, ofs;
2274     TCGv_i32 level, want;
2275     TCGv_i64 addr;
2276 
2277     nullify_over(ctx);
2278 
2279     dest = dest_gpr(ctx, a->t);
2280     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2281 
2282     if (a->imm) {
2283         level = tcg_constant_i32(a->ri);
2284     } else {
2285         level = tcg_temp_new_i32();
2286         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2287         tcg_gen_andi_i32(level, level, 3);
2288     }
2289     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2290 
2291     gen_helper_probe(dest, tcg_env, addr, level, want);
2292 
2293     save_gpr(ctx, a->t, dest);
2294     return nullify_end(ctx);
2295 }
2296 
2297 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2298 {
2299     if (ctx->is_pa20) {
2300         return false;
2301     }
2302     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2303 #ifndef CONFIG_USER_ONLY
2304     TCGv_i64 addr;
2305     TCGv_i64 ofs, reg;
2306 
2307     nullify_over(ctx);
2308 
2309     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2310     reg = load_gpr(ctx, a->r);
2311     if (a->addr) {
2312         gen_helper_itlba_pa11(tcg_env, addr, reg);
2313     } else {
2314         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2315     }
2316 
2317     /* Exit TB for TLB change if mmu is enabled.  */
2318     if (ctx->tb_flags & PSW_C) {
2319         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2320     }
2321     return nullify_end(ctx);
2322 #endif
2323 }
2324 
2325 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2326 {
2327     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2328 #ifndef CONFIG_USER_ONLY
2329     TCGv_i64 addr;
2330     TCGv_i64 ofs;
2331 
2332     nullify_over(ctx);
2333 
2334     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2335     if (a->m) {
2336         save_gpr(ctx, a->b, ofs);
2337     }
2338     if (a->local) {
2339         gen_helper_ptlbe(tcg_env);
2340     } else {
2341         gen_helper_ptlb(tcg_env, addr);
2342     }
2343 
2344     /* Exit TB for TLB change if mmu is enabled.  */
2345     if (ctx->tb_flags & PSW_C) {
2346         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2347     }
2348     return nullify_end(ctx);
2349 #endif
2350 }
2351 
2352 /*
2353  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2354  * See
2355  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2356  *     page 13-9 (195/206)
2357  */
2358 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2359 {
2360     if (ctx->is_pa20) {
2361         return false;
2362     }
2363     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2364 #ifndef CONFIG_USER_ONLY
2365     TCGv_i64 addr, atl, stl;
2366     TCGv_i64 reg;
2367 
2368     nullify_over(ctx);
2369 
2370     /*
2371      * FIXME:
2372      *  if (not (pcxl or pcxl2))
2373      *    return gen_illegal(ctx);
2374      */
2375 
2376     atl = tcg_temp_new_i64();
2377     stl = tcg_temp_new_i64();
2378     addr = tcg_temp_new_i64();
2379 
2380     tcg_gen_ld32u_i64(stl, tcg_env,
2381                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2382                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2383     tcg_gen_ld32u_i64(atl, tcg_env,
2384                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2385                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2386     tcg_gen_shli_i64(stl, stl, 32);
2387     tcg_gen_or_i64(addr, atl, stl);
2388 
2389     reg = load_gpr(ctx, a->r);
2390     if (a->addr) {
2391         gen_helper_itlba_pa11(tcg_env, addr, reg);
2392     } else {
2393         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2394     }
2395 
2396     /* Exit TB for TLB change if mmu is enabled.  */
2397     if (ctx->tb_flags & PSW_C) {
2398         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2399     }
2400     return nullify_end(ctx);
2401 #endif
2402 }
2403 
2404 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2405 {
2406     if (!ctx->is_pa20) {
2407         return false;
2408     }
2409     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2410 #ifndef CONFIG_USER_ONLY
2411     nullify_over(ctx);
2412     {
2413         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2414         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2415 
2416         if (a->data) {
2417             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2418         } else {
2419             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2420         }
2421     }
2422     /* Exit TB for TLB change if mmu is enabled.  */
2423     if (ctx->tb_flags & PSW_C) {
2424         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2425     }
2426     return nullify_end(ctx);
2427 #endif
2428 }
2429 
2430 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2431 {
2432     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2433 #ifndef CONFIG_USER_ONLY
2434     TCGv_i64 vaddr;
2435     TCGv_i64 ofs, paddr;
2436 
2437     nullify_over(ctx);
2438 
2439     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2440 
2441     paddr = tcg_temp_new_i64();
2442     gen_helper_lpa(paddr, tcg_env, vaddr);
2443 
2444     /* Note that the physical address result overrides the base modification.  */
2445     if (a->m) {
2446         save_gpr(ctx, a->b, ofs);
2447     }
2448     save_gpr(ctx, a->t, paddr);
2449 
2450     return nullify_end(ctx);
2451 #endif
2452 }
2453 
2454 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2455 {
2456     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2457 
2458     /* The Coherence Index is an implementation-defined function of the
2459        physical address.  Two addresses with the same CI have a coherent
2460        view of the cache.  Our implementation is to return 0 for all,
2461        since the entire address space is coherent.  */
2462     save_gpr(ctx, a->t, ctx->zero);
2463 
2464     cond_free(&ctx->null_cond);
2465     return true;
2466 }
2467 
2468 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2469 {
2470     return do_add_reg(ctx, a, false, false, false, false);
2471 }
2472 
2473 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2474 {
2475     return do_add_reg(ctx, a, true, false, false, false);
2476 }
2477 
2478 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2479 {
2480     return do_add_reg(ctx, a, false, true, false, false);
2481 }
2482 
2483 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2484 {
2485     return do_add_reg(ctx, a, false, false, false, true);
2486 }
2487 
2488 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2489 {
2490     return do_add_reg(ctx, a, false, true, false, true);
2491 }
2492 
2493 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2494 {
2495     return do_sub_reg(ctx, a, false, false, false);
2496 }
2497 
2498 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2499 {
2500     return do_sub_reg(ctx, a, true, false, false);
2501 }
2502 
2503 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2504 {
2505     return do_sub_reg(ctx, a, false, false, true);
2506 }
2507 
2508 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2509 {
2510     return do_sub_reg(ctx, a, true, false, true);
2511 }
2512 
2513 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2514 {
2515     return do_sub_reg(ctx, a, false, true, false);
2516 }
2517 
2518 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2519 {
2520     return do_sub_reg(ctx, a, true, true, false);
2521 }
2522 
2523 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2524 {
2525     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2526 }
2527 
2528 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2529 {
2530     return do_log_reg(ctx, a, tcg_gen_and_i64);
2531 }
2532 
2533 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2534 {
2535     if (a->cf == 0) {
2536         unsigned r2 = a->r2;
2537         unsigned r1 = a->r1;
2538         unsigned rt = a->t;
2539 
2540         if (rt == 0) { /* NOP */
2541             cond_free(&ctx->null_cond);
2542             return true;
2543         }
2544         if (r2 == 0) { /* COPY */
2545             if (r1 == 0) {
2546                 TCGv_i64 dest = dest_gpr(ctx, rt);
2547                 tcg_gen_movi_i64(dest, 0);
2548                 save_gpr(ctx, rt, dest);
2549             } else {
2550                 save_gpr(ctx, rt, cpu_gr[r1]);
2551             }
2552             cond_free(&ctx->null_cond);
2553             return true;
2554         }
2555 #ifndef CONFIG_USER_ONLY
2556         /* These are QEMU extensions and are nops in the real architecture:
2557          *
2558          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2559          * or %r31,%r31,%r31 -- death loop; offline cpu
2560          *                      currently implemented as idle.
2561          */
2562         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2563             /* No need to check for supervisor, as userland can only pause
2564                until the next timer interrupt.  */
2565             nullify_over(ctx);
2566 
2567             /* Advance the instruction queue.  */
2568             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2569             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2570             nullify_set(ctx, 0);
2571 
2572             /* Tell the qemu main loop to halt until this cpu has work.  */
2573             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2574                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2575             gen_excp_1(EXCP_HALTED);
2576             ctx->base.is_jmp = DISAS_NORETURN;
2577 
2578             return nullify_end(ctx);
2579         }
2580 #endif
2581     }
2582     return do_log_reg(ctx, a, tcg_gen_or_i64);
2583 }
2584 
2585 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2586 {
2587     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2588 }
2589 
2590 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2591 {
2592     TCGv_i64 tcg_r1, tcg_r2;
2593 
2594     if (a->cf) {
2595         nullify_over(ctx);
2596     }
2597     tcg_r1 = load_gpr(ctx, a->r1);
2598     tcg_r2 = load_gpr(ctx, a->r2);
2599     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2600     return nullify_end(ctx);
2601 }
2602 
2603 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2604 {
2605     TCGv_i64 tcg_r1, tcg_r2;
2606 
2607     if (a->cf) {
2608         nullify_over(ctx);
2609     }
2610     tcg_r1 = load_gpr(ctx, a->r1);
2611     tcg_r2 = load_gpr(ctx, a->r2);
2612     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2613     return nullify_end(ctx);
2614 }
2615 
2616 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2617 {
2618     TCGv_i64 tcg_r1, tcg_r2, tmp;
2619 
2620     if (a->cf) {
2621         nullify_over(ctx);
2622     }
2623     tcg_r1 = load_gpr(ctx, a->r1);
2624     tcg_r2 = load_gpr(ctx, a->r2);
2625     tmp = tcg_temp_new_i64();
2626     tcg_gen_not_i64(tmp, tcg_r2);
2627     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2628     return nullify_end(ctx);
2629 }
2630 
2631 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2632 {
2633     return do_uaddcm(ctx, a, false);
2634 }
2635 
2636 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2637 {
2638     return do_uaddcm(ctx, a, true);
2639 }
2640 
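     /*
      * Decimal correct: bit 3 of each nibble of PSW[CB] records whether
      * the corresponding BCD digit carried.  DCOR subtracts 6 from each
      * digit that did not carry; DCOR,I instead adds 6 to each digit
      * that did.  Illustrative example, BCD 15 + 27: bias with 0x66 so
      * that 0x15 + 0x66 + 0x27 = 0xa2 (only the low digit carried),
      * then subtract 6 from the non-carrying digit: 0xa2 - 0x60 = 0x42.
      */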
2641 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2642 {
2643     TCGv_i64 tmp;
2644 
2645     nullify_over(ctx);
2646 
2647     tmp = tcg_temp_new_i64();
2648     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2649     if (!is_i) {
2650         tcg_gen_not_i64(tmp, tmp);
2651     }
2652     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2653     tcg_gen_muli_i64(tmp, tmp, 6);
2654     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2655             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2656     return nullify_end(ctx);
2657 }
2658 
2659 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2660 {
2661     return do_dcor(ctx, a, false);
2662 }
2663 
2664 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2665 {
2666     return do_dcor(ctx, a, true);
2667 }
2668 
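     /*
      * Divide step: one iteration of non-restoring binary division.
      * The previous carry -- the quotient bit from the last step -- is
      * shifted into R1, and the divisor in R2 is then added to or
      * subtracted from the partial remainder according to the sign
      * saved in PSW[V]; executing one DS per quotient bit develops the
      * full quotient.
      */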
2669 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2670 {
2671     TCGv_i64 dest, add1, add2, addc, in1, in2;
2672     TCGv_i64 cout;
2673 
2674     nullify_over(ctx);
2675 
2676     in1 = load_gpr(ctx, a->r1);
2677     in2 = load_gpr(ctx, a->r2);
2678 
2679     add1 = tcg_temp_new_i64();
2680     add2 = tcg_temp_new_i64();
2681     addc = tcg_temp_new_i64();
2682     dest = tcg_temp_new_i64();
2683 
2684     /* Form R1 << 1 | PSW[CB]{8}.  */
2685     tcg_gen_add_i64(add1, in1, in1);
2686     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2687 
2688     /*
2689      * Add or subtract R2, depending on PSW[V].  Proper computation of
2690      * carry requires that we subtract via + ~R2 + 1, as described in
2691      * the manual.  By extracting and masking V, we can produce the
2692      * proper inputs to the addition without movcond.
2693      */
2694     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2695     tcg_gen_xor_i64(add2, in2, addc);
2696     tcg_gen_andi_i64(addc, addc, 1);
2697 
2698     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2699     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2700                      addc, ctx->zero);
2701 
2702     /* Write back the result register.  */
2703     save_gpr(ctx, a->t, dest);
2704 
2705     /* Write back PSW[CB].  */
2706     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2707     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2708 
2709     /* Write back PSW[V] for the division step.  */
2710     cout = get_psw_carry(ctx, false);
2711     tcg_gen_neg_i64(cpu_psw_v, cout);
2712     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2713 
2714     /* Install the new nullification.  */
2715     if (a->cf) {
2716         TCGv_i64 sv = NULL;
2717         if (cond_need_sv(a->cf >> 1)) {
2718             /* ??? The lshift is supposed to contribute to overflow.  */
2719             sv = do_add_sv(ctx, dest, add1, add2);
2720         }
2721         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2722     }
2723 
2724     return nullify_end(ctx);
2725 }
2726 
2727 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2728 {
2729     return do_add_imm(ctx, a, false, false);
2730 }
2731 
2732 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2733 {
2734     return do_add_imm(ctx, a, true, false);
2735 }
2736 
2737 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2738 {
2739     return do_add_imm(ctx, a, false, true);
2740 }
2741 
2742 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2743 {
2744     return do_add_imm(ctx, a, true, true);
2745 }
2746 
2747 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2748 {
2749     return do_sub_imm(ctx, a, false);
2750 }
2751 
2752 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2753 {
2754     return do_sub_imm(ctx, a, true);
2755 }
2756 
2757 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2758 {
2759     TCGv_i64 tcg_im, tcg_r2;
2760 
2761     if (a->cf) {
2762         nullify_over(ctx);
2763     }
2764 
2765     tcg_im = tcg_constant_i64(a->i);
2766     tcg_r2 = load_gpr(ctx, a->r);
2767     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2768 
2769     return nullify_end(ctx);
2770 }
2771 
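     /*
      * The PA-2.0 multimedia (MAX) instructions below treat a 64-bit
      * register as a vector of four halfwords; most map directly onto
      * TCG's vector-in-i64 operations or small helpers.
      */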
2772 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2773                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2774 {
2775     TCGv_i64 r1, r2, dest;
2776 
2777     if (!ctx->is_pa20) {
2778         return false;
2779     }
2780 
2781     nullify_over(ctx);
2782 
2783     r1 = load_gpr(ctx, a->r1);
2784     r2 = load_gpr(ctx, a->r2);
2785     dest = dest_gpr(ctx, a->t);
2786 
2787     fn(dest, r1, r2);
2788     save_gpr(ctx, a->t, dest);
2789 
2790     return nullify_end(ctx);
2791 }
2792 
2793 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2794                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2795 {
2796     TCGv_i64 r, dest;
2797 
2798     if (!ctx->is_pa20) {
2799         return false;
2800     }
2801 
2802     nullify_over(ctx);
2803 
2804     r = load_gpr(ctx, a->r);
2805     dest = dest_gpr(ctx, a->t);
2806 
2807     fn(dest, r, a->i);
2808     save_gpr(ctx, a->t, dest);
2809 
2810     return nullify_end(ctx);
2811 }
2812 
2813 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2814                                 void (*fn)(TCGv_i64, TCGv_i64,
2815                                            TCGv_i64, TCGv_i32))
2816 {
2817     TCGv_i64 r1, r2, dest;
2818 
2819     if (!ctx->is_pa20) {
2820         return false;
2821     }
2822 
2823     nullify_over(ctx);
2824 
2825     r1 = load_gpr(ctx, a->r1);
2826     r2 = load_gpr(ctx, a->r2);
2827     dest = dest_gpr(ctx, a->t);
2828 
2829     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2830     save_gpr(ctx, a->t, dest);
2831 
2832     return nullify_end(ctx);
2833 }
2834 
2835 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2836 {
2837     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2838 }
2839 
2840 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2841 {
2842     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2843 }
2844 
2845 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2846 {
2847     return do_multimedia(ctx, a, gen_helper_hadd_us);
2848 }
2849 
2850 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2851 {
2852     return do_multimedia(ctx, a, gen_helper_havg);
2853 }
2854 
2855 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2856 {
2857     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2858 }
2859 
2860 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2861 {
2862     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2863 }
2864 
2865 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2866 {
2867     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2868 }
2869 
2870 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2871 {
2872     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2873 }
2874 
2875 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2876 {
2877     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2878 }
2879 
2880 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2881 {
2882     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2883 }
2884 
2885 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2886 {
2887     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2888 }
2889 
2890 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2891 {
2892     return do_multimedia(ctx, a, gen_helper_hsub_us);
2893 }
2894 
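     /*
      * MIXH,L interleaves the left (even-numbered) halfwords of the
      * two sources: with r1 = {a0,a1,a2,a3} and r2 = {b0,b1,b2,b3},
      * halfwords numbered from the most significant end, the result
      * is {a0,b0,a2,b2}.  MIXH,R below yields {a1,b1,a3,b3}.
      */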
2895 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2896 {
2897     uint64_t mask = 0xffff0000ffff0000ull;
2898     TCGv_i64 tmp = tcg_temp_new_i64();
2899 
2900     tcg_gen_andi_i64(tmp, r2, mask);
2901     tcg_gen_andi_i64(dst, r1, mask);
2902     tcg_gen_shri_i64(tmp, tmp, 16);
2903     tcg_gen_or_i64(dst, dst, tmp);
2904 }
2905 
2906 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2907 {
2908     return do_multimedia(ctx, a, gen_mixh_l);
2909 }
2910 
2911 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2912 {
2913     uint64_t mask = 0x0000ffff0000ffffull;
2914     TCGv_i64 tmp = tcg_temp_new_i64();
2915 
2916     tcg_gen_andi_i64(tmp, r1, mask);
2917     tcg_gen_andi_i64(dst, r2, mask);
2918     tcg_gen_shli_i64(tmp, tmp, 16);
2919     tcg_gen_or_i64(dst, dst, tmp);
2920 }
2921 
2922 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2923 {
2924     return do_multimedia(ctx, a, gen_mixh_r);
2925 }
2926 
2927 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2928 {
2929     TCGv_i64 tmp = tcg_temp_new_i64();
2930 
2931     tcg_gen_shri_i64(tmp, r2, 32);
2932     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2933 }
2934 
2935 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2936 {
2937     return do_multimedia(ctx, a, gen_mixw_l);
2938 }
2939 
2940 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2941 {
2942     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2943 }
2944 
2945 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2946 {
2947     return do_multimedia(ctx, a, gen_mixw_r);
2948 }
2949 
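     /*
      * PERMH rearranges the four halfwords of R1 according to the
      * 2-bit selectors c0..c3, numbered from the most significant
      * end; e.g. c0=c1=c2=c3=0 broadcasts the high halfword to all
      * four positions.
      */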
2950 static bool trans_permh(DisasContext *ctx, arg_permh *a)
2951 {
2952     TCGv_i64 r, t0, t1, t2, t3;
2953 
2954     if (!ctx->is_pa20) {
2955         return false;
2956     }
2957 
2958     nullify_over(ctx);
2959 
2960     r = load_gpr(ctx, a->r1);
2961     t0 = tcg_temp_new_i64();
2962     t1 = tcg_temp_new_i64();
2963     t2 = tcg_temp_new_i64();
2964     t3 = tcg_temp_new_i64();
2965 
2966     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
2967     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
2968     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
2969     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
2970 
2971     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
2972     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
2973     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
2974 
2975     save_gpr(ctx, a->t, t0);
2976     return nullify_end(ctx);
2977 }
2978 
2979 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2980 {
2981     if (!ctx->is_pa20 && a->size > MO_32) {
2982         return gen_illegal(ctx);
2983     }
2984     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2985                    a->disp, a->sp, a->m, a->size | MO_TE);
2986 }
2987 
2988 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2989 {
2990     assert(a->x == 0 && a->scale == 0);
2991     if (!ctx->is_pa20 && a->size > MO_32) {
2992         return gen_illegal(ctx);
2993     }
2994     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2995 }
2996 
2997 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2998 {
2999     MemOp mop = MO_TE | MO_ALIGN | a->size;
3000     TCGv_i64 dest, ofs;
3001     TCGv_i64 addr;
3002 
3003     if (!ctx->is_pa20 && a->size > MO_32) {
3004         return gen_illegal(ctx);
3005     }
3006 
3007     nullify_over(ctx);
3008 
3009     if (a->m) {
3010         /* Base register modification.  Make sure that if RT == RB,
3011            we still see the result of the load.  */
3012         dest = tcg_temp_new_i64();
3013     } else {
3014         dest = dest_gpr(ctx, a->t);
3015     }
3016 
3017     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3018              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3019 
3020     /*
3021      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3022      * However, actual hardware succeeds when aligned mod 4.
3023      * Detect this case and log a GUEST_ERROR.
3024      *
3025      * TODO: HPPA64 relaxes the over-alignment requirement
3026      * with the ,co completer.
3027      */
3028     gen_helper_ldc_check(addr);
3029 
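         /* Load-and-clear is PA-RISC's only atomic read-modify-write
            primitive.  Modelling it as an atomic exchange with zero
            yields the old value in DEST and leaves zero in memory.  */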
3030     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3031 
3032     if (a->m) {
3033         save_gpr(ctx, a->b, ofs);
3034     }
3035     save_gpr(ctx, a->t, dest);
3036 
3037     return nullify_end(ctx);
3038 }
3039 
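     /*
      * STBY stores only those bytes of R that fall within the word
      * containing the effective address; the ,B (begin) and ,E (end)
      * forms cover the leading and trailing fragments of an unaligned
      * block copy.  Separate helpers are used under CF_PARALLEL so
      * that the partial store is performed atomically.  STDBY below
      * is the doubleword analogue.
      */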
3040 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3041 {
3042     TCGv_i64 ofs, val;
3043     TCGv_i64 addr;
3044 
3045     nullify_over(ctx);
3046 
3047     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3048              ctx->mmu_idx == MMU_PHYS_IDX);
3049     val = load_gpr(ctx, a->r);
3050     if (a->a) {
3051         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3052             gen_helper_stby_e_parallel(tcg_env, addr, val);
3053         } else {
3054             gen_helper_stby_e(tcg_env, addr, val);
3055         }
3056     } else {
3057         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3058             gen_helper_stby_b_parallel(tcg_env, addr, val);
3059         } else {
3060             gen_helper_stby_b(tcg_env, addr, val);
3061         }
3062     }
3063     if (a->m) {
3064         tcg_gen_andi_i64(ofs, ofs, ~3);
3065         save_gpr(ctx, a->b, ofs);
3066     }
3067 
3068     return nullify_end(ctx);
3069 }
3070 
3071 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3072 {
3073     TCGv_i64 ofs, val;
3074     TCGv_i64 addr;
3075 
3076     if (!ctx->is_pa20) {
3077         return false;
3078     }
3079     nullify_over(ctx);
3080 
3081     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3082              ctx->mmu_idx == MMU_PHYS_IDX);
3083     val = load_gpr(ctx, a->r);
3084     if (a->a) {
3085         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3086             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3087         } else {
3088             gen_helper_stdby_e(tcg_env, addr, val);
3089         }
3090     } else {
3091         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3092             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3093         } else {
3094             gen_helper_stdby_b(tcg_env, addr, val);
3095         }
3096     }
3097     if (a->m) {
3098         tcg_gen_andi_i64(ofs, ofs, ~7);
3099         save_gpr(ctx, a->b, ofs);
3100     }
3101 
3102     return nullify_end(ctx);
3103 }
3104 
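     /*
      * LDWA/LDDA access memory with address translation disabled.
      * Temporarily switching the context to MMU_PHYS_IDX lets the
      * ordinary load translator do the work with physical addresses;
      * STWA/STDA below does the same for stores.
      */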
3105 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3106 {
3107     int hold_mmu_idx = ctx->mmu_idx;
3108 
3109     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3110     ctx->mmu_idx = MMU_PHYS_IDX;
3111     trans_ld(ctx, a);
3112     ctx->mmu_idx = hold_mmu_idx;
3113     return true;
3114 }
3115 
3116 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3117 {
3118     int hold_mmu_idx = ctx->mmu_idx;
3119 
3120     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3121     ctx->mmu_idx = MMU_PHYS_IDX;
3122     trans_st(ctx, a);
3123     ctx->mmu_idx = hold_mmu_idx;
3124     return true;
3125 }
3126 
3127 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3128 {
3129     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3130 
3131     tcg_gen_movi_i64(tcg_rt, a->i);
3132     save_gpr(ctx, a->t, tcg_rt);
3133     cond_free(&ctx->null_cond);
3134     return true;
3135 }
3136 
3137 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3138 {
3139     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3140     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3141 
3142     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3143     save_gpr(ctx, 1, tcg_r1);
3144     cond_free(&ctx->null_cond);
3145     return true;
3146 }
3147 
3148 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3149 {
3150     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3151 
3152     /* Special-case rb == 0 for the LDI pseudo-op.
3153        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3154     if (a->b == 0) {
3155         tcg_gen_movi_i64(tcg_rt, a->i);
3156     } else {
3157         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3158     }
3159     save_gpr(ctx, a->t, tcg_rt);
3160     cond_free(&ctx->null_cond);
3161     return true;
3162 }
3163 
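     /*
      * Shared body of CMPB and CMPIB: compute IN1 - IN2, derive the
      * branch condition from the difference (computing signed overflow
      * only when the condition requires it), and emit the branch.
      */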
3164 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3165                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3166 {
3167     TCGv_i64 dest, in2, sv;
3168     DisasCond cond;
3169 
3170     in2 = load_gpr(ctx, r);
3171     dest = tcg_temp_new_i64();
3172 
3173     tcg_gen_sub_i64(dest, in1, in2);
3174 
3175     sv = NULL;
3176     if (cond_need_sv(c)) {
3177         sv = do_sub_sv(ctx, dest, in1, in2);
3178     }
3179 
3180     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3181     return do_cbranch(ctx, disp, n, &cond);
3182 }
3183 
3184 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3185 {
3186     if (!ctx->is_pa20 && a->d) {
3187         return false;
3188     }
3189     nullify_over(ctx);
3190     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3191                    a->c, a->f, a->d, a->n, a->disp);
3192 }
3193 
3194 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3195 {
3196     if (!ctx->is_pa20 && a->d) {
3197         return false;
3198     }
3199     nullify_over(ctx);
3200     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3201                    a->c, a->f, a->d, a->n, a->disp);
3202 }
3203 
3204 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3205                     unsigned c, unsigned f, unsigned n, int disp)
3206 {
3207     TCGv_i64 dest, in2, sv, cb_cond;
3208     DisasCond cond;
3209     bool d = false;
3210 
3211     /*
3212      * For hppa64, the ADDB conditions change with PSW.W,
3213      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3214      */
3215     if (ctx->tb_flags & PSW_W) {
3216         d = c >= 5;
3217         if (d) {
3218             c &= 3;
3219         }
3220     }
3221 
3222     in2 = load_gpr(ctx, r);
3223     dest = tcg_temp_new_i64();
3224     sv = NULL;
3225     cb_cond = NULL;
3226 
3227     if (cond_need_cb(c)) {
3228         TCGv_i64 cb = tcg_temp_new_i64();
3229         TCGv_i64 cb_msb = tcg_temp_new_i64();
3230 
3231         tcg_gen_movi_i64(cb_msb, 0);
3232         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3233         tcg_gen_xor_i64(cb, in1, in2);
3234         tcg_gen_xor_i64(cb, cb, dest);
3235         cb_cond = get_carry(ctx, d, cb, cb_msb);
3236     } else {
3237         tcg_gen_add_i64(dest, in1, in2);
3238     }
3239     if (cond_need_sv(c)) {
3240         sv = do_add_sv(ctx, dest, in1, in2);
3241     }
3242 
3243     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3244     save_gpr(ctx, r, dest);
3245     return do_cbranch(ctx, disp, n, &cond);
3246 }
3247 
3248 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3249 {
3250     nullify_over(ctx);
3251     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3252 }
3253 
3254 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3255 {
3256     nullify_over(ctx);
3257     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3258 }
3259 
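     /*
      * Branch on bit: shifting left by the (big-endian) bit position
      * moves the selected bit into the sign bit, so TCG_COND_LT tests
      * "bit set" and TCG_COND_GE tests "bit clear".
      */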
3260 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3261 {
3262     TCGv_i64 tmp, tcg_r;
3263     DisasCond cond;
3264 
3265     nullify_over(ctx);
3266 
3267     tmp = tcg_temp_new_i64();
3268     tcg_r = load_gpr(ctx, a->r);
3269     if (cond_need_ext(ctx, a->d)) {
3270         /* Force shift into [32,63] */
3271         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3272         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3273     } else {
3274         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3275     }
3276 
3277     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3278     return do_cbranch(ctx, a->disp, a->n, &cond);
3279 }
3280 
3281 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3282 {
3283     TCGv_i64 tmp, tcg_r;
3284     DisasCond cond;
3285     int p;
3286 
3287     nullify_over(ctx);
3288 
3289     tmp = tcg_temp_new_i64();
3290     tcg_r = load_gpr(ctx, a->r);
3291     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3292     tcg_gen_shli_i64(tmp, tcg_r, p);
3293 
3294     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3295     return do_cbranch(ctx, a->disp, a->n, &cond);
3296 }
3297 
3298 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3299 {
3300     TCGv_i64 dest;
3301     DisasCond cond;
3302 
3303     nullify_over(ctx);
3304 
3305     dest = dest_gpr(ctx, a->r2);
3306     if (a->r1 == 0) {
3307         tcg_gen_movi_i64(dest, 0);
3308     } else {
3309         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3310     }
3311 
3312     /* All MOVB conditions are 32-bit. */
3313     cond = do_sed_cond(ctx, a->c, false, dest);
3314     return do_cbranch(ctx, a->disp, a->n, &cond);
3315 }
3316 
3317 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3318 {
3319     TCGv_i64 dest;
3320     DisasCond cond;
3321 
3322     nullify_over(ctx);
3323 
3324     dest = dest_gpr(ctx, a->r);
3325     tcg_gen_movi_i64(dest, a->i);
3326 
3327     /* All MOVBI conditions are 32-bit. */
3328     cond = do_sed_cond(ctx, a->c, false, dest);
3329     return do_cbranch(ctx, a->disp, a->n, &cond);
3330 }
3331 
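     /*
      * SHRP forms the double-width value R1:R2 and shifts it right by
      * SAR (or the immediate), keeping the low word or doubleword.
      * Two special cases fall out: R1 == 0 is a plain logical shift
      * right, and R1 == R2 is a rotate right.
      */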
3332 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3333 {
3334     TCGv_i64 dest, src2;
3335 
3336     if (!ctx->is_pa20 && a->d) {
3337         return false;
3338     }
3339     if (a->c) {
3340         nullify_over(ctx);
3341     }
3342 
3343     dest = dest_gpr(ctx, a->t);
3344     src2 = load_gpr(ctx, a->r2);
3345     if (a->r1 == 0) {
3346         if (a->d) {
3347             tcg_gen_shr_i64(dest, src2, cpu_sar);
3348         } else {
3349             TCGv_i64 tmp = tcg_temp_new_i64();
3350 
3351             tcg_gen_ext32u_i64(dest, src2);
3352             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3353             tcg_gen_shr_i64(dest, dest, tmp);
3354         }
3355     } else if (a->r1 == a->r2) {
3356         if (a->d) {
3357             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3358         } else {
3359             TCGv_i32 t32 = tcg_temp_new_i32();
3360             TCGv_i32 s32 = tcg_temp_new_i32();
3361 
3362             tcg_gen_extrl_i64_i32(t32, src2);
3363             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3364             tcg_gen_andi_i32(s32, s32, 31);
3365             tcg_gen_rotr_i32(t32, t32, s32);
3366             tcg_gen_extu_i32_i64(dest, t32);
3367         }
3368     } else {
3369         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3370 
3371         if (a->d) {
3372             TCGv_i64 t = tcg_temp_new_i64();
3373             TCGv_i64 n = tcg_temp_new_i64();
3374 
3375             tcg_gen_xori_i64(n, cpu_sar, 63);
3376             tcg_gen_shl_i64(t, src2, n);
3377             tcg_gen_shli_i64(t, t, 1);
3378             tcg_gen_shr_i64(dest, src1, cpu_sar);
3379             tcg_gen_or_i64(dest, dest, t);
3380         } else {
3381             TCGv_i64 t = tcg_temp_new_i64();
3382             TCGv_i64 s = tcg_temp_new_i64();
3383 
3384             tcg_gen_concat32_i64(t, src2, src1);
3385             tcg_gen_andi_i64(s, cpu_sar, 31);
3386             tcg_gen_shr_i64(dest, t, s);
3387         }
3388     }
3389     save_gpr(ctx, a->t, dest);
3390 
3391     /* Install the new nullification.  */
3392     cond_free(&ctx->null_cond);
3393     if (a->c) {
3394         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3395     }
3396     return nullify_end(ctx);
3397 }
3398 
3399 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3400 {
3401     unsigned width, sa;
3402     TCGv_i64 dest, t2;
3403 
3404     if (!ctx->is_pa20 && a->d) {
3405         return false;
3406     }
3407     if (a->c) {
3408         nullify_over(ctx);
3409     }
3410 
3411     width = a->d ? 64 : 32;
3412     sa = width - 1 - a->cpos;
3413 
3414     dest = dest_gpr(ctx, a->t);
3415     t2 = load_gpr(ctx, a->r2);
3416     if (a->r1 == 0) {
3417         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3418     } else if (width == TARGET_LONG_BITS) {
3419         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3420     } else {
3421         assert(!a->d);
3422         if (a->r1 == a->r2) {
3423             TCGv_i32 t32 = tcg_temp_new_i32();
3424             tcg_gen_extrl_i64_i32(t32, t2);
3425             tcg_gen_rotri_i32(t32, t32, sa);
3426             tcg_gen_extu_i32_i64(dest, t32);
3427         } else {
3428             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3429             tcg_gen_extract_i64(dest, dest, sa, 32);
3430         }
3431     }
3432     save_gpr(ctx, a->t, dest);
3433 
3434     /* Install the new nullification.  */
3435     cond_free(&ctx->null_cond);
3436     if (a->c) {
3437         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3438     }
3439     return nullify_end(ctx);
3440 }
3441 
3442 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3443 {
3444     unsigned widthm1 = a->d ? 63 : 31;
3445     TCGv_i64 dest, src, tmp;
3446 
3447     if (!ctx->is_pa20 && a->d) {
3448         return false;
3449     }
3450     if (a->c) {
3451         nullify_over(ctx);
3452     }
3453 
3454     dest = dest_gpr(ctx, a->t);
3455     src = load_gpr(ctx, a->r);
3456     tmp = tcg_temp_new_i64();
3457 
3458     /* Recall that SAR uses big-endian bit numbering.  */
3459     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3460     tcg_gen_xori_i64(tmp, tmp, widthm1);
3461 
3462     if (a->se) {
3463         if (!a->d) {
3464             tcg_gen_ext32s_i64(dest, src);
3465             src = dest;
3466         }
3467         tcg_gen_sar_i64(dest, src, tmp);
3468         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3469     } else {
3470         if (!a->d) {
3471             tcg_gen_ext32u_i64(dest, src);
3472             src = dest;
3473         }
3474         tcg_gen_shr_i64(dest, src, tmp);
3475         tcg_gen_extract_i64(dest, dest, 0, a->len);
3476     }
3477     save_gpr(ctx, a->t, dest);
3478 
3479     /* Install the new nullification.  */
3480     cond_free(&ctx->null_cond);
3481     if (a->c) {
3482         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3483     }
3484     return nullify_end(ctx);
3485 }
3486 
3487 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3488 {
3489     unsigned len, cpos, width;
3490     TCGv_i64 dest, src;
3491 
3492     if (!ctx->is_pa20 && a->d) {
3493         return false;
3494     }
3495     if (a->c) {
3496         nullify_over(ctx);
3497     }
3498 
3499     len = a->len;
3500     width = a->d ? 64 : 32;
3501     cpos = width - 1 - a->pos;
3502     if (cpos + len > width) {
3503         len = width - cpos;
3504     }
3505 
3506     dest = dest_gpr(ctx, a->t);
3507     src = load_gpr(ctx, a->r);
3508     if (a->se) {
3509         tcg_gen_sextract_i64(dest, src, cpos, len);
3510     } else {
3511         tcg_gen_extract_i64(dest, src, cpos, len);
3512     }
3513     save_gpr(ctx, a->t, dest);
3514 
3515     /* Install the new nullification.  */
3516     cond_free(&ctx->null_cond);
3517     if (a->c) {
3518         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3519     }
3520     return nullify_end(ctx);
3521 }
3522 
3523 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3524 {
3525     unsigned len, width;
3526     uint64_t mask0, mask1;
3527     TCGv_i64 dest;
3528 
3529     if (!ctx->is_pa20 && a->d) {
3530         return false;
3531     }
3532     if (a->c) {
3533         nullify_over(ctx);
3534     }
3535 
3536     len = a->len;
3537     width = a->d ? 64 : 32;
3538     if (a->cpos + len > width) {
3539         len = width - a->cpos;
3540     }
3541 
3542     dest = dest_gpr(ctx, a->t);
3543     mask0 = deposit64(0, a->cpos, len, a->i);
3544     mask1 = deposit64(-1, a->cpos, len, a->i);
3545 
3546     if (a->nz) {
3547         TCGv_i64 src = load_gpr(ctx, a->t);
3548         tcg_gen_andi_i64(dest, src, mask1);
3549         tcg_gen_ori_i64(dest, dest, mask0);
3550     } else {
3551         tcg_gen_movi_i64(dest, mask0);
3552     }
3553     save_gpr(ctx, a->t, dest);
3554 
3555     /* Install the new nullification.  */
3556     cond_free(&ctx->null_cond);
3557     if (a->c) {
3558         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3559     }
3560     return nullify_end(ctx);
3561 }
3562 
3563 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3564 {
3565     unsigned rs = a->nz ? a->t : 0;
3566     unsigned len, width;
3567     TCGv_i64 dest, val;
3568 
3569     if (!ctx->is_pa20 && a->d) {
3570         return false;
3571     }
3572     if (a->c) {
3573         nullify_over(ctx);
3574     }
3575 
3576     len = a->len;
3577     width = a->d ? 64 : 32;
3578     if (a->cpos + len > width) {
3579         len = width - a->cpos;
3580     }
3581 
3582     dest = dest_gpr(ctx, a->t);
3583     val = load_gpr(ctx, a->r);
3584     if (rs == 0) {
3585         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3586     } else {
3587         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3588     }
3589     save_gpr(ctx, a->t, dest);
3590 
3591     /* Install the new nullification.  */
3592     cond_free(&ctx->null_cond);
3593     if (a->c) {
3594         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3595     }
3596     return nullify_end(ctx);
3597 }
3598 
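     /*
      * Variable deposit: insert the low LEN bits of VAL into RT at the
      * (big-endian) bit position given by SAR.  A mask selects the
      * field, field and mask are shifted into place, and the zeroing
      * forms produce just the shifted field while the others preserve
      * the remaining bits of RT via andc.
      */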
3599 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3600                        bool d, bool nz, unsigned len, TCGv_i64 val)
3601 {
3602     unsigned rs = nz ? rt : 0;
3603     unsigned widthm1 = d ? 63 : 31;
3604     TCGv_i64 mask, tmp, shift, dest;
3605     uint64_t msb = 1ULL << (len - 1);
3606 
3607     dest = dest_gpr(ctx, rt);
3608     shift = tcg_temp_new_i64();
3609     tmp = tcg_temp_new_i64();
3610 
3611     /* Convert big-endian bit numbering in SAR to left-shift.  */
3612     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3613     tcg_gen_xori_i64(shift, shift, widthm1);
3614 
3615     mask = tcg_temp_new_i64();
3616     tcg_gen_movi_i64(mask, msb + (msb - 1));
3617     tcg_gen_and_i64(tmp, val, mask);
3618     if (rs) {
3619         tcg_gen_shl_i64(mask, mask, shift);
3620         tcg_gen_shl_i64(tmp, tmp, shift);
3621         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3622         tcg_gen_or_i64(dest, dest, tmp);
3623     } else {
3624         tcg_gen_shl_i64(dest, tmp, shift);
3625     }
3626     save_gpr(ctx, rt, dest);
3627 
3628     /* Install the new nullification.  */
3629     cond_free(&ctx->null_cond);
3630     if (c) {
3631         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3632     }
3633     return nullify_end(ctx);
3634 }
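
/*
 * Illustrative sketch (not part of the translator): the and/xor pair above
 * converts SAR's big-endian bit count into a left-shift amount, using the
 * identity s ^ m == m - s whenever m is an all-ones mask (31 or 63) and
 * 0 <= s <= m.  The helper below is hypothetical.
 */
#if 0
static unsigned example_sar_to_shl(unsigned sar, bool d)
{
    unsigned widthm1 = d ? 63 : 31;
    unsigned s = sar & widthm1;

    /* XOR against an all-ones mask is subtraction from that mask.  */
    g_assert((s ^ widthm1) == widthm1 - s);
    return s ^ widthm1;
}
#endif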
3635 
3636 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3637 {
3638     if (!ctx->is_pa20 && a->d) {
3639         return false;
3640     }
3641     if (a->c) {
3642         nullify_over(ctx);
3643     }
3644     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3645                       load_gpr(ctx, a->r));
3646 }
3647 
3648 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3649 {
3650     if (!ctx->is_pa20 && a->d) {
3651         return false;
3652     }
3653     if (a->c) {
3654         nullify_over(ctx);
3655     }
3656     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3657                       tcg_constant_i64(a->i));
3658 }
3659 
3660 static bool trans_be(DisasContext *ctx, arg_be *a)
3661 {
3662     TCGv_i64 tmp;
3663 
3664 #ifdef CONFIG_USER_ONLY
3665     /* ??? It seems like there should be a good way of using
3666        "be disp(sr2, r0)", the canonical gateway entry mechanism
3667        to our advantage.  But that appears to be inconvenient to
3668        manage alongside branch delay slots.  Therefore we handle
3669        entry into the gateway page via absolute address.  */
3670     /* Since we don't implement spaces, just branch.  Do notice the special
3671        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3672        goto_tb to the TB containing the syscall.  */
3673     if (a->b == 0) {
3674         return do_dbranch(ctx, a->disp, a->l, a->n);
3675     }
3676 #else
3677     nullify_over(ctx);
3678 #endif
3679 
3680     tmp = tcg_temp_new_i64();
3681     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3682     tmp = do_ibranch_priv(ctx, tmp);
3683 
3684 #ifdef CONFIG_USER_ONLY
3685     return do_ibranch(ctx, tmp, a->l, a->n);
3686 #else
3687     TCGv_i64 new_spc = tcg_temp_new_i64();
3688 
3689     load_spr(ctx, new_spc, a->sp);
3690     if (a->l) {
3691         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3692         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3693     }
3694     if (a->n && use_nullify_skip(ctx)) {
3695         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3696         tcg_gen_addi_i64(tmp, tmp, 4);
3697         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3698         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3699         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3700     } else {
3701         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3702         if (ctx->iaoq_b == -1) {
3703             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3704         }
3705         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3706         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3707         nullify_set(ctx, a->n);
3708     }
3709     tcg_gen_lookup_and_goto_ptr();
3710     ctx->base.is_jmp = DISAS_NORETURN;
3711     return nullify_end(ctx);
3712 #endif
3713 }
3714 
3715 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3716 {
3717     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3718 }
3719 
3720 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3721 {
3722     uint64_t dest = iaoq_dest(ctx, a->disp);
3723 
3724     nullify_over(ctx);
3725 
3726     /* Make sure the caller hasn't done something weird with the queue.
3727      * ??? This is not quite the same as the PSW[B] bit, which would be
3728      * expensive to track.  Real hardware will trap for
3729      *    b  gateway
3730      *    b  gateway+4  (in delay slot of first branch)
3731      * However, checking for a non-sequential instruction queue *will*
3732      * diagnose the security hole
3733      *    b  gateway
3734      *    b  evil
3735      * in which instructions at evil would run with increased privs.
3736      */
3737     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3738         return gen_illegal(ctx);
3739     }
3740 
3741 #ifndef CONFIG_USER_ONLY
3742     if (ctx->tb_flags & PSW_C) {
3743         CPUHPPAState *env = cpu_env(ctx->cs);
3744         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3745         /* If we could not find a TLB entry, then we need to generate an
3746            ITLB miss exception so the kernel will install one.
3747            The resulting TLB fill operation will invalidate this TB and
3748            we will re-translate, at which point we *will* be able to find
3749            the TLB entry and determine if this is in fact a gateway page.  */
3750         if (type < 0) {
3751             gen_excp(ctx, EXCP_ITLB_MISS);
3752             return true;
3753         }
3754         /* No change for non-gateway pages or for priv decrease.  */
3755         if (type >= 4 && type - 4 < ctx->privilege) {
3756             dest = deposit32(dest, 0, 2, type - 4);
3757         }
3758     } else {
3759         dest &= -4;  /* priv = 0 */
3760     }
3761 #endif
3762 
3763     if (a->l) {
3764         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3765         if (ctx->privilege < 3) {
3766             tcg_gen_andi_i64(tmp, tmp, -4);
3767         }
3768         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3769         save_gpr(ctx, a->l, tmp);
3770     }
3771 
3772     return do_dbranch(ctx, dest, 0, a->n);
3773 }
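
/*
 * Illustrative sketch (not part of the translator): PA-RISC carries the
 * privilege level (0 most privileged .. 3 least) in the low two bits of
 * an instruction address, which is why the gateway path above deposits
 * "type - 4" into bits [1:0] of dest and why the link value is masked
 * and or'ed with the current privilege.  The address is hypothetical.
 */
#if 0
static void example_iaoq_priv(void)
{
    uint64_t iaoq = 0x12345670;    /* word-aligned branch target */
    unsigned new_priv = 1;         /* e.g. from a type-5 gateway page */

    uint64_t dest = deposit64(iaoq, 0, 2, new_priv);
    g_assert((dest & 3) == new_priv);      /* privilege in the low bits */
    g_assert((dest & -4) == iaoq);         /* address bits unchanged */
}
#endif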
3774 
3775 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3776 {
3777     if (a->x) {
3778         TCGv_i64 tmp = tcg_temp_new_i64();
3779         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3780         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3781         /* The computation here never changes privilege level.  */
3782         return do_ibranch(ctx, tmp, a->l, a->n);
3783     } else {
3784         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3785         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3786     }
3787 }
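
/*
 * Illustrative sketch (not part of the translator): with a nonzero index,
 * BLR targets the front instruction address plus 8, plus the index scaled
 * by 8 bytes (two instructions per step), matching the shli/addi above.
 * The helper is hypothetical.
 */
#if 0
static uint64_t example_blr_target(uint64_t iaoq_f, uint64_t x)
{
    return (x << 3) + iaoq_f + 8;    /* e.g. x = 2 -> iaoq_f + 24 */
}
#endif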
3788 
3789 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3790 {
3791     TCGv_i64 dest;
3792 
3793     if (a->x == 0) {
3794         dest = load_gpr(ctx, a->b);
3795     } else {
3796         dest = tcg_temp_new_i64();
3797         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3798         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3799     }
3800     dest = do_ibranch_priv(ctx, dest);
3801     return do_ibranch(ctx, dest, 0, a->n);
3802 }
3803 
3804 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3805 {
3806     TCGv_i64 dest;
3807 
3808 #ifdef CONFIG_USER_ONLY
3809     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3810     return do_ibranch(ctx, dest, a->l, a->n);
3811 #else
3812     nullify_over(ctx);
3813     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3814 
3815     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3816     if (ctx->iaoq_b == -1) {
3817         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3818     }
3819     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3820     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3821     if (a->l) {
3822         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3823     }
3824     nullify_set(ctx, a->n);
3825     tcg_gen_lookup_and_goto_ptr();
3826     ctx->base.is_jmp = DISAS_NORETURN;
3827     return nullify_end(ctx);
3828 #endif
3829 }
3830 
3831 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3832 {
3833     /* All branch target stack instructions are implemented as nops. */
3834     return ctx->is_pa20;
3835 }
3836 
3837 /*
3838  * Float class 0
3839  */
3840 
3841 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3842 {
3843     tcg_gen_mov_i32(dst, src);
3844 }
3845 
3846 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3847 {
3848     uint64_t ret;
3849 
3850     if (ctx->is_pa20) {
3851         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3852     } else {
3853         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3854     }
3855 
3856     nullify_over(ctx);
3857     save_frd(0, tcg_constant_i64(ret));
3858     return nullify_end(ctx);
3859 }
3860 
3861 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3862 {
3863     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3864 }
3865 
3866 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3867 {
3868     tcg_gen_mov_i64(dst, src);
3869 }
3870 
3871 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3872 {
3873     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3874 }
3875 
3876 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3877 {
3878     tcg_gen_andi_i32(dst, src, INT32_MAX);
3879 }
3880 
3881 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3882 {
3883     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3884 }
3885 
3886 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3887 {
3888     tcg_gen_andi_i64(dst, src, INT64_MAX);
3889 }
3890 
3891 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3892 {
3893     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3894 }
3895 
3896 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3897 {
3898     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3899 }
3900 
3901 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3902 {
3903     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3904 }
3905 
3906 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3907 {
3908     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3909 }
3910 
3911 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3912 {
3913     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3914 }
3915 
3916 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3917 {
3918     tcg_gen_xori_i32(dst, src, INT32_MIN);
3919 }
3920 
3921 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3922 {
3923     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3924 }
3925 
3926 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3927 {
3928     tcg_gen_xori_i64(dst, src, INT64_MIN);
3929 }
3930 
3931 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3932 {
3933     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3934 }
3935 
3936 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3937 {
3938     tcg_gen_ori_i32(dst, src, INT32_MIN);
3939 }
3940 
3941 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3942 {
3943     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3944 }
3945 
3946 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3947 {
3948     tcg_gen_ori_i64(dst, src, INT64_MIN);
3949 }
3950 
3951 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3952 {
3953     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3954 }
3955 
3956 /*
3957  * Float class 1
3958  */
3959 
3960 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3961 {
3962     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3963 }
3964 
3965 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3966 {
3967     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3968 }
3969 
3970 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3971 {
3972     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3973 }
3974 
3975 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3976 {
3977     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3978 }
3979 
3980 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3981 {
3982     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3983 }
3984 
3985 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3986 {
3987     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3988 }
3989 
3990 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3991 {
3992     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3993 }
3994 
3995 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3996 {
3997     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3998 }
3999 
4000 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4001 {
4002     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4003 }
4004 
4005 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4006 {
4007     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4008 }
4009 
4010 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4011 {
4012     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4013 }
4014 
4015 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4016 {
4017     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4018 }
4019 
4020 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4021 {
4022     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4023 }
4024 
4025 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4026 {
4027     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4028 }
4029 
4030 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4031 {
4032     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4033 }
4034 
4035 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4036 {
4037     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4038 }
4039 
4040 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4041 {
4042     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4043 }
4044 
4045 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4046 {
4047     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4048 }
4049 
4050 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4051 {
4052     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4053 }
4054 
4055 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4056 {
4057     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4058 }
4059 
4060 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4061 {
4062     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4063 }
4064 
4065 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4066 {
4067     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4068 }
4069 
4070 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4071 {
4072     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4073 }
4074 
4075 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4076 {
4077     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4078 }
4079 
4080 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4081 {
4082     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4083 }
4084 
4085 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4086 {
4087     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4088 }
4089 
4090 /*
4091  * Float class 2
4092  */
4093 
4094 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4095 {
4096     TCGv_i32 ta, tb, tc, ty;
4097 
4098     nullify_over(ctx);
4099 
4100     ta = load_frw0_i32(a->r1);
4101     tb = load_frw0_i32(a->r2);
4102     ty = tcg_constant_i32(a->y);
4103     tc = tcg_constant_i32(a->c);
4104 
4105     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4106 
4107     return nullify_end(ctx);
4108 }
4109 
4110 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4111 {
4112     TCGv_i64 ta, tb;
4113     TCGv_i32 tc, ty;
4114 
4115     nullify_over(ctx);
4116 
4117     ta = load_frd0(a->r1);
4118     tb = load_frd0(a->r2);
4119     ty = tcg_constant_i32(a->y);
4120     tc = tcg_constant_i32(a->c);
4121 
4122     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4123 
4124     return nullify_end(ctx);
4125 }
4126 
4127 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4128 {
4129     TCGv_i64 t;
4130 
4131     nullify_over(ctx);
4132 
4133     t = tcg_temp_new_i64();
4134     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4135 
4136     if (a->y == 1) {
4137         int mask;
4138         bool inv = false;
4139 
4140         switch (a->c) {
4141         case 0: /* simple */
4142             tcg_gen_andi_i64(t, t, 0x4000000);
4143             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4144             goto done;
4145         case 2: /* rej */
4146             inv = true;
4147             /* fallthru */
4148         case 1: /* acc */
4149             mask = 0x43ff800;
4150             break;
4151         case 6: /* rej8 */
4152             inv = true;
4153             /* fallthru */
4154         case 5: /* acc8 */
4155             mask = 0x43f8000;
4156             break;
4157         case 9: /* acc6 */
4158             mask = 0x43e0000;
4159             break;
4160         case 13: /* acc4 */
4161             mask = 0x4380000;
4162             break;
4163         case 17: /* acc2 */
4164             mask = 0x4200000;
4165             break;
4166         default:
4167             gen_illegal(ctx);
4168             return true;
4169         }
4170         if (inv) {
4171             TCGv_i64 c = tcg_constant_i64(mask);
4172             tcg_gen_or_i64(t, t, c);
4173             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4174         } else {
4175             tcg_gen_andi_i64(t, t, mask);
4176             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4177         }
4178     } else {
4179         unsigned cbit = (a->y ^ 1) - 1;
4180 
4181         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4182         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4183     }
4184 
4185  done:
4186     return nullify_end(ctx);
4187 }
4188 
4189 /*
4190  * Float class 3
4191  */
4192 
4193 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4194 {
4195     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4196 }
4197 
4198 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4199 {
4200     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4201 }
4202 
4203 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4204 {
4205     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4206 }
4207 
4208 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4209 {
4210     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4211 }
4212 
4213 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4214 {
4215     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4216 }
4217 
4218 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4219 {
4220     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4221 }
4222 
4223 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4224 {
4225     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4226 }
4227 
4228 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4229 {
4230     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4231 }
4232 
4233 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4234 {
4235     TCGv_i64 x, y;
4236 
4237     nullify_over(ctx);
4238 
4239     x = load_frw0_i64(a->r1);
4240     y = load_frw0_i64(a->r2);
4241     tcg_gen_mul_i64(x, x, y);
4242     save_frd(a->t, x);
4243 
4244     return nullify_end(ctx);
4245 }
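
/*
 * Illustrative sketch (not part of the translator): XMPYU is an unsigned
 * 32x32 -> 64 multiply; load_frw0_i64 is assumed here to zero-extend the
 * 32-bit operand (and to fold register 0 to zero), so one 64-bit multiply
 * suffices.  The helper is hypothetical.
 */
#if 0
static uint64_t example_xmpyu(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b;    /* e.g. 0xffffffffu * 2 == 0x1fffffffe */
}
#endif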
4246 
4247 /* Convert the fmpyadd single-precision register encodings to standard.  */
4248 static inline int fmpyadd_s_reg(unsigned r)
4249 {
4250     return (r & 16) * 2 + 16 + (r & 15);
4251 }
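
/*
 * Illustrative sketch (not part of the translator): in the 5-bit fmpyadd
 * single-precision encoding, bits 3:0 select fr16..fr31 and bit 4 is
 * assumed here to select the right half of the double register (bit 5 in
 * the standard numbering used by this file), giving the mapping below.
 */
#if 0
static void example_fmpyadd_s_reg(void)
{
    g_assert(fmpyadd_s_reg(0)  == 16);    /* fr16, left half */
    g_assert(fmpyadd_s_reg(15) == 31);    /* fr31, left half */
    g_assert(fmpyadd_s_reg(16) == 48);    /* fr16, right half */
    g_assert(fmpyadd_s_reg(31) == 63);    /* fr31, right half */
}
#endif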
4252 
4253 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4254 {
4255     int tm = fmpyadd_s_reg(a->tm);
4256     int ra = fmpyadd_s_reg(a->ra);
4257     int ta = fmpyadd_s_reg(a->ta);
4258     int rm2 = fmpyadd_s_reg(a->rm2);
4259     int rm1 = fmpyadd_s_reg(a->rm1);
4260 
4261     nullify_over(ctx);
4262 
4263     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4264     do_fop_weww(ctx, ta, ta, ra,
4265                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4266 
4267     return nullify_end(ctx);
4268 }
4269 
4270 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4271 {
4272     return do_fmpyadd_s(ctx, a, false);
4273 }
4274 
4275 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4276 {
4277     return do_fmpyadd_s(ctx, a, true);
4278 }
4279 
4280 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4281 {
4282     nullify_over(ctx);
4283 
4284     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4285     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4286                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4287 
4288     return nullify_end(ctx);
4289 }
4290 
4291 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4292 {
4293     return do_fmpyadd_d(ctx, a, false);
4294 }
4295 
4296 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4297 {
4298     return do_fmpyadd_d(ctx, a, true);
4299 }
4300 
4301 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4302 {
4303     TCGv_i32 x, y, z;
4304 
4305     nullify_over(ctx);
4306     x = load_frw0_i32(a->rm1);
4307     y = load_frw0_i32(a->rm2);
4308     z = load_frw0_i32(a->ra3);
4309 
4310     if (a->neg) {
4311         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4312     } else {
4313         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4314     }
4315 
4316     save_frw_i32(a->t, x);
4317     return nullify_end(ctx);
4318 }
4319 
4320 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4321 {
4322     TCGv_i64 x, y, z;
4323 
4324     nullify_over(ctx);
4325     x = load_frd0(a->rm1);
4326     y = load_frd0(a->rm2);
4327     z = load_frd0(a->ra3);
4328 
4329     if (a->neg) {
4330         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4331     } else {
4332         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4333     }
4334 
4335     save_frd(a->t, x);
4336     return nullify_end(ctx);
4337 }
4338 
4339 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4340 {
4341     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4342 #ifndef CONFIG_USER_ONLY
4343     if (a->i == 0x100) {
4344         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4345         nullify_over(ctx);
4346         gen_helper_diag_btlb(tcg_env);
4347         return nullify_end(ctx);
4348     }
4349 #endif
4350     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4351     return true;
4352 }
4353 
4354 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4355 {
4356     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4357     int bound;
4358 
4359     ctx->cs = cs;
4360     ctx->tb_flags = ctx->base.tb->flags;
4361     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4362 
4363 #ifdef CONFIG_USER_ONLY
4364     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4365     ctx->mmu_idx = MMU_USER_IDX;
4366     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4367     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4368     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4369 #else
4370     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4371     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4372                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4373                     : MMU_PHYS_IDX);
4374 
4375     /* Recover the IAOQ values from the GVA + PRIV.  */
4376     uint64_t cs_base = ctx->base.tb->cs_base;
4377     uint64_t iasq_f = cs_base & ~0xffffffffull;
4378     int32_t diff = cs_base;
4379 
4380     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4381     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4382 #endif
4383     ctx->iaoq_n = -1;
4384     ctx->iaoq_n_var = NULL;
4385 
4386     ctx->zero = tcg_constant_i64(0);
4387 
4388     /* Bound the number of instructions by those left on the page.  */
4389     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4390     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4391 }
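
/*
 * Illustrative sketch (not part of the translator): OR-ing the start pc
 * with TARGET_PAGE_MASK (all ones above the page offset) makes the
 * negation above yield the byte count remaining on the page, hence the
 * instruction bound.  The 4 KiB page size and address are hypothetical.
 */
#if 0
static int example_insns_left_on_page(uint64_t pc)
{
    const uint64_t page_mask = -4096ull;   /* TARGET_PAGE_MASK for 4 KiB */
    uint64_t bytes = -(pc | page_mask);    /* bytes left on the page */

    return bytes / 4;                      /* e.g. pc = 0x1ff0 -> 4 */
}
#endif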
4392 
4393 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4394 {
4395     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4396 
4397     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4398     ctx->null_cond = cond_make_f();
4399     ctx->psw_n_nonzero = false;
4400     if (ctx->tb_flags & PSW_N) {
4401         ctx->null_cond.c = TCG_COND_ALWAYS;
4402         ctx->psw_n_nonzero = true;
4403     }
4404     ctx->null_lab = NULL;
4405 }
4406 
4407 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4408 {
4409     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4410 
4411     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4412 }
4413 
4414 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4415 {
4416     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4417     CPUHPPAState *env = cpu_env(cs);
4418     DisasJumpType ret;
4419 
4420     /* Execute one insn.  */
4421 #ifdef CONFIG_USER_ONLY
4422     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4423         do_page_zero(ctx);
4424         ret = ctx->base.is_jmp;
4425         assert(ret != DISAS_NEXT);
4426     } else
4427 #endif
4428     {
4429         /* Always fetch the insn, even if nullified, so that we check
4430            the page permissions for execute.  */
4431         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4432 
4433         /* Set up the IA queue for the next insn.
4434            This will be overwritten by a branch.  */
4435         if (ctx->iaoq_b == -1) {
4436             ctx->iaoq_n = -1;
4437             ctx->iaoq_n_var = tcg_temp_new_i64();
4438             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4439         } else {
4440             ctx->iaoq_n = ctx->iaoq_b + 4;
4441             ctx->iaoq_n_var = NULL;
4442         }
4443 
4444         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4445             ctx->null_cond.c = TCG_COND_NEVER;
4446             ret = DISAS_NEXT;
4447         } else {
4448             ctx->insn = insn;
4449             if (!decode(ctx, insn)) {
4450                 gen_illegal(ctx);
4451             }
4452             ret = ctx->base.is_jmp;
4453             assert(ctx->null_lab == NULL);
4454         }
4455     }
4456 
4457     /* Advance the insn queue.  Note that this check also detects
4458        a privilege change within the instruction queue.  */
4459     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4460         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4461             && use_goto_tb(ctx, ctx->iaoq_b)
4462             && (ctx->null_cond.c == TCG_COND_NEVER
4463                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4464             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4465             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4466             ctx->base.is_jmp = ret = DISAS_NORETURN;
4467         } else {
4468             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4469         }
4470     }
4471     ctx->iaoq_f = ctx->iaoq_b;
4472     ctx->iaoq_b = ctx->iaoq_n;
4473     ctx->base.pc_next += 4;
4474 
4475     switch (ret) {
4476     case DISAS_NORETURN:
4477     case DISAS_IAQ_N_UPDATED:
4478         break;
4479 
4480     case DISAS_NEXT:
4481     case DISAS_IAQ_N_STALE:
4482     case DISAS_IAQ_N_STALE_EXIT:
4483         if (ctx->iaoq_f == -1) {
4484             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4485             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4486 #ifndef CONFIG_USER_ONLY
4487             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4488 #endif
4489             nullify_save(ctx);
4490             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4491                                 ? DISAS_EXIT
4492                                 : DISAS_IAQ_N_UPDATED);
4493         } else if (ctx->iaoq_b == -1) {
4494             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4495         }
4496         break;
4497 
4498     default:
4499         g_assert_not_reached();
4500     }
4501 }
4502 
4503 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4504 {
4505     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4506     DisasJumpType is_jmp = ctx->base.is_jmp;
4507 
4508     switch (is_jmp) {
4509     case DISAS_NORETURN:
4510         break;
4511     case DISAS_TOO_MANY:
4512     case DISAS_IAQ_N_STALE:
4513     case DISAS_IAQ_N_STALE_EXIT:
4514         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4515         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4516         nullify_save(ctx);
4517         /* FALLTHRU */
4518     case DISAS_IAQ_N_UPDATED:
4519         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4520             tcg_gen_lookup_and_goto_ptr();
4521             break;
4522         }
4523         /* FALLTHRU */
4524     case DISAS_EXIT:
4525         tcg_gen_exit_tb(NULL, 0);
4526         break;
4527     default:
4528         g_assert_not_reached();
4529     }
4530 }
4531 
4532 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4533                               CPUState *cs, FILE *logfile)
4534 {
4535     target_ulong pc = dcbase->pc_first;
4536 
4537 #ifdef CONFIG_USER_ONLY
4538     switch (pc) {
4539     case 0x00:
4540         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4541         return;
4542     case 0xb0:
4543         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4544         return;
4545     case 0xe0:
4546         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4547         return;
4548     case 0x100:
4549         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4550         return;
4551     }
4552 #endif
4553 
4554     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4555     target_disas(logfile, cs, pc, dcbase->tb->size);
4556 }
4557 
4558 static const TranslatorOps hppa_tr_ops = {
4559     .init_disas_context = hppa_tr_init_disas_context,
4560     .tb_start           = hppa_tr_tb_start,
4561     .insn_start         = hppa_tr_insn_start,
4562     .translate_insn     = hppa_tr_translate_insn,
4563     .tb_stop            = hppa_tr_tb_stop,
4564     .disas_log          = hppa_tr_disas_log,
4565 };
4566 
4567 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4568                            target_ulong pc, void *host_pc)
4569 {
4570     DisasContext ctx;
4571     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4572 }
4573