xref: /openbmc/qemu/target/hppa/translate.c (revision 05caa062)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "qemu/host-utils.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef  HELPER_H
35 
36 /* Choose to use explicit sizes within this file. */
37 #undef tcg_temp_new
38 
39 typedef struct DisasCond {
40     TCGCond c;
41     TCGv_i64 a0, a1;
42 } DisasCond;
43 
44 typedef struct DisasIAQE {
45     /* IASQ; may be null for no change from TB. */
46     TCGv_i64 space;
47     /* IAOQ base; may be null for relative address. */
48     TCGv_i64 base;
49     /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
50     int64_t disp;
51 } DisasIAQE;
52 
53 typedef struct DisasDelayException {
54     struct DisasDelayException *next;
55     TCGLabel *lab;
56     uint32_t insn;
57     bool set_iir;
58     int8_t set_n;
59     uint8_t excp;
60     /* Saved state at parent insn. */
61     DisasIAQE iaq_f, iaq_b;
62 } DisasDelayException;
63 
64 typedef struct DisasContext {
65     DisasContextBase base;
66     CPUState *cs;
67 
68     /* IAQ_Front, IAQ_Back. */
69     DisasIAQE iaq_f, iaq_b;
70     /* IAQ_Next, for jumps, otherwise null for simple advance. */
71     DisasIAQE iaq_j, *iaq_n;
72 
73     /* IAOQ_Front at entry to TB. */
74     uint64_t iaoq_first;
75 
76     DisasCond null_cond;
77     TCGLabel *null_lab;
78 
79     DisasDelayException *delay_excp_list;
80     TCGv_i64 zero;
81 
82     uint32_t insn;
83     uint32_t tb_flags;
84     int mmu_idx;
85     int privilege;
86     uint32_t psw_xb;
87     bool psw_n_nonzero;
88     bool psw_b_next;
89     bool is_pa20;
90     bool insn_start_updated;
91 
92 #ifdef CONFIG_USER_ONLY
93     MemOp unalign;
94 #endif
95 } DisasContext;
96 
97 #ifdef CONFIG_USER_ONLY
98 #define UNALIGN(C)       (C)->unalign
99 #define MMU_DISABLED(C)  false
100 #else
101 #define UNALIGN(C)       MO_ALIGN
102 #define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
103 #endif
104 
105 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
106 static int expand_sm_imm(DisasContext *ctx, int val)
107 {
108     /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
109     if (ctx->is_pa20) {
110         if (val & PSW_SM_W) {
111             val |= PSW_W;
112         }
113         val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
114     } else {
115         val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
116     }
117     return val;
118 }
119 
120 /* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    /* Return the 3-bit sr field bitwise inverted (see comment above). */
    return ~val;
}
125 
126 /* Convert the M:A bits within a memory insn to the tri-state value
127    we use for the final M.  */
128 static int ma_to_m(DisasContext *ctx, int val)
129 {
130     return val & 2 ? (val & 1 ? -1 : 1) : 0;
131 }
132 
133 /* Convert the sign of the displacement to a pre or post-modify.  */
134 static int pos_to_m(DisasContext *ctx, int val)
135 {
136     return val ? 1 : -1;
137 }
138 
139 static int neg_to_m(DisasContext *ctx, int val)
140 {
141     return val ? -1 : 1;
142 }
143 
144 /* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    /* Word-scale the field: instruction targets are 4-byte aligned. */
    return val << 2;
}
149 
150 /* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    /* Position the field for assemble_21 (low 11 bits are zero). */
    return val << 11;
}
155 
156 static int assemble_6(DisasContext *ctx, int val)
157 {
158     /*
159      * Officially, 32 * x + 32 - y.
160      * Here, x is already in bit 5, and y is [4:0].
161      * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
162      * with the overflow from bit 4 summing with x.
163      */
164     return (val ^ 31) + 1;
165 }
166 
167 /* Expander for assemble_16a(s,cat(im10a,0),i). */
168 static int expand_11a(DisasContext *ctx, int val)
169 {
170     /*
171      * @val is bit 0 and bits [4:15].
172      * Swizzle thing around depending on PSW.W.
173      */
174     int im10a = extract32(val, 1, 10);
175     int s = extract32(val, 11, 2);
176     int i = (-(val & 1) << 13) | (im10a << 3);
177 
178     if (ctx->tb_flags & PSW_W) {
179         i ^= s << 13;
180     }
181     return i;
182 }
183 
184 /* Expander for assemble_16a(s,im11a,i). */
185 static int expand_12a(DisasContext *ctx, int val)
186 {
187     /*
188      * @val is bit 0 and bits [3:15].
189      * Swizzle thing around depending on PSW.W.
190      */
191     int im11a = extract32(val, 1, 11);
192     int s = extract32(val, 12, 2);
193     int i = (-(val & 1) << 13) | (im11a << 2);
194 
195     if (ctx->tb_flags & PSW_W) {
196         i ^= s << 13;
197     }
198     return i;
199 }
200 
201 /* Expander for assemble_16(s,im14). */
202 static int expand_16(DisasContext *ctx, int val)
203 {
204     /*
205      * @val is bits [0:15], containing both im14 and s.
206      * Swizzle thing around depending on PSW.W.
207      */
208     int s = extract32(val, 14, 2);
209     int i = (-(val & 1) << 13) | extract32(val, 1, 13);
210 
211     if (ctx->tb_flags & PSW_W) {
212         i ^= s << 13;
213     }
214     return i;
215 }
216 
217 /* The sp field is only present with !PSW_W. */
218 static int sp0_if_wide(DisasContext *ctx, int sp)
219 {
220     return ctx->tb_flags & PSW_W ? 0 : sp;
221 }
222 
223 /* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    /* Encoding 0 denotes "*<<" and maps to standard condition 4. */
    return val ? val : 4; /* 0 == "*<<" */
}
228 
229 /*
230  * In many places pa1.x did not decode the bit that later became
231  * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
232  */
233 static int pa20_d(DisasContext *ctx, int val)
234 {
235     return ctx->is_pa20 & val;
236 }
237 
238 /* Include the auto-generated decoder.  */
239 #include "decode-insns.c.inc"
240 
241 /* We are not using a goto_tb (for whatever reason), but have updated
242    the iaq (for whatever reason), so don't do it again on exit.  */
243 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
244 
245 /* We are exiting the TB, but have neither emitted a goto_tb, nor
246    updated the iaq for the next instruction to be executed.  */
247 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
248 
249 /* Similarly, but we want to return to the main loop immediately
250    to recognize unmasked interrupts.  */
251 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
252 #define DISAS_EXIT                  DISAS_TARGET_3
253 
254 /* global register indexes */
255 static TCGv_i64 cpu_gr[32];
256 static TCGv_i64 cpu_sr[4];
257 static TCGv_i64 cpu_srH;
258 static TCGv_i64 cpu_iaoq_f;
259 static TCGv_i64 cpu_iaoq_b;
260 static TCGv_i64 cpu_iasq_f;
261 static TCGv_i64 cpu_iasq_b;
262 static TCGv_i64 cpu_sar;
263 static TCGv_i64 cpu_psw_n;
264 static TCGv_i64 cpu_psw_v;
265 static TCGv_i64 cpu_psw_cb;
266 static TCGv_i64 cpu_psw_cb_msb;
267 static TCGv_i32 cpu_psw_xb;
268 
/* Allocate the TCG globals that mirror CPUHPPAState fields accessed
   directly by generated code. */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR lives in the control-register file, hence no DEF_VAR. */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* GR0 is hardwired to zero; represent it with a NULL TCGv. */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH shadows sr[4]; used when sr4-7 are known equal (TB_FLAG_SR_SAME). */
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
330 
/* Record BREG as insn_start parameter 2 of the current instruction,
   so it can be recovered at exception time (consumer not visible in
   this file chunk).  May be called at most once per instruction. */
static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}
337 
338 static DisasCond cond_make_f(void)
339 {
340     return (DisasCond){
341         .c = TCG_COND_NEVER,
342         .a0 = NULL,
343         .a1 = NULL,
344     };
345 }
346 
347 static DisasCond cond_make_t(void)
348 {
349     return (DisasCond){
350         .c = TCG_COND_ALWAYS,
351         .a0 = NULL,
352         .a1 = NULL,
353     };
354 }
355 
356 static DisasCond cond_make_n(void)
357 {
358     return (DisasCond){
359         .c = TCG_COND_NE,
360         .a0 = cpu_psw_n,
361         .a1 = tcg_constant_i64(0)
362     };
363 }
364 
365 static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
366 {
367     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
368     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
369 }
370 
371 static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
372 {
373     return cond_make_tt(c, a0, tcg_constant_i64(imm));
374 }
375 
376 static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
377 {
378     TCGv_i64 tmp = tcg_temp_new_i64();
379     tcg_gen_mov_i64(tmp, a0);
380     return cond_make_ti(c, tmp, imm);
381 }
382 
383 static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
384 {
385     TCGv_i64 t0 = tcg_temp_new_i64();
386     TCGv_i64 t1 = tcg_temp_new_i64();
387 
388     tcg_gen_mov_i64(t0, a0);
389     tcg_gen_mov_i64(t1, a1);
390     return cond_make_tt(c, t0, t1);
391 }
392 
393 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
394 {
395     if (reg == 0) {
396         return ctx->zero;
397     } else {
398         return cpu_gr[reg];
399     }
400 }
401 
402 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
403 {
404     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
405         return tcg_temp_new_i64();
406     } else {
407         return cpu_gr[reg];
408     }
409 }
410 
/* Store T into DEST, honoring any pending nullification: when the
   null condition holds, DEST keeps its old value (operand order of the
   movcond: cond-true selects the old DEST). */
static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}
420 
421 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
422 {
423     if (reg != 0) {
424         save_or_nullify(ctx, cpu_gr[reg], t);
425     }
426 }
427 
428 #if HOST_BIG_ENDIAN
429 # define HI_OFS  0
430 # define LO_OFS  4
431 #else
432 # define HI_OFS  4
433 # define LO_OFS  0
434 #endif
435 
/* Load 32-bit FP register RT into a new temp.  The 64 single-word
   registers alias the 32 doubles: bit 5 of RT selects the low word
   of fr[rt & 31], otherwise the high word (host-endian adjusted). */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
444 
445 static TCGv_i32 load_frw0_i32(unsigned rt)
446 {
447     if (rt == 0) {
448         TCGv_i32 ret = tcg_temp_new_i32();
449         tcg_gen_movi_i32(ret, 0);
450         return ret;
451     } else {
452         return load_frw_i32(rt);
453     }
454 }
455 
456 static TCGv_i64 load_frw0_i64(unsigned rt)
457 {
458     TCGv_i64 ret = tcg_temp_new_i64();
459     if (rt == 0) {
460         tcg_gen_movi_i64(ret, 0);
461     } else {
462         tcg_gen_ld32u_i64(ret, tcg_env,
463                           offsetof(CPUHPPAState, fr[rt & 31])
464                           + (rt & 32 ? LO_OFS : HI_OFS));
465     }
466     return ret;
467 }
468 
/* Store VAL into 32-bit FP register RT; addressing mirrors
   load_frw_i32 (bit 5 of RT selects the low word of fr[rt & 31]). */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
475 
476 #undef HI_OFS
477 #undef LO_OFS
478 
/* Load 64-bit FP register fr[RT] into a new temp. */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}
485 
486 static TCGv_i64 load_frd0(unsigned rt)
487 {
488     if (rt == 0) {
489         TCGv_i64 ret = tcg_temp_new_i64();
490         tcg_gen_movi_i64(ret, 0);
491         return ret;
492     } else {
493         return load_frd(rt);
494     }
495 }
496 
/* Store VAL into 64-bit FP register fr[RT]. */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
501 
/* Load space register REG into DEST.  sr0-3 are TCG globals; when all
   of sr4-7 are known equal (TB_FLAG_SR_SAME) the cached srH global is
   used, otherwise the value is loaded from env.  User-only emulation
   has no spaces, so the result is always zero. */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
516 
517 /*
518  * Write a value to psw_xb, bearing in mind the known value.
519  * To be used just before exiting the TB, so do not update the known value.
520  */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    /* Only X cleared / B set states are representable here. */
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    /* Elide the store when the cached value already matches. */
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}
528 
529 /* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    /* Store first: store_psw_xb compares against the old cached value. */
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}
535 
536 /* Skip over the implementation of an insn that has been nullified.
537    Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        /* Branch over the implementation when the insn is nullified. */
        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        /* From here to the label, the insn executes unconditionally. */
        ctx->null_cond = cond_make_f();
    }
}
564 
565 /* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: PSW[N] must end up clear. */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    /* If the condition is literally "psw_n != 0", PSW[N] is already
       up to date and no setcond is needed. */
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}
581 
582 /* Set a PSW[N] to X.  The intention is that this is used immediately
583    before a goto_tb/exit_tb, so that there is no fallthru path to other
584    code within the TB.  Therefore we do not update psw_n_nonzero.  */
585 static void nullify_set(DisasContext *ctx, bool x)
586 {
587     if (ctx->psw_n_nonzero || x) {
588         tcg_gen_movi_i64(cpu_psw_n, x);
589     }
590 }
591 
592 /* Mark the end of an instruction that may have been nullified.
593    This is the pair to nullify_over.  Always returns true so that
594    it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* A nullified insn that said "no return" may still fall through
       on the nullified path; resume normal translation. */
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
632 
633 static bool iaqe_variable(const DisasIAQE *e)
634 {
635     return e->base || e->space;
636 }
637 
638 static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
639 {
640     return (DisasIAQE){
641         .space = e->space,
642         .base = e->base,
643         .disp = e->disp + disp,
644     };
645 }
646 
647 static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
648 {
649     return (DisasIAQE){
650         .space = ctx->iaq_b.space,
651         .disp = ctx->iaq_f.disp + 8 + disp,
652     };
653 }
654 
655 static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
656 {
657     return (DisasIAQE){
658         .space = ctx->iaq_b.space,
659         .base = var,
660     };
661 }
662 
663 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
664                             const DisasIAQE *src)
665 {
666     tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
667 }
668 
/* Commit F and B to the architectural IAQ globals.  B == NULL means
   the usual sequential back entry, F + 4. */
static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        /* F does not read cpu_iaoq_b, so B may be written first;
           F is then written last so it may read the old cpu_iaoq_f. */
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        /* Same base: derive B from the freshly written F. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        /* The edge case above: stage B in a temp before clobbering
           either global. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    /* Spaces: a null b->space inherits f->space. */
    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}
707 
/* Install the branch return address: GR[link] = IAQ_Back + 4.  When
   WITH_SR0 (system emulation only), also copy IASQ_Back into SR0. */
static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    /* Callers must have resolved any pending nullification first. */
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        /* Link register r0: the return address is discarded. */
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}
722 
/* Emit a call to the exception helper; control does not return to the
   generated insn stream. */
static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}
727 
static void gen_excp(DisasContext *ctx, int exception)
{
    /* Commit the architectural IAQ and PSW[N] before raising, so the
       exception handler observes this insn's state. */
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
735 
/* Allocate a deferred-exception record for this insn and link it into
   the per-TB list, to be emitted out of line at the end of the TB. */
static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    /* tcg_malloc: translation-lifetime allocation, no explicit free
       (NOTE(review): confirm pool lifetime covers the list walk). */
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    /* set_n: 0 when PSW[N] may be live, else -1 -- consumed by the
       out-of-line emitter (not visible in this chunk). */
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    /* Snapshot the queue so the exception reports this insn's state. */
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}
754 
/* Raise EXC with CR[IIR] set to the current insn.  When the insn may
   be nullified, branch to a deferred exception stub instead, taken
   only if the insn is *not* nullified.  Always returns true so it may
   be tail-called from a translate function. */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        DisasDelayException *e = delay_excp(ctx, exc);
        /* Inverted test: branch to the stub when NOT nullified. */
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}
769 
/* Raise an illegal-instruction trap with IIR set; returns true. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
774 
775 #ifdef CONFIG_USER_ONLY
776 #define CHECK_MOST_PRIVILEGED(EXCP) \
777     return gen_excp_iir(ctx, EXCP)
778 #else
779 #define CHECK_MOST_PRIVILEGED(EXCP) \
780     do {                                     \
781         if (ctx->privilege != 0) {           \
782             return gen_excp_iir(ctx, EXCP);  \
783         }                                    \
784     } while (0)
785 #endif
786 
/* True when direct TB chaining is allowed: both queue entries must be
   compile-time constant (no variable base/space) and the resolved
   front address must pass the generic translator check. */
static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}
794 
795 /* If the next insn is to be nullified, and it's on the same page,
796    and we're not attempting to set a breakpoint on it, then we can
797    totally skip the nullified insn.  This avoids creating and
798    executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    /* Requires: no breakpoint-page check pending, a constant IAQ_Back,
       and the skipped insn on the same page as the TB start. */
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}
806 
807 static void gen_goto_tb(DisasContext *ctx, int which,
808                         const DisasIAQE *f, const DisasIAQE *b)
809 {
810     install_iaq_entries(ctx, f, b);
811     if (use_goto_tb(ctx, f, b)) {
812         tcg_gen_goto_tb(which);
813         tcg_gen_exit_tb(ctx->base.tb, which);
814     } else {
815         tcg_gen_lookup_and_goto_ptr();
816     }
817 }
818 
819 static bool cond_need_sv(int c)
820 {
821     return c == 2 || c == 3 || c == 6;
822 }
823 
824 static bool cond_need_cb(int c)
825 {
826     return c == 4 || c == 5;
827 }
828 
829 /*
830  * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
831  * the Parisc 1.1 Architecture Reference Manual for details.
832  */
833 
/*
 * @cf: 4-bit condition field; bit 0 inverts the sense of the test.
 * @d:  true for 64-bit conditions, false for 32-bit (which test only
 *      the low 32 bits via TST conditions).
 * @res: operation result; @uv: carry/unsigned-overflow indicator;
 * @sv: signed-overflow indicator (may be NULL when the caller knows
 * the selected condition does not use it).
 */
static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        /* tmp = UV ? res : 0, so "!UV | Z" becomes a zero test of tmp. */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF inverts the sense of the test. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
907 
908 /* Similar, but for the special case of subtraction without borrow, we
909    can use the inputs directly.  This can allow other computation to be
910    deleted as unused.  */
911 
static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;   /* zero-extend (vs sign-extend) for 32-bit compares */

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        /* Remaining conditions cannot be expressed on the inputs;
           fall back to testing the result. */
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        /* 32-bit condition: compare the extended low halves. */
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}
962 
963 /*
964  * Similar, but for logicals, where the carry and overflow bits are not
965  * computed, and use of them is undefined.
966  *
967  * Undefined or not, hardware does not trap.  It seems reasonable to
968  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
969  * how cases c={2,3} are treated.
970  */
971 
static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    uint64_t imm;

    switch (cf >> 1) {
    case 0:  /* never / always */
    case 4:  /* undef, C */
    case 5:  /* undef, C & !Z */
    case 6:  /* undef, V */
        /* Treated as if C=0 and V=0 -- see the comment above. */
        return cf & 1 ? cond_make_t() : cond_make_f();
    case 1:  /* == / <> */
        tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
        imm = d ? 0 : UINT32_MAX;
        break;
    case 2:  /* < / >= */
        tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
        imm = d ? 0 : 1ull << 31;
        break;
    case 3:  /* <= / > */
        /* No TST form exists for <=; pick GT/LE here and return early,
           so the generic inversion below must not be applied again. */
        tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
        if (!d) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, res);
            return cond_make_ti(tc, tmp, 0);
        }
        return cond_make_vi(tc, res, 0);
    case 7: /* OD / EV */
        tc = TCG_COND_TSTNE;
        imm = 1;
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF inverts the sense of the test. */
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    return cond_make_vi(tc, res, imm);
}
1012 
1013 /* Similar, but for shift/extract/deposit conditions.  */
1014 
1015 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
1016                              TCGv_i64 res)
1017 {
1018     unsigned c, f;
1019 
1020     /* Convert the compressed condition codes to standard.
1021        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1022        4-7 are the reverse of 0-3.  */
1023     c = orig & 3;
1024     if (c == 3) {
1025         c = 7;
1026     }
1027     f = (orig & 4) / 4;
1028 
1029     return do_log_cond(ctx, c * 2 + f, d, res);
1030 }
1031 
/* Similar, but for unit zero conditions.  */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    /* Replication factor: two copies for 64-bit (D), one for 32-bit. */
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    /*
     * For each unit size, ONES has a 1 in the low bit of every unit
     * and SGNS a 1 in the high bit of every unit; together they drive
     * the "is any unit zero" bithack below.
     */
    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);

    /* A sign bit set in TMP marks a zero unit; CF bit 0 negates. */
    return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}
1070 
1071 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
1072                           TCGv_i64 cb, TCGv_i64 cb_msb)
1073 {
1074     if (!d) {
1075         TCGv_i64 t = tcg_temp_new_i64();
1076         tcg_gen_extract_i64(t, cb, 32, 1);
1077         return t;
1078     }
1079     return cb_msb;
1080 }
1081 
/* Return the architectural carry for width D from the PSW carry bits. */
static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
1086 
/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Overflow iff the operands have the same sign (in1 ^ in2 clear)
     * and the result sign differs from them (res ^ in1 set).
     * IN1 here is the post-shift operand; ORIG_IN1 the pre-shift one.
     */
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    /* The pre-shift of IN1 (SHxADD) can itself overflow; fold that in. */
    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}
1127 
1128 /* Compute unsigned overflow for addition.  */
1129 static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
1130                           TCGv_i64 in1, int shift, bool d)
1131 {
1132     if (shift == 0) {
1133         return get_carry(ctx, d, cb, cb_msb);
1134     } else {
1135         TCGv_i64 tmp = tcg_temp_new_i64();
1136         tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
1137         tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
1138         return tmp;
1139     }
1140 }
1141 
1142 /* Compute signed overflow for subtraction.  */
1143 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
1144                           TCGv_i64 in1, TCGv_i64 in2)
1145 {
1146     TCGv_i64 sv = tcg_temp_new_i64();
1147     TCGv_i64 tmp = tcg_temp_new_i64();
1148 
1149     tcg_gen_xor_i64(sv, res, in1);
1150     tcg_gen_xor_i64(tmp, in1, in2);
1151     tcg_gen_and_i64(sv, sv, tmp);
1152 
1153     return sv;
1154 }
1155 
1156 static void gen_tc(DisasContext *ctx, DisasCond *cond)
1157 {
1158     DisasDelayException *e;
1159 
1160     switch (cond->c) {
1161     case TCG_COND_NEVER:
1162         break;
1163     case TCG_COND_ALWAYS:
1164         gen_excp_iir(ctx, EXCP_COND);
1165         break;
1166     default:
1167         e = delay_excp(ctx, EXCP_COND);
1168         tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
1169         /* In the non-trap path, the condition is known false. */
1170         *cond = cond_make_f();
1171         break;
1172     }
1173 }
1174 
/* Emit a trap-on-signed-overflow check: branch to a delayed
   EXCP_OVERFLOW when the SV value indicates overflow. */
static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    /* Condition 12 is "SV" in the standard condition encoding. */
    DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}
1185 
/*
 * Common emission for the ADD family.
 *   shift  - pre-shift applied to ORIG_IN1 (SHxADD)
 *   is_l   - "logical" add: do not write back the PSW carry bits
 *   is_tsv - trap on signed overflow
 *   is_tc  - trap on condition
 *   is_c   - add in the PSW carry (ADD,C)
 *   cf/d   - condition field and width for the nullification condition
 */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    /* Apply the optional pre-shift to the first operand. */
    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        /* Carries are needed: compute sum plus per-bit carry vector. */
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        /* Per-bit carry-in vector: in1 ^ in2 ^ dest. */
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /* Carries unused: a plain add suffices. */
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required.  */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1254 
1255 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1256                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1257 {
1258     TCGv_i64 tcg_r1, tcg_r2;
1259 
1260     if (unlikely(is_tc && a->cf == 1)) {
1261         /* Unconditional trap on condition. */
1262         return gen_excp_iir(ctx, EXCP_COND);
1263     }
1264     if (a->cf) {
1265         nullify_over(ctx);
1266     }
1267     tcg_r1 = load_gpr(ctx, a->r1);
1268     tcg_r2 = load_gpr(ctx, a->r2);
1269     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1270            is_tsv, is_tc, is_c, a->cf, a->d);
1271     return nullify_end(ctx);
1272 }
1273 
1274 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1275                        bool is_tsv, bool is_tc)
1276 {
1277     TCGv_i64 tcg_im, tcg_r2;
1278 
1279     if (unlikely(is_tc && a->cf == 1)) {
1280         /* Unconditional trap on condition. */
1281         return gen_excp_iir(ctx, EXCP_COND);
1282     }
1283     if (a->cf) {
1284         nullify_over(ctx);
1285     }
1286     tcg_im = tcg_constant_i64(a->i);
1287     tcg_r2 = load_gpr(ctx, a->r);
1288     /* All ADDI conditions are 32-bit. */
1289     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1290     return nullify_end(ctx);
1291 }
1292 
/*
 * Common emission for the SUB family.
 *   is_tsv - trap on signed overflow
 *   is_b   - subtract with borrow (SUB,B): uses the PSW carry
 *   is_tc  - trap on condition
 *   cf/d   - condition field and width for the nullification condition
 */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        /* Per-bit carry vector: ~in2 ^ in1 ^ dest. */
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        /* eqv == ~(in1 ^ in2) == in1 ^ ~in2: the same carry vector. */
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1353 
1354 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1355                        bool is_tsv, bool is_b, bool is_tc)
1356 {
1357     TCGv_i64 tcg_r1, tcg_r2;
1358 
1359     if (a->cf) {
1360         nullify_over(ctx);
1361     }
1362     tcg_r1 = load_gpr(ctx, a->r1);
1363     tcg_r2 = load_gpr(ctx, a->r2);
1364     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1365     return nullify_end(ctx);
1366 }
1367 
1368 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1369 {
1370     TCGv_i64 tcg_im, tcg_r2;
1371 
1372     if (a->cf) {
1373         nullify_over(ctx);
1374     }
1375     tcg_im = tcg_constant_i64(a->i);
1376     tcg_r2 = load_gpr(ctx, a->r);
1377     /* All SUBI conditions are 32-bit. */
1378     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1379     return nullify_end(ctx);
1380 }
1381 
/*
 * Emit COMCLR/CMPCLR: compare IN1 - IN2 for the condition,
 * then write zero to RT.
 */
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1407 
1408 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1409                    TCGv_i64 in2, unsigned cf, bool d,
1410                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1411 {
1412     TCGv_i64 dest = dest_gpr(ctx, rt);
1413 
1414     /* Perform the operation, and writeback.  */
1415     fn(dest, in1, in2);
1416     save_gpr(ctx, rt, dest);
1417 
1418     /* Install the new nullification.  */
1419     ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1420 }
1421 
1422 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1423                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1424 {
1425     TCGv_i64 tcg_r1, tcg_r2;
1426 
1427     if (a->cf) {
1428         nullify_over(ctx);
1429     }
1430     tcg_r1 = load_gpr(ctx, a->r1);
1431     tcg_r2 = load_gpr(ctx, a->r2);
1432     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1433     return nullify_end(ctx);
1434 }
1435 
/*
 * Emit the UADDCM/UXOR-style "unit" add/sub (UNIT conditions):
 * the condition tests per-unit carries or per-unit zero results.
 *   is_tc  - trap on condition
 *   is_add - add vs subtract
 */
static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        /* Narrow mode: only carries in the low 32 bits are meaningful. */
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            /* Shift down so bit i holds the carry OUT of bit i,
               pulling the bit-63 carry from cb_msb. */
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            /* Shift down so bit i holds the carry OUT of bit i. */
            tcg_gen_shri_i64(cb, cb, 1);
        }

        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}
1514 
1515 #ifndef CONFIG_USER_ONLY
1516 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1517    from the top 2 bits of the base register.  There are a few system
1518    instructions that have a 3-bit space specifier, for which SR0 is
1519    not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        /* Explicit space register; ~SP encodes "SR0 not special". */
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* SR4..SR7 are known identical for this TB; use the cached copy. */
        return cpu_srH;
    }

    /* SP == 0: select SR4..SR7 from the top 2 bits of BASE. */
    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    /* Load env->sr[4 + index] at translation-generated runtime. */
    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
1552 #endif
1553 
/*
 * Form a global virtual address from base register RB, optional scaled
 * index RX, displacement DISP and space SP.  Returns the full GVA in
 * *PGVA and the unmasked offset (for base-register writeback) in *POFS.
 * MODIFY <0/=0/>0 selects pre-modify / none / post-modify addressing;
 * IS_PHYS suppresses the space bits for physical accesses.
 */
static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    /* Post-modify addressing uses the pre-update base as the address. */
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     gva_offset_mask(ctx->tb_flags));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        /* Merge in the space bits for a full virtual address. */
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
1586 
1587 /* Emit a memory load.  The modify parameter should be
1588  * < 0 for pre-modify,
1589  * > 0 for post-modify,
1590  * = 0 for no base register update.
1591  */
1592 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1593                        unsigned rx, int scale, int64_t disp,
1594                        unsigned sp, int modify, MemOp mop)
1595 {
1596     TCGv_i64 ofs;
1597     TCGv_i64 addr;
1598 
1599     /* Caller uses nullify_over/nullify_end.  */
1600     assert(ctx->null_cond.c == TCG_COND_NEVER);
1601 
1602     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1603              MMU_DISABLED(ctx));
1604     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1605     if (modify) {
1606         save_gpr(ctx, rb, ofs);
1607     }
1608 }
1609 
1610 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1611                        unsigned rx, int scale, int64_t disp,
1612                        unsigned sp, int modify, MemOp mop)
1613 {
1614     TCGv_i64 ofs;
1615     TCGv_i64 addr;
1616 
1617     /* Caller uses nullify_over/nullify_end.  */
1618     assert(ctx->null_cond.c == TCG_COND_NEVER);
1619 
1620     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1621              MMU_DISABLED(ctx));
1622     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1623     if (modify) {
1624         save_gpr(ctx, rb, ofs);
1625     }
1626 }
1627 
1628 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1629                         unsigned rx, int scale, int64_t disp,
1630                         unsigned sp, int modify, MemOp mop)
1631 {
1632     TCGv_i64 ofs;
1633     TCGv_i64 addr;
1634 
1635     /* Caller uses nullify_over/nullify_end.  */
1636     assert(ctx->null_cond.c == TCG_COND_NEVER);
1637 
1638     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1639              MMU_DISABLED(ctx));
1640     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1641     if (modify) {
1642         save_gpr(ctx, rb, ofs);
1643     }
1644 }
1645 
1646 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1647                         unsigned rx, int scale, int64_t disp,
1648                         unsigned sp, int modify, MemOp mop)
1649 {
1650     TCGv_i64 ofs;
1651     TCGv_i64 addr;
1652 
1653     /* Caller uses nullify_over/nullify_end.  */
1654     assert(ctx->null_cond.c == TCG_COND_NEVER);
1655 
1656     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1657              MMU_DISABLED(ctx));
1658     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1659     if (modify) {
1660         save_gpr(ctx, rb, ofs);
1661     }
1662 }
1663 
1664 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1665                     unsigned rx, int scale, int64_t disp,
1666                     unsigned sp, int modify, MemOp mop)
1667 {
1668     TCGv_i64 dest;
1669 
1670     nullify_over(ctx);
1671 
1672     if (modify == 0) {
1673         /* No base register update.  */
1674         dest = dest_gpr(ctx, rt);
1675     } else {
1676         /* Make sure if RT == RB, we see the result of the load.  */
1677         dest = tcg_temp_new_i64();
1678     }
1679     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1680     save_gpr(ctx, rt, dest);
1681 
1682     return nullify_end(ctx);
1683 }
1684 
1685 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1686                       unsigned rx, int scale, int64_t disp,
1687                       unsigned sp, int modify)
1688 {
1689     TCGv_i32 tmp;
1690 
1691     nullify_over(ctx);
1692 
1693     tmp = tcg_temp_new_i32();
1694     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1695     save_frw_i32(rt, tmp);
1696 
1697     if (rt == 0) {
1698         gen_helper_loaded_fr0(tcg_env);
1699     }
1700 
1701     return nullify_end(ctx);
1702 }
1703 
1704 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1705 {
1706     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1707                      a->disp, a->sp, a->m);
1708 }
1709 
1710 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1711                       unsigned rx, int scale, int64_t disp,
1712                       unsigned sp, int modify)
1713 {
1714     TCGv_i64 tmp;
1715 
1716     nullify_over(ctx);
1717 
1718     tmp = tcg_temp_new_i64();
1719     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1720     save_frd(rt, tmp);
1721 
1722     if (rt == 0) {
1723         gen_helper_loaded_fr0(tcg_env);
1724     }
1725 
1726     return nullify_end(ctx);
1727 }
1728 
1729 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1730 {
1731     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1732                      a->disp, a->sp, a->m);
1733 }
1734 
1735 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1736                      int64_t disp, unsigned sp,
1737                      int modify, MemOp mop)
1738 {
1739     nullify_over(ctx);
1740     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1741     return nullify_end(ctx);
1742 }
1743 
1744 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1745                        unsigned rx, int scale, int64_t disp,
1746                        unsigned sp, int modify)
1747 {
1748     TCGv_i32 tmp;
1749 
1750     nullify_over(ctx);
1751 
1752     tmp = load_frw_i32(rt);
1753     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1754 
1755     return nullify_end(ctx);
1756 }
1757 
1758 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1759 {
1760     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1761                       a->disp, a->sp, a->m);
1762 }
1763 
1764 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1765                        unsigned rx, int scale, int64_t disp,
1766                        unsigned sp, int modify)
1767 {
1768     TCGv_i64 tmp;
1769 
1770     nullify_over(ctx);
1771 
1772     tmp = load_frd(rt);
1773     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1774 
1775     return nullify_end(ctx);
1776 }
1777 
1778 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1779 {
1780     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1781                       a->disp, a->sp, a->m);
1782 }
1783 
1784 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1785                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1786 {
1787     TCGv_i32 tmp;
1788 
1789     nullify_over(ctx);
1790     tmp = load_frw0_i32(ra);
1791 
1792     func(tmp, tcg_env, tmp);
1793 
1794     save_frw_i32(rt, tmp);
1795     return nullify_end(ctx);
1796 }
1797 
1798 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1799                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1800 {
1801     TCGv_i32 dst;
1802     TCGv_i64 src;
1803 
1804     nullify_over(ctx);
1805     src = load_frd(ra);
1806     dst = tcg_temp_new_i32();
1807 
1808     func(dst, tcg_env, src);
1809 
1810     save_frw_i32(rt, dst);
1811     return nullify_end(ctx);
1812 }
1813 
1814 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1815                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1816 {
1817     TCGv_i64 tmp;
1818 
1819     nullify_over(ctx);
1820     tmp = load_frd0(ra);
1821 
1822     func(tmp, tcg_env, tmp);
1823 
1824     save_frd(rt, tmp);
1825     return nullify_end(ctx);
1826 }
1827 
1828 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1829                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1830 {
1831     TCGv_i32 src;
1832     TCGv_i64 dst;
1833 
1834     nullify_over(ctx);
1835     src = load_frw0_i32(ra);
1836     dst = tcg_temp_new_i64();
1837 
1838     func(dst, tcg_env, src);
1839 
1840     save_frd(rt, dst);
1841     return nullify_end(ctx);
1842 }
1843 
1844 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1845                         unsigned ra, unsigned rb,
1846                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1847 {
1848     TCGv_i32 a, b;
1849 
1850     nullify_over(ctx);
1851     a = load_frw0_i32(ra);
1852     b = load_frw0_i32(rb);
1853 
1854     func(a, tcg_env, a, b);
1855 
1856     save_frw_i32(rt, a);
1857     return nullify_end(ctx);
1858 }
1859 
1860 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1861                         unsigned ra, unsigned rb,
1862                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1863 {
1864     TCGv_i64 a, b;
1865 
1866     nullify_over(ctx);
1867     a = load_frd0(ra);
1868     b = load_frd0(rb);
1869 
1870     func(a, tcg_env, a, b);
1871 
1872     save_frd(rt, a);
1873     return nullify_end(ctx);
1874 }
1875 
1876 /* Emit an unconditional branch to a direct target, which may or may not
1877    have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, int64_t disp,
                       unsigned link, bool is_n)
{
    /* Queue the branch target (IAOQ-relative displacement DISP). */
    ctx->iaq_j = iaqe_branchi(ctx, disp);

    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* The branch itself is not subject to nullification. */
        install_link(ctx, link, false);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the nullified delay slot entirely. */
                nullify_set(ctx, 0);
                store_psw_xb(ctx, 0);
                gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
                ctx->base.is_jmp = DISAS_NORETURN;
                return true;
            }
            /* Otherwise mark the next insn (the delay slot) nullified. */
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* Let the main loop emit the delay slot before the branch. */
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
    } else {
        /* The branch may itself be nullified: emit both paths. */
        nullify_over(ctx);

        install_link(ctx, link, false);
        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            store_psw_xb(ctx, 0);
            gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
        } else {
            nullify_set(ctx, is_n);
            store_psw_xb(ctx, PSW_B);
            gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
        }
        nullify_end(ctx);

        /* Nullified path: fall through to the insn after the branch. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1919 
1920 /* Emit a conditional branch to a direct target.  If the branch itself
1921    is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    DisasIAQE next;
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        /* Skip the nullified delay slot; continue at insn + 8. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        next = iaqe_incr(&ctx->iaq_b, 4);
        gen_goto_tb(ctx, 0, &next, NULL);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;

    next = iaqe_branchi(ctx, disp);
    if (n && use_nullify_skip(ctx)) {
        /* Skip the nullified delay slot; continue at the target. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &next, NULL);
    } else {
        nullify_set(ctx, n);
        store_psw_xb(ctx, PSW_B);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1983 
1984 /*
1985  * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
1986  * This handles nullification of the branch itself.
1987  */
static bool do_ibranch(DisasContext *ctx, unsigned link,
                       bool with_sr0, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* The branch itself is not nullified: emit it unconditionally. */
        install_link(ctx, link, with_sr0);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the delay slot entirely by jumping straight to
                   the branch target with PSW[N] clear. */
                install_iaq_entries(ctx, &ctx->iaq_j, NULL);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            /* Otherwise force the delay-slot insn to be nullified. */
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* Let the translator consume the delay slot, then branch. */
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
        return true;
    }

    /* The branch itself may be nullified; emit it under nullification. */
    nullify_over(ctx);

    install_link(ctx, link, with_sr0);
    if (is_n && use_nullify_skip(ctx)) {
        install_iaq_entries(ctx, &ctx->iaq_j, NULL);
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
    } else {
        install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
        nullify_set(ctx, is_n);
        store_psw_xb(ctx, PSW_B);
    }

    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}
2024 
2025 /* Implement
2026  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
2027  *      IAOQ_Next{30..31} ← GR[b]{30..31};
2028  *    else
2029  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2030  * which keeps the privilege level from being increased.
2031  */
2032 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
2033 {
2034     TCGv_i64 dest = tcg_temp_new_i64();
2035     switch (ctx->privilege) {
2036     case 0:
2037         /* Privilege 0 is maximum and is allowed to decrease.  */
2038         tcg_gen_mov_i64(dest, offset);
2039         break;
2040     case 3:
2041         /* Privilege 3 is minimum and is never allowed to increase.  */
2042         tcg_gen_ori_i64(dest, offset, 3);
2043         break;
2044     default:
2045         tcg_gen_andi_i64(dest, offset, -4);
2046         tcg_gen_ori_i64(dest, dest, ctx->privilege);
2047         tcg_gen_umax_i64(dest, dest, offset);
2048         break;
2049     }
2050     return dest;
2051 }
2052 
2053 #ifdef CONFIG_USER_ONLY
2054 /* On Linux, page zero is normally marked execute only + gateway.
2055    Therefore normal read or write is supposed to fail, but specific
2056    offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
2059    here, is the easiest way to handle the branch delay slot on the
2060    aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    assert(ctx->iaq_f.disp == 0);

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* If PSW[B] is set, the B,GATE insn would trap. */
    if (ctx->psw_xb & PSW_B) {
        goto do_sigill;
    }

    /* Dispatch on the entry offset within page zero. */
    switch (ctx->base.pc_first) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        {
            DisasIAQE next = { .base = tcg_temp_new_i64() };

            /* Store GR[26] as the thread pointer in CR27, then return
               to GR[31], forced to user privilege. */
            tcg_gen_st_i64(cpu_gr[26], tcg_env,
                           offsetof(CPUHPPAState, cr[27]));
            tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
            install_iaq_entries(ctx, &next, NULL);
            ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        }
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
2120 #endif
2121 
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    /* No operation; just reset the nullification condition. */
    ctx->null_cond = cond_make_f();
    return true;
}
2127 
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    /* BREAK: raise EXCP_BREAK, recording the insn via gen_excp_iir. */
    return gen_excp_iir(ctx, EXCP_BREAK);
}
2132 
static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* SYNC: emit a full memory barrier. */
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    ctx->null_cond = cond_make_f();
    return true;
}
2141 
2142 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2143 {
2144     TCGv_i64 dest = dest_gpr(ctx, a->t);
2145 
2146     copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
2147     tcg_gen_andi_i64(dest, dest, -4);
2148 
2149     save_gpr(ctx, a->t, dest);
2150     ctx->null_cond = cond_make_f();
2151     return true;
2152 }
2153 
2154 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2155 {
2156     unsigned rt = a->t;
2157     unsigned rs = a->sp;
2158     TCGv_i64 t0 = tcg_temp_new_i64();
2159 
2160     load_spr(ctx, t0, rs);
2161     tcg_gen_shri_i64(t0, t0, 32);
2162 
2163     save_gpr(ctx, rt, t0);
2164 
2165     ctx->null_cond = cond_make_f();
2166     return true;
2167 }
2168 
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        /* Reading the timer is an I/O operation; end the TB if needed. */
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        /* CR26/CR27 are readable without the privilege check below. */
        break;
    default:
        /* All other control registers are privileged.  */
        /* Note: CHECK_MOST_PRIVILEGED may return from this function. */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic path: load the control register from env. */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    ctx->null_cond = cond_make_f();
    return true;
}
2213 
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    /* SR5-SR7 are only writable at the most privileged level. */
    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    /* Space register values are kept in the upper 32 bits. */
    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        /* SR4-SR7 live only in env; clear TB_FLAG_SR_SAME since the
           space registers may now differ. */
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}
2237 
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_i64 reg;
    TCGv_i64 tmp;

    if (ctl == CR_SAR) {
        /* SAR is writable at any privilege; mask to the shift width. */
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        ctx->null_cond = cond_make_f();
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    /* Without pa2.0, the source value is truncated to 32 bits. */
    if (ctx->is_pa20) {
        reg = load_gpr(ctx, a->r);
    } else {
        reg = tcg_temp_new_i64();
        tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
    }

    switch (ctl) {
    case CR_IT:
        /* Writing the timer is an I/O operation; end the TB if needed. */
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        /* Helper modifies interrupt lines and is therefore IO. */
        translator_io_start(&ctx->base);
        gen_helper_write_eirr(tcg_env, reg);
        /* Exit to re-evaluate interrupts in the main loop. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_i64(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        /* NOTE(review): this inner #ifndef is redundant -- we are already
           inside a !CONFIG_USER_ONLY region -- but harmless. */
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    case CR_EIEM:
        /* Exit to re-evaluate interrupts in the main loop. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        /* FALLTHRU */
    default:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
2315 
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* MTSARCM: write the one's complement of GR[r] into SAR,
       masked to the shift-amount width (63 for pa2.0, else 31). */
    tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    ctx->null_cond = cond_make_f();
    return true;
}
2327 
static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_i64(dest, 0);
#else
    /* The space id lives in the upper 32 bits of the selected SR. */
    tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(dest, dest, 32);
#endif
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = cond_make_f();
    return true;
}
2344 
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
#ifdef CONFIG_USER_ONLY
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#else
    TCGv_i64 tmp;

    /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
    if (a->i) {
        /* Actually clearing system-mask bits requires privilege. */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    }

    nullify_over(ctx);

    /* Compute PSW & ~i, swap it in, and return the old mask in rt. */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_i64(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2370 
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    /* Compute PSW | i, swap it in, and return the old mask in rt. */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2390 
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    /* Replace the system mask wholesale.  Unlike SSM/RSM, the old
       value returned by the helper is intentionally discarded. */
    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2407 
/* Common implementation for RFI and RFI,R. */
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    /* The ,R form additionally restores the shadow registers. */
    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}
2426 
/* RFI: return from interruption. */
static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}
2431 
/* RFI,R: return from interruption, restoring shadow registers. */
static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
2436 
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    /* Clear PSW[X,B] before stopping the machine via the helper. */
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2448 
static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    /* Clear PSW[X,B] before resetting the machine via the helper. */
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2460 
/* Copy the seven shadow registers into gr1,8,9,16,17,24,25. */
static bool do_getshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}
2474 
/* Copy gr1,8,9,16,17,24,25 into the seven shadow registers. */
static bool do_putshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}
2488 
/* GFR/GETSHADOWREGS wrapper; see do_getshadowregs. */
static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    return do_getshadowregs(ctx);
}
2493 
2494 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2495 {
2496     if (a->m) {
2497         TCGv_i64 dest = dest_gpr(ctx, a->b);
2498         TCGv_i64 src1 = load_gpr(ctx, a->b);
2499         TCGv_i64 src2 = load_gpr(ctx, a->x);
2500 
2501         /* The only thing we need to do is the base register modification.  */
2502         tcg_gen_add_i64(dest, src1, src2);
2503         save_gpr(ctx, a->b, dest);
2504     }
2505     ctx->null_cond = cond_make_f();
2506     return true;
2507 }
2508 
static bool trans_fic(DisasContext *ctx, arg_ldst *a)
{
    /* End TB for flush instruction cache, so we pick up new insns. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    /* Otherwise only the optional base register update remains. */
    return trans_nop_addrx(ctx, a);
}
2515 
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_i64 dest, ofs;
    TCGv_i32 level, want;
    TCGv_i64 addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    /* Privilege level to probe with: immediate, or low 2 bits of GR[ri]. */
    if (a->imm) {
        level = tcg_constant_i32(a->ri & 3);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    /* The helper returns the probe result into the target register. */
    gen_helper_probe(dest, tcg_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}
2541 
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    /* Insert TLB Address/Protection; pa1.x encoding only. */
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    /* a->addr selects the address (ITLBA) vs protection (ITLBP) insert. */
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2569 
/* Purge a TLB entry; the local variant only affects this processor. */
static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /*
     * Page align now, rather than later, so that we can add in the
     * page_size field from pa2.0 from the low 4 bits of GR[b].
     */
    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
    if (ctx->is_pa20) {
        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }

    if (local) {
        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    /* With ,M, write back the base register modification. */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2607 
/* PDTLB/PITLB: purge a TLB entry, broadcast. */
static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
{
    return do_pxtlb(ctx, a, false);
}
2612 
/* PDTLB,L/PITLB,L: purge a local TLB entry; pa2.0 only. */
static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
{
    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
}
2617 
static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    /* Base register update only, then flush the whole TLB. */
    trans_nop_addrx(ctx, a);
    gen_helper_ptlbe(tcg_env);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2634 
2635 /*
2636  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2637  * See
2638  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2639  *     page 13-9 (195/206)
2640  */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *    return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    /* Reconstruct the virtual address from the fault registers:
       ISR/IOR for data faults, IIASQ/IIAOQ for instruction faults. */
    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    /* a->addr selects the address (ITLBA) vs protection (ITLBP) insert. */
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2686 
static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    /* Insert TLB entry; pa2.0 encoding only. */
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        /* a->data selects data (IDTLBT) vs instruction (IITLBT) insert. */
        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2712 
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /* Translate the virtual address via the helper. */
    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}
2736 
/* LCI: Load Coherence Index. */
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, ctx->zero);

    ctx->null_cond = cond_make_f();
    return true;
}
2750 
/* ADD; see do_add_reg for the flag meanings. */
static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}
2755 
/* ADD,L (logical add completer, per insn name). */
static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}
2760 
/* ADD,TSV (trap on signed overflow completer, per insn name). */
static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}
2765 
/* ADD,C (add with carry completer, per insn name). */
static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}
2770 
/* ADD,C,TSV (carry plus trap on signed overflow, per insn name). */
static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}
2775 
/* SUB; see do_sub_reg for the flag meanings. */
static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}
2780 
/* SUB,TSV (trap on signed overflow completer, per insn name). */
static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}
2785 
/* SUB,TC (trap on condition completer, per insn name). */
static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}
2790 
/* SUB,TSV,TC (both trap completers, per insn name). */
static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}
2795 
/* SUB,B (subtract with borrow completer, per insn name). */
static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}
2800 
/* SUB,B,TSV (borrow plus trap on signed overflow, per insn name). */
static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
2805 
/* ANDCM: rt = r1 & ~r2. */
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}
2810 
/* AND: rt = r1 & r2. */
static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}
2815 
/* OR, with the unconditional forms special-cased: NOP, COPY, and the
   QEMU-specific PAUSE idiom. */
static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            ctx->null_cond = cond_make_f();
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                /* Copying from r0 is a zeroing of the target. */
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            ctx->null_cond = cond_make_f();
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */

            set_psw_xb(ctx, 0);

            nullify_over(ctx);

            /* Advance the instruction queue.  */
            install_iaq_entries(ctx, &ctx->iaq_b, NULL);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}
2869 
/* XOR: rt = r1 ^ r2. */
static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}
2874 
2875 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2876 {
2877     TCGv_i64 tcg_r1, tcg_r2;
2878 
2879     if (a->cf) {
2880         nullify_over(ctx);
2881     }
2882     tcg_r1 = load_gpr(ctx, a->r1);
2883     tcg_r2 = load_gpr(ctx, a->r2);
2884     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2885     return nullify_end(ctx);
2886 }
2887 
2888 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2889 {
2890     TCGv_i64 tcg_r1, tcg_r2, dest;
2891 
2892     if (a->cf) {
2893         nullify_over(ctx);
2894     }
2895 
2896     tcg_r1 = load_gpr(ctx, a->r1);
2897     tcg_r2 = load_gpr(ctx, a->r2);
2898     dest = dest_gpr(ctx, a->t);
2899 
2900     tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2901     save_gpr(ctx, a->t, dest);
2902 
2903     ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2904     return nullify_end(ctx);
2905 }
2906 
/* Common implementation for UADDCM and UADDCM,TC. */
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    /* With no condition, UADDCM is simply r1 + ~r2. */
    if (a->cf == 0) {
        tcg_r2 = load_gpr(ctx, a->r2);
        tmp = dest_gpr(ctx, a->t);

        if (a->r1 == 0) {
            /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
            tcg_gen_not_i64(tmp, tcg_r2);
        } else {
            /*
             * Recall that r1 - r2 == r1 + ~r2 + 1.
             * Thus r1 + ~r2 == r1 - r2 - 1,
             * which does not require an extra temporary.
             */
            tcg_r1 = load_gpr(ctx, a->r1);
            tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
            tcg_gen_subi_i64(tmp, tmp, 1);
        }
        save_gpr(ctx, a->t, tmp);
        ctx->null_cond = cond_make_f();
        return true;
    }

    /* Conditional form: go through the generic unit add/sub path. */
    nullify_over(ctx);
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
    return nullify_end(ctx);
}
2941 
/* UADDCM: unit add complement. */
static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}
2946 
/* UADDCM,TC: unit add complement, trap on condition. */
static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, true);
}
2951 
/* Common implementation for DCOR and IDCOR (decimal correct). */
static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    /* Gather the per-nibble carry bits out of PSW[CB]. */
    tmp = tcg_temp_new_i64();
    tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
    if (!is_i) {
        tcg_gen_not_i64(tmp, tmp);
    }
    /* Turn each selected nibble into a BCD correction value of 6. */
    tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
                   a->cf, a->d, false, is_i);
    return nullify_end(ctx);
}
2969 
/* DCOR: decimal correct. */
static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}
2974 
/* IDCOR: intermediate decimal correct. */
static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}
2979 
/* DS: one divide step, conditioned on PSW[V] per the architecture. */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    /* Two-stage wide add: add1 + add2, then + the conditional 1. */
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
                     addc, ctx->zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /*
     * Write back PSW[V] for the division step.
     * Shift cb{8} from where it lives in bit 32 to bit 31,
     * so that it overlaps r2{32} in bit 31.
     */
    tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL, uv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
        } else if (cond_need_cb(a->cf >> 1)) {
            uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
    }

    return nullify_end(ctx);
}
3040 
/* ADDI: add immediate (no overflow trap, no trap-on-condition). */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}
3045 
/* ADDI,TSV: add immediate, trap on signed overflow. */
static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}
3050 
/* ADDI,TC: add immediate, trap on condition. */
static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}
3055 
/* ADDI,TC,TSV: add immediate, trap on condition or signed overflow. */
static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}
3060 
/* SUBI: subtract from immediate. */
static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}
3065 
/* SUBI,TSV: subtract from immediate, trap on signed overflow. */
static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
3070 
/* CMPICLR: compare immediate and clear target register. */
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    /* Only needed when a condition may nullify the next insn. */
    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}
3085 
/*
 * Common body for the PA2.0 multimedia (halfword SIMD) register-register
 * ops: load r1/r2, apply the supplied generator, store to rt.
 * Rejected on pre-2.0 CPUs.
 */
static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3106 
/*
 * Common body for the PA2.0 multimedia shift-by-immediate ops.
 * Rejected on pre-2.0 CPUs.
 */
static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3126 
/*
 * Common body for the PA2.0 multimedia shift-and-add ops; the shift
 * amount a->sh is passed to the helper as an i32 constant.
 * Rejected on pre-2.0 CPUs.
 */
static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3148 
/* HADD: parallel halfword add, modulo arithmetic. */
static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}
3153 
/* HADD,SS: parallel halfword add, signed saturation. */
static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}
3158 
/* HADD,US: parallel halfword add, unsigned saturation. */
static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}
3163 
/* HAVG: parallel halfword average. */
static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}
3168 
/* HSHL: parallel halfword shift left. */
static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}
3173 
/* HSHR: parallel halfword shift right, arithmetic. */
static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}
3178 
/* HSHR,U: parallel halfword shift right, logical. */
static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}
3183 
/* HSHLADD: parallel halfword shift left and add. */
static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}
3188 
/* HSHRADD: parallel halfword shift right and add. */
static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}
3193 
/* HSUB: parallel halfword subtract, modulo arithmetic. */
static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}
3198 
/* HSUB,SS: parallel halfword subtract, signed saturation. */
static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}
3203 
/* HSUB,US: parallel halfword subtract, unsigned saturation. */
static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}
3208 
/*
 * MIXH,L: interleave the left (high) halfwords of each 32-bit pair:
 * dst = (r1 & hi-mask) | ((r2 & hi-mask) >> 16).
 * Note r2 must be consumed before dst is written, since dst may
 * alias r2.
 */
static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}
3219 
/* MIXH,L: mix left halfwords. */
static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}
3224 
/*
 * MIXH,R: interleave the right (low) halfwords of each 32-bit pair:
 * dst = (r2 & lo-mask) | ((r1 & lo-mask) << 16).
 * Note r1 must be consumed before dst is written, since dst may
 * alias r1.
 */
static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0x0000ffff0000ffffull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r1, mask);
    tcg_gen_andi_i64(dst, r2, mask);
    tcg_gen_shli_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}
3235 
/* MIXH,R: mix right halfwords. */
static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}
3240 
/*
 * MIXW,L: combine the high words: dst = r1{hi} : r2{hi}.
 * r2's high word is moved to the low half and deposited into r1.
 */
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}
3248 
/* MIXW,L: mix left words. */
static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}
3253 
/* MIXW,R: combine the low words: dst = r1{lo} : r2{lo}. */
static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}
3258 
/* MIXW,R: mix right words. */
static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}
3263 
/*
 * PERMH: permute the four halfwords of r1 into rt according to the
 * selectors c0..c3 (big-endian halfword numbering, hence 3 - cN).
 * PA2.0 only.
 */
static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    /* Pull each selected halfword into the low 16 bits of tN. */
    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    /* Pairwise merge: t0 = t0:t1, t2 = t2:t3, then t0 = t0:t2. */
    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}
3292 
/* LDB/LDH/LDW/LDD: indexed/displacement loads. */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
       /*
        * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
        * Any base modification still occurs.
        */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        /* Doubleword loads do not exist before pa2.0. */
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}
3309 
/* STB/STH/STW/STD: displacement stores (no index or scale forms). */
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    /* Doubleword stores do not exist before pa2.0. */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
3318 
/*
 * LDCW/LDCD: load and clear (atomic exchange with zero),
 * implemented with a TCG atomic xchg.
 */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    /* LDCD does not exist before pa2.0. */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    /* Atomically read the old value and store zero.  */
    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3361 
/*
 * STBY: store bytes, begin (a == 0) or end (a == 1) variants,
 * via helpers; parallel helpers are used under CF_PARALLEL.
 */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the word-aligned offset. */
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3392 
/*
 * STDBY: doubleword variant of STBY; pa2.0 only.
 */
static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the doubleword-aligned offset. */
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3426 
/*
 * LDWA/LDDA: load absolute - same as trans_ld, but with translation
 * bypassed by temporarily switching to the absolute MMU index.
 * Privileged: traps unless at the most privileged level.
 */
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3437 
/*
 * STWA/STDA: store absolute - same as trans_st, but with translation
 * bypassed by temporarily switching to the absolute MMU index.
 * Privileged: traps unless at the most privileged level.
 */
static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3448 
/* LDIL: load immediate left - write the immediate into rt. */
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_i64(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    /* Never nullifies the next insn. */
    ctx->null_cond = cond_make_f();
    return true;
}
3458 
/* ADDIL: add immediate left - result always goes to %r1. */
static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
    TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    /* Never nullifies the next insn. */
    ctx->null_cond = cond_make_f();
    return true;
}
3469 
/* LDO: load offset - rt = rb + im; also covers LDI and COPY pseudo-ops. */
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
    if (a->b == 0) {
        tcg_gen_movi_i64(tcg_rt, a->i);
    } else {
        tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    /* Never nullifies the next insn. */
    ctx->null_cond = cond_make_f();
    return true;
}
3485 
/*
 * Common body for CMPB/CMPIB: subtract in1 - GR[r], derive the
 * branch condition from c/f/d, and emit the conditional branch.
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    /* Signed overflow only needed for the SV/OD class conditions. */
    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3505 
/* CMPB: compare registers and branch. */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}
3515 
/* CMPIB: compare immediate and branch. */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}
3525 
/*
 * Common body for ADDB/ADDIB: add in1 + GR[r], write the sum back
 * to GR[r], and branch on the resulting condition.
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        /* Compute the sum together with the carry bits. */
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        tcg_gen_movi_i64(cb_msb, 0);
        tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3569 
/* ADDB: add registers and branch. */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
3575 
/* ADDIB: add immediate and branch. */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}
3581 
/*
 * BB: branch on bit, bit position taken from SAR.  The selected bit
 * is shifted into the sign position and tested with a signed compare.
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (a->d) {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    } else {
        /* Force shift into [32,63] */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    }

    /* Sign of tmp is now the selected bit. */
    cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3602 
/*
 * BB: branch on bit, fixed bit position.  Uses a test-bit condition
 * directly against the big-endian bit number p.
 */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    DisasCond cond;
    /* Without the doubleword completer, bias into the low word. */
    int p = a->p | (a->d ? 0 : 32);

    nullify_over(ctx);
    cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                        load_gpr(ctx, a->r), 1ull << (63 - p));
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3613 
/* MOVB: move register and branch on the moved value. */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3632 
/* MOVIB: move immediate and branch on the moved value. */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3647 
/*
 * SHRPW/SHRPD with shift amount from SAR: shift the r1:r2 pair right
 * by SAR into rt.  Special-cases r1 == 0 (plain shift) and r1 == r2
 * (rotate) to generate tighter code.
 */
static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High half is zero: a plain logical right shift. */
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        /* Both halves identical: a rotate right. */
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            /* General 128-bit funnel shift, done as two shifts.
               The shl by (63 - sar) then by 1 avoids an UB shift
               of 64 when SAR is 0.  */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            /* 32-bit pair fits in one i64: concat then shift. */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3711 
/*
 * SHRPW/SHRPD with immediate shift amount: shift the r1:r2 pair
 * right by (width - 1 - cpos) into rt.
 */
static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    /* Convert the big-endian bit position to a shift count. */
    width = a->d ? 64 : 32;
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High half zero: a plain extract. */
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        assert(!a->d);
        if (a->r1 == a->r2) {
            /* Both halves identical: a 32-bit rotate. */
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3751 
/*
 * EXTRW/EXTRD with position from SAR: extract a->len bits ending at
 * the SAR bit position, sign- (a->se) or zero-extended into rt.
 */
static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        /* Sign-extend to full width before the variable shift. */
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        /* Zero-extend to full width before the variable shift. */
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3793 
/*
 * EXTRW/EXTRD with immediate position: extract len bits at the
 * (big-endian) position a->pos, sign- or zero-extended into rt.
 */
static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Convert the big-endian bit position to a little-endian one. */
    cpos = width - 1 - a->pos;
    /* Clamp the field so it does not run off the top of the word. */
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3826 
/*
 * DEPWI/DEPDI with immediate position: deposit the immediate a->i
 * into rt at a->cpos for a->len bits.  With a->nz the untouched bits
 * come from the old rt value, otherwise they are zero.
 */
static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the field so it does not run off the top of the word. */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    /* mask0: the field over zeros; mask1: the field over ones. */
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3863 
/*
 * DEPW/DEPD with immediate position: deposit GR[r] into rt at
 * a->cpos for a->len bits.  With a->nz the untouched bits come from
 * the old rt value, otherwise they are zero.
 */
static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the field so it does not run off the top of the word. */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3896 
/*
 * Common body for DEPW/DEPD and DEPWI/DEPDI with the deposit position
 * taken from SAR: insert the low len bits of val into rt at the
 * (big-endian) SAR position.  With nz the remaining bits come from
 * the old rt value, otherwise they are zero.
 */
static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    /* Keep only the field bits of val, then place them at the
       shifted position; merge into the old value when nz.  */
    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    return nullify_end(ctx);
}
3930 
/* DEPW/DEPD with position from SAR, register source. */
static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}
3942 
/* DEPWI/DEPDI with position from SAR, immediate source. */
static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    /* The doubleword completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}
3954 
/*
 * BE: branch external - indirect branch to (sp, rb + disp),
 * changing the instruction address space when MMUs exist.
 */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = tcg_temp_new_i64();
    load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif

    ctx->iaq_j.base = tcg_temp_new_i64();
    ctx->iaq_j.disp = 0;

    tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
    /* Apply the privilege-adjustment rules for indirect branches. */
    ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);

    return do_ibranch(ctx, a->l, true, a->n);
}
3970 
/* B,L: direct branch and link. */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, a->disp, a->l, a->n);
}
3975 
/*
 * B,GATE: gateway branch, which may raise the privilege level
 * according to the page translation of the branch source.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    int64_t disp = a->disp;
    bool indirect = false;

    /* Trap if PSW[B] is set. */
    if (ctx->psw_xb & PSW_B) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

#ifndef CONFIG_USER_ONLY
    if (ctx->privilege == 0) {
        /* Privilege cannot decrease. */
    } else if (!(ctx->tb_flags & PSW_C)) {
        /* With paging disabled, priv becomes 0. */
        disp -= ctx->privilege;
    } else {
        /* Adjust the dest offset for the privilege change from the PTE. */
        TCGv_i64 off = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, off, &ctx->iaq_f);
        gen_helper_b_gate_priv(off, tcg_env, off);

        ctx->iaq_j.base = off;
        ctx->iaq_j.disp = disp + 8;
        indirect = true;
    }
#endif

    if (a->l) {
        /* Link: record the return address with the current privilege
           in its low bits. */
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    if (indirect) {
        return do_ibranch(ctx, 0, false, a->n);
    }
    return do_dbranch(ctx, disp, 0, a->n);
}
4021 
/* BLR: branch and link register - target is PC + 8 + (rx << 3). */
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* The computation here never changes privilege level.  */
        copy_iaoq_entry(ctx, t0, &next);
        tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(t0, t0, t1);

        ctx->iaq_j = iaqe_next_absv(ctx, t0);
        return do_ibranch(ctx, a->l, false, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, 0, a->l, a->n);
    }
}
4041 
/* BV: branch vectored - indirect branch to rb + (rx << 3). */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    /* Apply the privilege-adjustment rules for indirect branches. */
    dest = do_ibranch_priv(ctx, dest);
    ctx->iaq_j = iaqe_next_absv(ctx, dest);

    return do_ibranch(ctx, 0, false, a->n);
}
4058 
/* BVE: branch vectored external, to GR[b] via do_ibranch_priv. */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 b = load_gpr(ctx, a->b);

#ifndef CONFIG_USER_ONLY
    /* System mode only: the branch may select a new instruction space. */
    ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
    ctx->iaq_j.base = do_ibranch_priv(ctx, b);
    ctx->iaq_j.disp = 0;

    return do_ibranch(ctx, a->l, false, a->n);
}
4071 
static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions implement as nop.
       They only exist on PA2.0; returning false elsewhere makes
       the decoder treat them as illegal. */
    return ctx->is_pa20;
}
4077 
4078 /*
4079  * Float class 0
4080  */
4081 
/* FCPY (single): plain move; env arg unused but required by do_fop_wew. */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
4086 
4087 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4088 {
4089     uint64_t ret;
4090 
4091     if (ctx->is_pa20) {
4092         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4093     } else {
4094         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4095     }
4096 
4097     nullify_over(ctx);
4098     save_frd(0, tcg_constant_i64(ret));
4099     return nullify_end(ctx);
4100 }
4101 
/*
 * Class 0 one-operand ops.  The 32-bit variants dispatch through
 * do_fop_wew, the 64-bit variants through do_fop_ded.  Inline gen_*
 * callbacks take (and ignore) an env argument to match the helper
 * signatures expected by those dispatchers.
 */
static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* FABS: clear the sign bit. */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

/* FSQRT and FRND go through softfloat helpers (may set exceptions). */
static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

/* FNEG: flip the sign bit. */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* FNEGABS: force the sign bit on, i.e. -|x|. */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
4196 
4197 /*
4198  * Float class 1
4199  */
4200 
/*
 * Conversion wrappers.  Naming is fcnv[_t]_<src>_<dst> where
 * f = single float, d = double float, w = 32-bit int, q = 64-bit int,
 * and a u prefix marks the unsigned integer variants.  The dispatcher
 * (wew/wed/dew/ded) encodes the operand widths: w = 32-bit slot,
 * d/e = 64-bit slot.  The t_ forms map to the fcnv_t_* helpers
 * (presumably the round-toward-zero variants — see the helper bodies).
 */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
4330 
4331 /*
4332  * Float class 2
4333  */
4334 
4335 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4336 {
4337     TCGv_i32 ta, tb, tc, ty;
4338 
4339     nullify_over(ctx);
4340 
4341     ta = load_frw0_i32(a->r1);
4342     tb = load_frw0_i32(a->r2);
4343     ty = tcg_constant_i32(a->y);
4344     tc = tcg_constant_i32(a->c);
4345 
4346     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4347 
4348     return nullify_end(ctx);
4349 }
4350 
4351 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4352 {
4353     TCGv_i64 ta, tb;
4354     TCGv_i32 tc, ty;
4355 
4356     nullify_over(ctx);
4357 
4358     ta = load_frd0(a->r1);
4359     tb = load_frd0(a->r2);
4360     ty = tcg_constant_i32(a->y);
4361     tc = tcg_constant_i32(a->c);
4362 
4363     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4364 
4365     return nullify_end(ctx);
4366 }
4367 
/*
 * FTEST: test bits of the software FPSR shadow (fr0_shadow) and set the
 * nullification condition accordingly.  The "acc"/"rej" completers test
 * the C bit together with a prefix of the compare queue; "rej" variants
 * invert the sense (TSTEQ instead of TSTNE).
 */
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGCond tc = TCG_COND_TSTNE;
    uint32_t mask;
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        /* Named completers, distinguished by the c field. */
        switch (a->c) {
        case 0: /* simple */
            mask = R_FPSR_C_MASK;
            break;
        case 2: /* rej */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 1: /* acc */
            mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
            break;
        case 6: /* rej8 */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 5: /* acc8 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
            break;
        case 9: /* acc6 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
            break;
        case 13: /* acc4 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
            break;
        case 17: /* acc2 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
    } else {
        /* Test a single CA bit selected by the y field. */
        unsigned cbit = (a->y ^ 1) - 1;
        mask = R_FPSR_CA0_MASK >> cbit;
    }

    ctx->null_cond = cond_make_ti(tc, t, mask);
    return nullify_end(ctx);
}
4417 
4418 /*
4419  * Float class 2
4420  */
4421 
/* Two-operand FP arithmetic, dispatched to the softfloat helpers. */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
4461 
4462 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4463 {
4464     TCGv_i64 x, y;
4465 
4466     nullify_over(ctx);
4467 
4468     x = load_frw0_i64(a->r1);
4469     y = load_frw0_i64(a->r2);
4470     tcg_gen_mul_i64(x, x, y);
4471     save_frd(a->t, x);
4472 
4473     return nullify_end(ctx);
4474 }
4475 
/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    /* Bit 4 selects a bank 32 registers up; low four bits index into it,
       offset by the base of 16.  Equivalent to (r & 16) * 2 + 16 + (r & 15). */
    return ((r & 16) << 1) + 16 + (r & 15);
}
4481 
/*
 * FMPYADD/FMPYSUB (single): two independent ops issued back to back,
 * tm = rm1 * rm2 and ta = ta +/- ra.
 */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    /* Decode the packed single-precision register fields. */
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}
4498 
/* Single-precision multiply-and-add / multiply-and-subtract. */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
4508 
/*
 * FMPYADD/FMPYSUB (double): tm = rm1 * rm2 and ta = ta +/- ra.
 * Unlike the single-precision form, the register fields need no remapping.
 */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
4519 
/* Double-precision multiply-and-add / multiply-and-subtract. */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
4529 
4530 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4531 {
4532     TCGv_i32 x, y, z;
4533 
4534     nullify_over(ctx);
4535     x = load_frw0_i32(a->rm1);
4536     y = load_frw0_i32(a->rm2);
4537     z = load_frw0_i32(a->ra3);
4538 
4539     if (a->neg) {
4540         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4541     } else {
4542         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4543     }
4544 
4545     save_frw_i32(a->t, x);
4546     return nullify_end(ctx);
4547 }
4548 
4549 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4550 {
4551     TCGv_i64 x, y, z;
4552 
4553     nullify_over(ctx);
4554     x = load_frd0(a->rm1);
4555     y = load_frd0(a->rm2);
4556     z = load_frd0(a->ra3);
4557 
4558     if (a->neg) {
4559         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4560     } else {
4561         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4562     }
4563 
4564     save_frd(a->t, x);
4565     return nullify_end(ctx);
4566 }
4567 
/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    /* Rejects (via the macro) unless at the most privileged level;
       in user-only builds this is the whole body. */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}
4578 
/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    /* Rejects (via the macro) unless at the most privileged level;
       in user-only builds this is the whole body. */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}
4589 
/*
 * DIAG get/put shadow registers.  PA1.x and PA2.0 use different
 * encodings, so each variant accepts only on the matching CPU level.
 */
static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_putshadowregs(ctx);
}
4609 
static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    /* Unrecognized DIAG opcodes: privileged, logged, and ignored. */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}
4616 
/*
 * Per-TB translator setup: decode the TB flags into the DisasContext,
 * reconstruct the IA queue state from cs_base, and bound the TB to the
 * current page.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t cs_base;
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
    ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);

#ifdef CONFIG_USER_ONLY
    ctx->privilege = PRIV_USER;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    /* Privilege level travels in the TB flags; the MMU index depends on
       PSW[D] (translation on/off) and then PSW[P] or PSW[W]. */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
#endif

    cs_base = ctx->base.tb->cs_base;
    /* The low bits of an IAOQ value encode the privilege level. */
    ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;

    /* Recover IAQ_Back relative to IAQ_Front from the cs_base encoding:
       same page (offset only), different page, or different space. */
    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
        ctx->iaq_b.space = cpu_iasq_b;
        ctx->iaq_b.base = cpu_iaoq_b;
    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
        ctx->iaq_b.base = cpu_iaoq_b;
    } else {
        uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
        ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
    }

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
4659 
4660 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4661 {
4662     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4663 
4664     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4665     ctx->null_cond = cond_make_f();
4666     ctx->psw_n_nonzero = false;
4667     if (ctx->tb_flags & PSW_N) {
4668         ctx->null_cond.c = TCG_COND_ALWAYS;
4669         ctx->psw_n_nonzero = true;
4670     }
4671     ctx->null_lab = NULL;
4672 }
4673 
/*
 * Record per-insn start state: the page offset of IAOQ_Front plus the
 * front-to-back displacement (INT32_MIN when IAQ_Back is not a
 * compile-time constant).
 */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t iaoq_f, iaoq_b;
    int64_t diff;

    /* IAQ_Front must always be a constant offset at insn start. */
    tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));

    iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
    if (iaqe_variable(&ctx->iaq_b)) {
        /* Variable back queue: record the INT32_MIN sentinel. */
        diff = INT32_MIN;
    } else {
        iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
        diff = iaoq_b - iaoq_f;
        /* Direct branches can only produce a 24-bit displacement. */
        tcg_debug_assert(diff == (int32_t)diff);
        tcg_debug_assert(diff != INT32_MIN);
    }

    tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
    ctx->insn_start_updated = false;
}
4696 
/*
 * Translate (or nullify) a single instruction, then advance the
 * front/back instruction-address queue for the next one.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    /* Page zero holds the emulated gateway entry points in user mode. */
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /*
         * Set up the IA queue for the next insn.
         * This will be overwritten by a branch.
         */
        ctx->iaq_n = NULL;
        memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
        ctx->psw_b_next = false;

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Unconditionally nullified: skip and clear nullification. */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            /* Any nullification label must have been resolved. */
            assert(ctx->null_lab == NULL);
        }

        if (ret != DISAS_NORETURN) {
            set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
        }
    }

    /* If the TranslationBlock must end, do so. */
    ctx->base.pc_next += 4;
    if (ret != DISAS_NEXT) {
        return;
    }
    /* Note this also detects a priority change. */
    if (iaqe_variable(&ctx->iaq_b)
        || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        return;
    }

    /*
     * Advance the insn queue.
     * The only exit now is DISAS_TOO_MANY from the translator loop.
     */
    ctx->iaq_f.disp = ctx->iaq_b.disp;
    if (!ctx->iaq_n) {
        ctx->iaq_b.disp += 4;
        return;
    }
    /*
     * If IAQ_Next is variable in any way, we need to copy into the
     * IAQ_Back globals, in case the next insn raises an exception.
     */
    if (ctx->iaq_n->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
        ctx->iaq_b.base = cpu_iaoq_b;
        ctx->iaq_b.disp = 0;
    } else {
        ctx->iaq_b.disp = ctx->iaq_n->disp;
    }
    if (ctx->iaq_n->space) {
        tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
        ctx->iaq_b.space = cpu_iasq_b;
    }
}
4778 
/*
 * Close the TB: emit the exit sequence appropriate to is_jmp, then
 * emit any out-of-line delayed-exception paths queued during translation.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    /* Assume the insn queue has not been advanced. */
    DisasIAQE *f = &ctx->iaq_b;
    DisasIAQE *b = ctx->iaq_n;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        /* The insn queue has not been advanced. */
        f = &ctx->iaq_f;
        b = &ctx->iaq_b;
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE:
        /* Chain directly when the target is usable with goto_tb and
           nullification is statically known. */
        if (use_goto_tb(ctx, f, b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS)
;
            gen_goto_tb(ctx, 0, f, b);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE_EXIT:
        /* Flush queue and nullification state back to the CPU. */
        install_iaq_entries(ctx, f, b);
        nullify_save(ctx);
        if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }

    /* Emit the deferred exception paths collected during translation. */
    for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);
        if (e->set_n >= 0) {
            tcg_gen_movi_i64(cpu_psw_n, e->set_n);
        }
        if (e->set_iir) {
            tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
                           offsetof(CPUHPPAState, cr[CR_IIR]));
        }
        install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
        gen_excp_1(e->excp);
    }
}
4835 
4836 #ifdef CONFIG_USER_ONLY
4837 static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
4838                               CPUState *cs, FILE *logfile)
4839 {
4840     target_ulong pc = dcbase->pc_first;
4841 
4842     switch (pc) {
4843     case 0x00:
4844         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4845         return true;
4846     case 0xb0:
4847         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4848         return true;
4849     case 0xe0:
4850         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4851         return true;
4852     case 0x100:
4853         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4854         return true;
4855     }
4856     return false;
4857 }
4858 #endif
4859 
4860 static const TranslatorOps hppa_tr_ops = {
4861     .init_disas_context = hppa_tr_init_disas_context,
4862     .tb_start           = hppa_tr_tb_start,
4863     .insn_start         = hppa_tr_insn_start,
4864     .translate_insn     = hppa_tr_translate_insn,
4865     .tb_stop            = hppa_tr_tb_stop,
4866 #ifdef CONFIG_USER_ONLY
4867     .disas_log          = hppa_tr_disas_log,
4868 #endif
4869 };
4870 
4871 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4872                            vaddr pc, void *host_pc)
4873 {
4874     DisasContext ctx = { };
4875     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4876 }
4877