1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "exec/page-protection.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
37 /* Choose to use explicit sizes within this file. */
38 #undef tcg_temp_new
39 
40 typedef struct DisasCond {
41     TCGCond c;
42     TCGv_i64 a0, a1;
43 } DisasCond;
44 
45 typedef struct DisasIAQE {
46     /* IASQ; may be null for no change from TB. */
47     TCGv_i64 space;
48     /* IAOQ base; may be null for relative address. */
49     TCGv_i64 base;
50     /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
51     int64_t disp;
52 } DisasIAQE;
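
/*
 * Illustrative note: an entry resolves to the offset
 *     (base ? base : cpu_iaoq_f) + disp
 * (see copy_iaoq_entry below), so { .base = NULL, .disp = 4 } describes
 * "four bytes past IAOQ_Front" without materializing a TCG value until
 * the queue is actually written back.
 */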
53 
54 typedef struct DisasDelayException {
55     struct DisasDelayException *next;
56     TCGLabel *lab;
57     uint32_t insn;
58     bool set_iir;
59     int8_t set_n;
60     uint8_t excp;
61     /* Saved state at parent insn. */
62     DisasIAQE iaq_f, iaq_b;
63 } DisasDelayException;
64 
65 typedef struct DisasContext {
66     DisasContextBase base;
67     CPUState *cs;
68 
69     /* IAQ_Front, IAQ_Back. */
70     DisasIAQE iaq_f, iaq_b;
71     /* IAQ_Next, for jumps, otherwise null for simple advance. */
72     DisasIAQE iaq_j, *iaq_n;
73 
74     /* IAOQ_Front at entry to TB. */
75     uint64_t iaoq_first;
76 
77     DisasCond null_cond;
78     TCGLabel *null_lab;
79 
80     DisasDelayException *delay_excp_list;
81     TCGv_i64 zero;
82 
83     uint32_t insn;
84     uint32_t tb_flags;
85     int mmu_idx;
86     int privilege;
87     uint32_t psw_xb;
88     bool psw_n_nonzero;
89     bool psw_b_next;
90     bool is_pa20;
91     bool insn_start_updated;
92 
93 #ifdef CONFIG_USER_ONLY
94     MemOp unalign;
95 #endif
96 } DisasContext;
97 
98 #ifdef CONFIG_USER_ONLY
99 #define UNALIGN(C)       (C)->unalign
100 #define MMU_DISABLED(C)  false
101 #else
102 #define UNALIGN(C)       MO_ALIGN
103 #define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
104 #endif
105 
106 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
107 static int expand_sm_imm(DisasContext *ctx, int val)
108 {
109     /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
110     if (ctx->is_pa20) {
111         if (val & PSW_SM_W) {
112             val |= PSW_W;
113         }
114         val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
115     } else {
116         val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
117     }
118     return val;
119 }
120 
121 /* Inverted space register: an explicit sr0 is distinguished from 'infer from base'.  */
122 static int expand_sr3x(DisasContext *ctx, int val)
123 {
124     return ~val;
125 }
126 
127 /* Convert the M:A bits within a memory insn to the tri-state value
128    we use for the final M.  */
129 static int ma_to_m(DisasContext *ctx, int val)
130 {
131     return val & 2 ? (val & 1 ? -1 : 1) : 0;
132 }
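
/*
 * Illustrative mapping for the M:A decode above:
 *     m=0        ->  0   (no base register update)
 *     m=1, a=0   ->  1   (post-modify)
 *     m=1, a=1   -> -1   (pre-modify)
 */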
133 
134 /* Convert the sign of the displacement to a pre- or post-modify.  */
135 static int pos_to_m(DisasContext *ctx, int val)
136 {
137     return val ? 1 : -1;
138 }
139 
140 static int neg_to_m(DisasContext *ctx, int val)
141 {
142     return val ? -1 : 1;
143 }
144 
145 /* Used for branch targets and fp memory ops.  */
146 static int expand_shl2(DisasContext *ctx, int val)
147 {
148     return val << 2;
149 }
150 
151 /* Used for assemble_21.  */
152 static int expand_shl11(DisasContext *ctx, int val)
153 {
154     return val << 11;
155 }
156 
157 static int assemble_6(DisasContext *ctx, int val)
158 {
159     /*
160      * Officially, 32 * x + 32 - y.
161      * Here, x is already in bit 5, and y is [4:0].
162      * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
163      * with the overflow from bit 4 summing with x.
164      */
165     return (val ^ 31) + 1;
166 }
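
/*
 * Worked example: x=1, y=1 encodes val = 0b100001 = 33, and
 * (33 ^ 31) + 1 = 62 + 1 = 63 = 32 * 1 + 32 - 1, as required.
 */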
167 
168 /* Expander for assemble_16a(s,cat(im10a,0),i). */
169 static int expand_11a(DisasContext *ctx, int val)
170 {
171     /*
172      * @val is bit 0 and bits [4:15].
173      * Swizzle things around depending on PSW.W.
174      */
175     int im10a = extract32(val, 1, 10);
176     int s = extract32(val, 11, 2);
177     int i = (-(val & 1) << 13) | (im10a << 3);
178 
179     if (ctx->tb_flags & PSW_W) {
180         i ^= s << 13;
181     }
182     return i;
183 }
184 
185 /* Expander for assemble_16a(s,im11a,i). */
186 static int expand_12a(DisasContext *ctx, int val)
187 {
188     /*
189      * @val is bit 0 and bits [3:15].
190      * Swizzle things around depending on PSW.W.
191      */
192     int im11a = extract32(val, 1, 11);
193     int s = extract32(val, 12, 2);
194     int i = (-(val & 1) << 13) | (im11a << 2);
195 
196     if (ctx->tb_flags & PSW_W) {
197         i ^= s << 13;
198     }
199     return i;
200 }
201 
202 /* Expander for assemble_16(s,im14). */
203 static int expand_16(DisasContext *ctx, int val)
204 {
205     /*
206      * @val is bits [0:15], containing both im14 and s.
207      * Swizzle things around depending on PSW.W.
208      */
209     int s = extract32(val, 14, 2);
210     int i = (-(val & 1) << 13) | extract32(val, 1, 13);
211 
212     if (ctx->tb_flags & PSW_W) {
213         i ^= s << 13;
214     }
215     return i;
216 }
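
/*
 * Worked example of the swizzle, derived from the code above: with the
 * sign bit set, im13 = 0 and s = 1, the narrow result is i = -8192;
 * under PSW_W the xor of s << 13 clears bit 13, yielding -16384.  The
 * space bits thus fold into immediate bits [14:13], producing the full
 * 16-bit immediate of wide mode.
 */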
217 
218 /* The sp field is only present with !PSW_W. */
219 static int sp0_if_wide(DisasContext *ctx, int sp)
220 {
221     return ctx->tb_flags & PSW_W ? 0 : sp;
222 }
223 
224 /* Translate CMPI doubleword conditions to standard. */
225 static int cmpbid_c(DisasContext *ctx, int val)
226 {
227     return val ? val : 4; /* 0 == "*<<" */
228 }
229 
230 /*
231  * In many places pa1.x did not decode the bit that later became
232  * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
233  */
234 static int pa20_d(DisasContext *ctx, int val)
235 {
236     return ctx->is_pa20 & val;
237 }
238 
239 /* Include the auto-generated decoder.  */
240 #include "decode-insns.c.inc"
241 
242 /* We are not using a goto_tb (for whatever reason), but have updated
243    the iaq (for whatever reason), so don't do it again on exit.  */
244 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
245 
246 /* We are exiting the TB, but have neither emitted a goto_tb, nor
247    updated the iaq for the next instruction to be executed.  */
248 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
249 
250 /* Similarly, but we want to return to the main loop immediately
251    to recognize unmasked interrupts.  */
252 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
253 #define DISAS_EXIT                  DISAS_TARGET_3
254 
255 /* global register indexes */
256 static TCGv_i64 cpu_gr[32];
257 static TCGv_i64 cpu_sr[4];
258 static TCGv_i64 cpu_srH;
259 static TCGv_i64 cpu_iaoq_f;
260 static TCGv_i64 cpu_iaoq_b;
261 static TCGv_i64 cpu_iasq_f;
262 static TCGv_i64 cpu_iasq_b;
263 static TCGv_i64 cpu_sar;
264 static TCGv_i64 cpu_psw_n;
265 static TCGv_i64 cpu_psw_v;
266 static TCGv_i64 cpu_psw_cb;
267 static TCGv_i64 cpu_psw_cb_msb;
268 static TCGv_i32 cpu_psw_xb;
269 
270 void hppa_translate_init(void)
271 {
272 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
273 
274     typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
275     static const GlobalVar vars[] = {
276         { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
277         DEF_VAR(psw_n),
278         DEF_VAR(psw_v),
279         DEF_VAR(psw_cb),
280         DEF_VAR(psw_cb_msb),
281         DEF_VAR(iaoq_f),
282         DEF_VAR(iaoq_b),
283     };
284 
285 #undef DEF_VAR
286 
287     /* Use the symbolic register names that match the disassembler.  */
288     static const char gr_names[32][4] = {
289         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
290         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
291         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
292         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
293     };
294     /* SR[4-7] are not TCG globals, so that we can index them at runtime.  */
295     static const char sr_names[5][4] = {
296         "sr0", "sr1", "sr2", "sr3", "srH"
297     };
298 
299     int i;
300 
301     cpu_gr[0] = NULL;
302     for (i = 1; i < 32; i++) {
303         cpu_gr[i] = tcg_global_mem_new(tcg_env,
304                                        offsetof(CPUHPPAState, gr[i]),
305                                        gr_names[i]);
306     }
307     for (i = 0; i < 4; i++) {
308         cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
309                                            offsetof(CPUHPPAState, sr[i]),
310                                            sr_names[i]);
311     }
312     cpu_srH = tcg_global_mem_new_i64(tcg_env,
313                                      offsetof(CPUHPPAState, sr[4]),
314                                      sr_names[4]);
315 
316     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
317         const GlobalVar *v = &vars[i];
318         *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
319     }
320 
321     cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
322                                         offsetof(CPUHPPAState, psw_xb),
323                                         "psw_xb");
324     cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
325                                         offsetof(CPUHPPAState, iasq_f),
326                                         "iasq_f");
327     cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
328                                         offsetof(CPUHPPAState, iasq_b),
329                                         "iasq_b");
330 }
331 
332 static void set_insn_breg(DisasContext *ctx, int breg)
333 {
334     assert(!ctx->insn_start_updated);
335     ctx->insn_start_updated = true;
336     tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
337 }
338 
339 static DisasCond cond_make_f(void)
340 {
341     return (DisasCond){
342         .c = TCG_COND_NEVER,
343         .a0 = NULL,
344         .a1 = NULL,
345     };
346 }
347 
348 static DisasCond cond_make_t(void)
349 {
350     return (DisasCond){
351         .c = TCG_COND_ALWAYS,
352         .a0 = NULL,
353         .a1 = NULL,
354     };
355 }
356 
357 static DisasCond cond_make_n(void)
358 {
359     return (DisasCond){
360         .c = TCG_COND_NE,
361         .a0 = cpu_psw_n,
362         .a1 = tcg_constant_i64(0)
363     };
364 }
365 
366 static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
367 {
368     assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
369     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
370 }
371 
372 static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
373 {
374     return cond_make_tt(c, a0, tcg_constant_i64(imm));
375 }
376 
377 static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
378 {
379     TCGv_i64 tmp = tcg_temp_new_i64();
380     tcg_gen_mov_i64(tmp, a0);
381     return cond_make_ti(c, tmp, imm);
382 }
383 
384 static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
385 {
386     TCGv_i64 t0 = tcg_temp_new_i64();
387     TCGv_i64 t1 = tcg_temp_new_i64();
388 
389     tcg_gen_mov_i64(t0, a0);
390     tcg_gen_mov_i64(t1, a1);
391     return cond_make_tt(c, t0, t1);
392 }
393 
394 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
395 {
396     if (reg == 0) {
397         return ctx->zero;
398     } else {
399         return cpu_gr[reg];
400     }
401 }
402 
403 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
404 {
405     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
406         return tcg_temp_new_i64();
407     } else {
408         return cpu_gr[reg];
409     }
410 }
411 
412 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
413 {
414     if (ctx->null_cond.c != TCG_COND_NEVER) {
415         tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
416                             ctx->null_cond.a1, dest, t);
417     } else {
418         tcg_gen_mov_i64(dest, t);
419     }
420 }
421 
422 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
423 {
424     if (reg != 0) {
425         save_or_nullify(ctx, cpu_gr[reg], t);
426     }
427 }
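
/*
 * A sketch of the typical pattern built from the helpers above (the
 * shape used by the arithmetic translators later in this file):
 *
 *     TCGv_i64 dest = dest_gpr(ctx, rt);      // temp if nullified
 *     tcg_gen_add_i64(dest, load_gpr(ctx, r1), load_gpr(ctx, r2));
 *     save_gpr(ctx, rt, dest);                // movcond if nullified
 *
 * load_gpr of r0 yields the read-only zero; save_gpr to r0 is a no-op.
 */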
428 
429 #if HOST_BIG_ENDIAN
430 # define HI_OFS  0
431 # define LO_OFS  4
432 #else
433 # define HI_OFS  4
434 # define LO_OFS  0
435 #endif
436 
437 static TCGv_i32 load_frw_i32(unsigned rt)
438 {
439     TCGv_i32 ret = tcg_temp_new_i32();
440     tcg_gen_ld_i32(ret, tcg_env,
441                    offsetof(CPUHPPAState, fr[rt & 31])
442                    + (rt & 32 ? LO_OFS : HI_OFS));
443     return ret;
444 }
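
/*
 * Note on the layout used above: each 64-bit fr[n] slot holds two
 * single-width halves; rt & 31 selects the register, rt & 32 selects
 * the low word, and HI_OFS/LO_OFS compensate for host endianness.
 */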
445 
446 static TCGv_i32 load_frw0_i32(unsigned rt)
447 {
448     if (rt == 0) {
449         TCGv_i32 ret = tcg_temp_new_i32();
450         tcg_gen_movi_i32(ret, 0);
451         return ret;
452     } else {
453         return load_frw_i32(rt);
454     }
455 }
456 
457 static TCGv_i64 load_frw0_i64(unsigned rt)
458 {
459     TCGv_i64 ret = tcg_temp_new_i64();
460     if (rt == 0) {
461         tcg_gen_movi_i64(ret, 0);
462     } else {
463         tcg_gen_ld32u_i64(ret, tcg_env,
464                           offsetof(CPUHPPAState, fr[rt & 31])
465                           + (rt & 32 ? LO_OFS : HI_OFS));
466     }
467     return ret;
468 }
469 
470 static void save_frw_i32(unsigned rt, TCGv_i32 val)
471 {
472     tcg_gen_st_i32(val, tcg_env,
473                    offsetof(CPUHPPAState, fr[rt & 31])
474                    + (rt & 32 ? LO_OFS : HI_OFS));
475 }
476 
477 #undef HI_OFS
478 #undef LO_OFS
479 
480 static TCGv_i64 load_frd(unsigned rt)
481 {
482     TCGv_i64 ret = tcg_temp_new_i64();
483     tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
484     return ret;
485 }
486 
487 static TCGv_i64 load_frd0(unsigned rt)
488 {
489     if (rt == 0) {
490         TCGv_i64 ret = tcg_temp_new_i64();
491         tcg_gen_movi_i64(ret, 0);
492         return ret;
493     } else {
494         return load_frd(rt);
495     }
496 }
497 
498 static void save_frd(unsigned rt, TCGv_i64 val)
499 {
500     tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
501 }
502 
503 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
504 {
505 #ifdef CONFIG_USER_ONLY
506     tcg_gen_movi_i64(dest, 0);
507 #else
508     if (reg < 4) {
509         tcg_gen_mov_i64(dest, cpu_sr[reg]);
510     } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
511         tcg_gen_mov_i64(dest, cpu_srH);
512     } else {
513         tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
514     }
515 #endif
516 }
517 
518 /*
519  * Write a value to psw_xb, bearing in mind the known value.
520  * To be used just before exiting the TB, so do not update the known value.
521  */
522 static void store_psw_xb(DisasContext *ctx, uint32_t xb)
523 {
524     tcg_debug_assert(xb == 0 || xb == PSW_B);
525     if (ctx->psw_xb != xb) {
526         tcg_gen_movi_i32(cpu_psw_xb, xb);
527     }
528 }
529 
530 /* Write a value to psw_xb, and update the known value. */
531 static void set_psw_xb(DisasContext *ctx, uint32_t xb)
532 {
533     store_psw_xb(ctx, xb);
534     ctx->psw_xb = xb;
535 }
536 
537 /* Skip over the implementation of an insn that has been nullified.
538    Use this when the insn is too complex for a conditional move.  */
539 static void nullify_over(DisasContext *ctx)
540 {
541     if (ctx->null_cond.c != TCG_COND_NEVER) {
542         /* The always condition should have been handled in the main loop.  */
543         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
544 
545         ctx->null_lab = gen_new_label();
546 
547         /* If we're using PSW[N], copy it to a temp because... */
548         if (ctx->null_cond.a0 == cpu_psw_n) {
549             ctx->null_cond.a0 = tcg_temp_new_i64();
550             tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
551         }
552         /* ... we clear it before branching over the implementation,
553            so that (1) it's clear after nullifying this insn and
554            (2) if this insn nullifies the next, PSW[N] is valid.  */
555         if (ctx->psw_n_nonzero) {
556             ctx->psw_n_nonzero = false;
557             tcg_gen_movi_i64(cpu_psw_n, 0);
558         }
559 
560         tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
561                            ctx->null_cond.a1, ctx->null_lab);
562         ctx->null_cond = cond_make_f();
563     }
564 }
565 
566 /* Save the current nullification state to PSW[N].  */
567 static void nullify_save(DisasContext *ctx)
568 {
569     if (ctx->null_cond.c == TCG_COND_NEVER) {
570         if (ctx->psw_n_nonzero) {
571             tcg_gen_movi_i64(cpu_psw_n, 0);
572         }
573         return;
574     }
575     if (ctx->null_cond.a0 != cpu_psw_n) {
576         tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
577                             ctx->null_cond.a0, ctx->null_cond.a1);
578         ctx->psw_n_nonzero = true;
579     }
580     ctx->null_cond = cond_make_f();
581 }
582 
583 /* Set PSW[N] to X.  The intention is that this is used immediately
584    before a goto_tb/exit_tb, so that there is no fallthru path to other
585    code within the TB.  Therefore we do not update psw_n_nonzero.  */
586 static void nullify_set(DisasContext *ctx, bool x)
587 {
588     if (ctx->psw_n_nonzero || x) {
589         tcg_gen_movi_i64(cpu_psw_n, x);
590     }
591 }
592 
593 /* Mark the end of an instruction that may have been nullified.
594    This is the pair to nullify_over.  Always returns true so that
595    it may be tail-called from a translate function.  */
596 static bool nullify_end(DisasContext *ctx)
597 {
598     TCGLabel *null_lab = ctx->null_lab;
599     DisasJumpType status = ctx->base.is_jmp;
600 
601     /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
602        For UPDATED, we cannot update on the nullified path.  */
603     assert(status != DISAS_IAQ_N_UPDATED);
604     /* Taken branches are handled manually. */
605     assert(!ctx->psw_b_next);
606 
607     if (likely(null_lab == NULL)) {
608         /* The current insn wasn't conditional or handled the condition
609            applied to it without a branch, so the (new) setting of
610            NULL_COND can be applied directly to the next insn.  */
611         return true;
612     }
613     ctx->null_lab = NULL;
614 
615     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
616         /* The next instruction will be unconditional,
617            and NULL_COND already reflects that.  */
618         gen_set_label(null_lab);
619     } else {
620         /* The insn that we just executed is itself nullifying the next
621            instruction.  Store the condition in the PSW[N] global.
622            We asserted PSW[N] = 0 in nullify_over, so that after the
623            label we have the proper value in place.  */
624         nullify_save(ctx);
625         gen_set_label(null_lab);
626         ctx->null_cond = cond_make_n();
627     }
628     if (status == DISAS_NORETURN) {
629         ctx->base.is_jmp = DISAS_NEXT;
630     }
631     return true;
632 }
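
/*
 * The canonical shape of a complex translator is therefore (a sketch;
 * trans_foo is a made-up example, but compare do_log_reg below):
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);          // branch around if nullified
 *         ... emit the operation ...
 *         return nullify_end(ctx);    // close the skip label
 *     }
 */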
633 
634 static bool iaqe_variable(const DisasIAQE *e)
635 {
636     return e->base || e->space;
637 }
638 
639 static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
640 {
641     return (DisasIAQE){
642         .space = e->space,
643         .base = e->base,
644         .disp = e->disp + disp,
645     };
646 }
647 
648 static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
649 {
650     return (DisasIAQE){
651         .space = ctx->iaq_b.space,
652         .disp = ctx->iaq_f.disp + 8 + disp,
653     };
654 }
655 
656 static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
657 {
658     return (DisasIAQE){
659         .space = ctx->iaq_b.space,
660         .base = var,
661     };
662 }
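
/*
 * Examples: iaqe_incr(&ctx->iaq_b, 4) names the instruction after
 * IAQ_Back, while iaqe_branchi(ctx, disp) applies the displacement
 * eight bytes past the branch itself, i.e. just beyond the delay slot,
 * matching the PA-RISC definition of a taken branch target.
 */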
663 
664 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
665                             const DisasIAQE *src)
666 {
667     tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
668 }
669 
670 static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
671                                 const DisasIAQE *b)
672 {
673     DisasIAQE b_next;
674 
675     if (b == NULL) {
676         b_next = iaqe_incr(f, 4);
677         b = &b_next;
678     }
679 
680     /*
681      * There is an edge case
682      *    bv   r0(rN)
683      *    b,l  disp,r0
684      * for which F will use cpu_iaoq_b (from the indirect branch),
685      * and B will use cpu_iaoq_f (from the direct branch).
686      * In this case we need an extra temporary.
687      */
688     if (f->base != cpu_iaoq_b) {
689         copy_iaoq_entry(ctx, cpu_iaoq_b, b);
690         copy_iaoq_entry(ctx, cpu_iaoq_f, f);
691     } else if (f->base == b->base) {
692         copy_iaoq_entry(ctx, cpu_iaoq_f, f);
693         tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
694     } else {
695         TCGv_i64 tmp = tcg_temp_new_i64();
696         copy_iaoq_entry(ctx, tmp, b);
697         copy_iaoq_entry(ctx, cpu_iaoq_f, f);
698         tcg_gen_mov_i64(cpu_iaoq_b, tmp);
699     }
700 
701     if (f->space) {
702         tcg_gen_mov_i64(cpu_iasq_f, f->space);
703     }
704     if (b->space || f->space) {
705         tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
706     }
707 }
708 
709 static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
710 {
711     tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
712     if (!link) {
713         return;
714     }
715     DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
716     copy_iaoq_entry(ctx, cpu_gr[link], &next);
717 #ifndef CONFIG_USER_ONLY
718     if (with_sr0) {
719         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
720     }
721 #endif
722 }
723 
724 static void gen_excp_1(int exception)
725 {
726     gen_helper_excp(tcg_env, tcg_constant_i32(exception));
727 }
728 
729 static void gen_excp(DisasContext *ctx, int exception)
730 {
731     install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
732     nullify_save(ctx);
733     gen_excp_1(exception);
734     ctx->base.is_jmp = DISAS_NORETURN;
735 }
736 
737 static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
738 {
739     DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));
740 
741     memset(e, 0, sizeof(*e));
742     e->next = ctx->delay_excp_list;
743     ctx->delay_excp_list = e;
744 
745     e->lab = gen_new_label();
746     e->insn = ctx->insn;
747     e->set_iir = true;
748     e->set_n = ctx->psw_n_nonzero ? 0 : -1;
749     e->excp = excp;
750     e->iaq_f = ctx->iaq_f;
751     e->iaq_b = ctx->iaq_b;
752 
753     return e;
754 }
755 
756 static bool gen_excp_iir(DisasContext *ctx, int exc)
757 {
758     if (ctx->null_cond.c == TCG_COND_NEVER) {
759         tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
760                        tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
761         gen_excp(ctx, exc);
762     } else {
763         DisasDelayException *e = delay_excp(ctx, exc);
764         tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
765                            ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
766         ctx->null_cond = cond_make_f();
767     }
768     return true;
769 }
770 
771 static bool gen_illegal(DisasContext *ctx)
772 {
773     return gen_excp_iir(ctx, EXCP_ILL);
774 }
775 
776 #ifdef CONFIG_USER_ONLY
777 #define CHECK_MOST_PRIVILEGED(EXCP) \
778     return gen_excp_iir(ctx, EXCP)
779 #else
780 #define CHECK_MOST_PRIVILEGED(EXCP) \
781     do {                                     \
782         if (ctx->privilege != 0) {           \
783             return gen_excp_iir(ctx, EXCP);  \
784         }                                    \
785     } while (0)
786 #endif
787 
788 static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
789                         const DisasIAQE *b)
790 {
791     return (!iaqe_variable(f) &&
792             (b == NULL || !iaqe_variable(b)) &&
793             translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
794 }
795 
796 /* If the next insn is to be nullified, and it's on the same page,
797    and we're not attempting to set a breakpoint on it, then we can
798    totally skip the nullified insn.  This avoids creating and
799    executing a TB that merely branches to the next TB.  */
800 static bool use_nullify_skip(DisasContext *ctx)
801 {
802     return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
803             && !iaqe_variable(&ctx->iaq_b)
804             && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
805                 & TARGET_PAGE_MASK) == 0);
806 }
807 
808 static void gen_goto_tb(DisasContext *ctx, int which,
809                         const DisasIAQE *f, const DisasIAQE *b)
810 {
811     install_iaq_entries(ctx, f, b);
812     if (use_goto_tb(ctx, f, b)) {
813         tcg_gen_goto_tb(which);
814         tcg_gen_exit_tb(ctx->base.tb, which);
815     } else {
816         tcg_gen_lookup_and_goto_ptr();
817     }
818 }
819 
820 static bool cond_need_sv(int c)
821 {
822     return c == 2 || c == 3 || c == 6;
823 }
824 
825 static bool cond_need_cb(int c)
826 {
827     return c == 4 || c == 5;
828 }
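
/*
 * Of the condition numbers handled below, c = 2 (<), 3 (<=) and 6 (SV)
 * examine the signed overflow, c = 4 (NUV) and 5 (ZNV) examine the
 * carry out, and the rest need only the result itself.
 */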
829 
830 /*
831  * Compute the conditional for arithmetic.  See Page 5-3, Table 5-1, of
832  * the PA-RISC 1.1 Architecture Reference Manual for details.
833  */
834 
835 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
836                          TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
837 {
838     TCGCond sign_cond, zero_cond;
839     uint64_t sign_imm, zero_imm;
840     DisasCond cond;
841     TCGv_i64 tmp;
842 
843     if (d) {
844         /* 64-bit condition. */
845         sign_imm = 0;
846         sign_cond = TCG_COND_LT;
847         zero_imm = 0;
848         zero_cond = TCG_COND_EQ;
849     } else {
850         /* 32-bit condition. */
851         sign_imm = 1ull << 31;
852         sign_cond = TCG_COND_TSTNE;
853         zero_imm = UINT32_MAX;
854         zero_cond = TCG_COND_TSTEQ;
855     }
856 
857     switch (cf >> 1) {
858     case 0: /* Never / TR    (0 / 1) */
859         cond = cond_make_f();
860         break;
861     case 1: /* = / <>        (Z / !Z) */
862         cond = cond_make_vi(zero_cond, res, zero_imm);
863         break;
864     case 2: /* < / >=        (N ^ V / !(N ^ V)) */
865         tmp = tcg_temp_new_i64();
866         tcg_gen_xor_i64(tmp, res, sv);
867         cond = cond_make_ti(sign_cond, tmp, sign_imm);
868         break;
869     case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
870         /*
871          * Simplify:
872          *   (N ^ V) | Z
873          *   ((res < 0) ^ (sv < 0)) | !res
874          *   ((res ^ sv) < 0) | !res
875          *   ((res ^ sv) < 0 ? 1 : !res)
876          *   !((res ^ sv) < 0 ? 0 : res)
877          */
878         tmp = tcg_temp_new_i64();
879         tcg_gen_xor_i64(tmp, res, sv);
880         tcg_gen_movcond_i64(sign_cond, tmp,
881                             tmp, tcg_constant_i64(sign_imm),
882                             ctx->zero, res);
883         cond = cond_make_ti(zero_cond, tmp, zero_imm);
884         break;
885     case 4: /* NUV / UV      (!UV / UV) */
886         cond = cond_make_vi(TCG_COND_EQ, uv, 0);
887         break;
888     case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
889         tmp = tcg_temp_new_i64();
890         tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
891         cond = cond_make_ti(zero_cond, tmp, zero_imm);
892         break;
893     case 6: /* SV / NSV      (V / !V) */
894         cond = cond_make_vi(sign_cond, sv, sign_imm);
895         break;
896     case 7: /* OD / EV */
897         cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
898         break;
899     default:
900         g_assert_not_reached();
901     }
902     if (cf & 1) {
903         cond.c = tcg_invert_cond(cond.c);
904     }
905 
906     return cond;
907 }
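
/*
 * Note that for !d the 32-bit conditions are evaluated directly on the
 * 64-bit value: TSTEQ/TSTNE against UINT32_MAX tests the low word for
 * zero, and against 1ull << 31 tests its sign, with no explicit 32-bit
 * extension required.
 */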
908 
909 /* Similar, but for the special case of subtraction without borrow, we
910    can use the inputs directly.  This can allow other computation to be
911    deleted as unused.  */
912 
913 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
914                              TCGv_i64 res, TCGv_i64 in1,
915                              TCGv_i64 in2, TCGv_i64 sv)
916 {
917     TCGCond tc;
918     bool ext_uns;
919 
920     switch (cf >> 1) {
921     case 1: /* = / <> */
922         tc = TCG_COND_EQ;
923         ext_uns = true;
924         break;
925     case 2: /* < / >= */
926         tc = TCG_COND_LT;
927         ext_uns = false;
928         break;
929     case 3: /* <= / > */
930         tc = TCG_COND_LE;
931         ext_uns = false;
932         break;
933     case 4: /* << / >>= */
934         tc = TCG_COND_LTU;
935         ext_uns = true;
936         break;
937     case 5: /* <<= / >> */
938         tc = TCG_COND_LEU;
939         ext_uns = true;
940         break;
941     default:
942         return do_cond(ctx, cf, d, res, NULL, sv);
943     }
944 
945     if (cf & 1) {
946         tc = tcg_invert_cond(tc);
947     }
948     if (!d) {
949         TCGv_i64 t1 = tcg_temp_new_i64();
950         TCGv_i64 t2 = tcg_temp_new_i64();
951 
952         if (ext_uns) {
953             tcg_gen_ext32u_i64(t1, in1);
954             tcg_gen_ext32u_i64(t2, in2);
955         } else {
956             tcg_gen_ext32s_i64(t1, in1);
957             tcg_gen_ext32s_i64(t2, in2);
958         }
959         return cond_make_tt(tc, t1, t2);
960     }
961     return cond_make_vv(tc, in1, in2);
962 }
963 
964 /*
965  * Similar, but for logicals, where the carry and overflow bits are not
966  * computed, and use of them is undefined.
967  *
968  * Undefined or not, hardware does not trap.  It seems reasonable to
969  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
970  * how cases c={2,3} are treated.
971  */
972 
973 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
974                              TCGv_i64 res)
975 {
976     TCGCond tc;
977     uint64_t imm;
978 
979     switch (cf >> 1) {
980     case 0:  /* never / always */
981     case 4:  /* undef, C */
982     case 5:  /* undef, C & !Z */
983     case 6:  /* undef, V */
984         return cf & 1 ? cond_make_t() : cond_make_f();
985     case 1:  /* == / <> */
986         tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
987         imm = d ? 0 : UINT32_MAX;
988         break;
989     case 2:  /* < / >= */
990         tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
991         imm = d ? 0 : 1ull << 31;
992         break;
993     case 3:  /* <= / > */
994         tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
995         if (!d) {
996             TCGv_i64 tmp = tcg_temp_new_i64();
997             tcg_gen_ext32s_i64(tmp, res);
998             return cond_make_ti(tc, tmp, 0);
999         }
1000         return cond_make_vi(tc, res, 0);
1001     case 7: /* OD / EV */
1002         tc = TCG_COND_TSTNE;
1003         imm = 1;
1004         break;
1005     default:
1006         g_assert_not_reached();
1007     }
1008     if (cf & 1) {
1009         tc = tcg_invert_cond(tc);
1010     }
1011     return cond_make_vi(tc, res, imm);
1012 }
1013 
1014 /* Similar, but for shift/extract/deposit conditions.  */
1015 
1016 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
1017                              TCGv_i64 res)
1018 {
1019     unsigned c, f;
1020 
1021     /* Convert the compressed condition codes to standard.
1022        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1023        4-7 are the reverse of 0-3.  */
1024     c = orig & 3;
1025     if (c == 3) {
1026         c = 7;
1027     }
1028     f = (orig & 4) / 4;
1029 
1030     return do_log_cond(ctx, c * 2 + f, d, res);
1031 }
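
/*
 * Example: orig = 5 (0b101) gives c = 1 and f = 1, so we evaluate
 * do_log_cond with cf = 3, the inverted "=" condition, i.e. "<>".
 */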
1032 
1033 /* Similar, but for unit zero conditions.  */
1034 static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
1035 {
1036     TCGv_i64 tmp;
1037     uint64_t d_repl = d ? 0x0000000100000001ull : 1;
1038     uint64_t ones = 0, sgns = 0;
1039 
1040     switch (cf >> 1) {
1041     case 1: /* SBW / NBW */
1042         if (d) {
1043             ones = d_repl;
1044             sgns = d_repl << 31;
1045         }
1046         break;
1047     case 2: /* SBZ / NBZ */
1048         ones = d_repl * 0x01010101u;
1049         sgns = ones << 7;
1050         break;
1051     case 3: /* SHZ / NHZ */
1052         ones = d_repl * 0x00010001u;
1053         sgns = ones << 15;
1054         break;
1055     }
1056     if (ones == 0) {
1057         /* Undefined, or 0/1 (never/always). */
1058         return cf & 1 ? cond_make_t() : cond_make_f();
1059     }
1060 
1061     /*
1062      * See hasless(v,1) from
1063      * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1064      */
1065     tmp = tcg_temp_new_i64();
1066     tcg_gen_subi_i64(tmp, res, ones);
1067     tcg_gen_andc_i64(tmp, tmp, res);
1068 
1069     return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
1070 }
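
/*
 * Worked example of the bithack for SBZ on a 32-bit value: for
 * res = 0x12003456, res - 0x01010101 = 0x10ff3355, and ANDing that
 * with ~res = 0xedffcba9 gives 0x00ff0301; intersected with the sign
 * mask 0x80808080 this leaves 0x00800000, flagging the zero byte.
 */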
1071 
1072 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
1073                           TCGv_i64 cb, TCGv_i64 cb_msb)
1074 {
1075     if (!d) {
1076         TCGv_i64 t = tcg_temp_new_i64();
1077         tcg_gen_extract_i64(t, cb, 32, 1);
1078         return t;
1079     }
1080     return cb_msb;
1081 }
1082 
1083 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
1084 {
1085     return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
1086 }
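
/*
 * The cb value computed by do_add below is in1 ^ in2 ^ dest, whose
 * bit i is the carry *into* bit i; extracting bit 32 thus yields the
 * carry out of a 32-bit addition, which is what the !d case needs.
 */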
1087 
1088 /* Compute signed overflow for addition.  */
1089 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
1090                           TCGv_i64 in1, TCGv_i64 in2,
1091                           TCGv_i64 orig_in1, int shift, bool d)
1092 {
1093     TCGv_i64 sv = tcg_temp_new_i64();
1094     TCGv_i64 tmp = tcg_temp_new_i64();
1095 
1096     tcg_gen_xor_i64(sv, res, in1);
1097     tcg_gen_xor_i64(tmp, in1, in2);
1098     tcg_gen_andc_i64(sv, sv, tmp);
1099 
1100     switch (shift) {
1101     case 0:
1102         break;
1103     case 1:
1104         /* Shift left by one and compare the sign. */
1105         tcg_gen_add_i64(tmp, orig_in1, orig_in1);
1106         tcg_gen_xor_i64(tmp, tmp, orig_in1);
1107         /* Incorporate into the overflow. */
1108         tcg_gen_or_i64(sv, sv, tmp);
1109         break;
1110     default:
1111         {
1112             int sign_bit = d ? 63 : 31;
1113 
1114             /* Compare the sign against all lower bits. */
1115             tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
1116             tcg_gen_xor_i64(tmp, tmp, orig_in1);
1117             /*
1118              * If one of the bits shifting into or through the sign
1119              * differs, then we have overflow.
1120              */
1121             tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
1122             tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
1123                                 tcg_constant_i64(-1), sv);
1124         }
1125     }
1126     return sv;
1127 }
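
/*
 * The expression (res ^ in1) & ~(in1 ^ in2) above has its sign bit set
 * exactly when the inputs share a sign and the result's sign differs,
 * i.e. on signed overflow; the shift cases additionally fold in any
 * significant bits lost while pre-shifting in1.
 */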
1128 
1129 /* Compute unsigned overflow for addition.  */
1130 static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
1131                           TCGv_i64 in1, int shift, bool d)
1132 {
1133     if (shift == 0) {
1134         return get_carry(ctx, d, cb, cb_msb);
1135     } else {
1136         TCGv_i64 tmp = tcg_temp_new_i64();
1137         tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
1138         tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
1139         return tmp;
1140     }
1141 }
1142 
1143 /* Compute signed overflow for subtraction.  */
1144 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
1145                           TCGv_i64 in1, TCGv_i64 in2)
1146 {
1147     TCGv_i64 sv = tcg_temp_new_i64();
1148     TCGv_i64 tmp = tcg_temp_new_i64();
1149 
1150     tcg_gen_xor_i64(sv, res, in1);
1151     tcg_gen_xor_i64(tmp, in1, in2);
1152     tcg_gen_and_i64(sv, sv, tmp);
1153 
1154     return sv;
1155 }
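
/*
 * Likewise for subtraction: (res ^ in1) & (in1 ^ in2) has its sign bit
 * set when the operands differ in sign and the result's sign does not
 * match in1's.
 */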
1156 
1157 static void gen_tc(DisasContext *ctx, DisasCond *cond)
1158 {
1159     DisasDelayException *e;
1160 
1161     switch (cond->c) {
1162     case TCG_COND_NEVER:
1163         break;
1164     case TCG_COND_ALWAYS:
1165         gen_excp_iir(ctx, EXCP_COND);
1166         break;
1167     default:
1168         e = delay_excp(ctx, EXCP_COND);
1169         tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
1170         /* In the non-trap path, the condition is known false. */
1171         *cond = cond_make_f();
1172         break;
1173     }
1174 }
1175 
1176 static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
1177 {
1178     DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
1179     DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);
1180 
1181     tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);
1182 
1183     /* In the non-trap path, V is known zero. */
1184     *sv = tcg_constant_i64(0);
1185 }
1186 
1187 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
1188                    TCGv_i64 in2, unsigned shift, bool is_l,
1189                    bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1190 {
1191     TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
1192     unsigned c = cf >> 1;
1193     DisasCond cond;
1194 
1195     dest = tcg_temp_new_i64();
1196     cb = NULL;
1197     cb_msb = NULL;
1198 
1199     in1 = orig_in1;
1200     if (shift) {
1201         tmp = tcg_temp_new_i64();
1202         tcg_gen_shli_i64(tmp, in1, shift);
1203         in1 = tmp;
1204     }
1205 
1206     if (!is_l || cond_need_cb(c)) {
1207         cb_msb = tcg_temp_new_i64();
1208         cb = tcg_temp_new_i64();
1209 
1210         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1211         if (is_c) {
1212             tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1213                              get_psw_carry(ctx, d), ctx->zero);
1214         }
1215         tcg_gen_xor_i64(cb, in1, in2);
1216         tcg_gen_xor_i64(cb, cb, dest);
1217     } else {
1218         tcg_gen_add_i64(dest, in1, in2);
1219         if (is_c) {
1220             tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1221         }
1222     }
1223 
1224     /* Compute signed overflow if required.  */
1225     sv = NULL;
1226     if (is_tsv || cond_need_sv(c)) {
1227         sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
1228         if (is_tsv) {
1229             gen_tsv(ctx, &sv, d);
1230         }
1231     }
1232 
1233     /* Compute unsigned overflow if required.  */
1234     uv = NULL;
1235     if (cond_need_cb(c)) {
1236         uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
1237     }
1238 
1239     /* Emit any conditional trap before any writeback.  */
1240     cond = do_cond(ctx, cf, d, dest, uv, sv);
1241     if (is_tc) {
1242         gen_tc(ctx, &cond);
1243     }
1244 
1245     /* Write back the result.  */
1246     if (!is_l) {
1247         save_or_nullify(ctx, cpu_psw_cb, cb);
1248         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1249     }
1250     save_gpr(ctx, rt, dest);
1251 
1252     /* Install the new nullification.  */
1253     ctx->null_cond = cond;
1254 }
1255 
1256 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1257                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1258 {
1259     TCGv_i64 tcg_r1, tcg_r2;
1260 
1261     if (unlikely(is_tc && a->cf == 1)) {
1262         /* Unconditional trap on condition. */
1263         return gen_excp_iir(ctx, EXCP_COND);
1264     }
1265     if (a->cf) {
1266         nullify_over(ctx);
1267     }
1268     tcg_r1 = load_gpr(ctx, a->r1);
1269     tcg_r2 = load_gpr(ctx, a->r2);
1270     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1271            is_tsv, is_tc, is_c, a->cf, a->d);
1272     return nullify_end(ctx);
1273 }
1274 
1275 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1276                        bool is_tsv, bool is_tc)
1277 {
1278     TCGv_i64 tcg_im, tcg_r2;
1279 
1280     if (unlikely(is_tc && a->cf == 1)) {
1281         /* Unconditional trap on condition. */
1282         return gen_excp_iir(ctx, EXCP_COND);
1283     }
1284     if (a->cf) {
1285         nullify_over(ctx);
1286     }
1287     tcg_im = tcg_constant_i64(a->i);
1288     tcg_r2 = load_gpr(ctx, a->r);
1289     /* All ADDI conditions are 32-bit. */
1290     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1291     return nullify_end(ctx);
1292 }
1293 
1294 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1295                    TCGv_i64 in2, bool is_tsv, bool is_b,
1296                    bool is_tc, unsigned cf, bool d)
1297 {
1298     TCGv_i64 dest, sv, cb, cb_msb;
1299     unsigned c = cf >> 1;
1300     DisasCond cond;
1301 
1302     dest = tcg_temp_new_i64();
1303     cb = tcg_temp_new_i64();
1304     cb_msb = tcg_temp_new_i64();
1305 
1306     if (is_b) {
1307         /* DEST,C = IN1 + ~IN2 + C.  */
1308         tcg_gen_not_i64(cb, in2);
1309         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
1310                          get_psw_carry(ctx, d), ctx->zero);
1311         tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
1312         tcg_gen_xor_i64(cb, cb, in1);
1313         tcg_gen_xor_i64(cb, cb, dest);
1314     } else {
1315         /*
1316          * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
1317          * operations by seeding the high word with 1 and subtracting.
1318          */
1319         TCGv_i64 one = tcg_constant_i64(1);
1320         tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1321         tcg_gen_eqv_i64(cb, in1, in2);
1322         tcg_gen_xor_i64(cb, cb, dest);
1323     }
1324 
1325     /* Compute signed overflow if required.  */
1326     sv = NULL;
1327     if (is_tsv || cond_need_sv(c)) {
1328         sv = do_sub_sv(ctx, dest, in1, in2);
1329         if (is_tsv) {
1330             gen_tsv(ctx, &sv, d);
1331         }
1332     }
1333 
1334     /* Compute the condition.  We cannot use the special case for borrow.  */
1335     if (!is_b) {
1336         cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1337     } else {
1338         cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1339     }
1340 
1341     /* Emit any conditional trap before any writeback.  */
1342     if (is_tc) {
1343         gen_tc(ctx, &cond);
1344     }
1345 
1346     /* Write back the result.  */
1347     save_or_nullify(ctx, cpu_psw_cb, cb);
1348     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1349     save_gpr(ctx, rt, dest);
1350 
1351     /* Install the new nullification.  */
1352     ctx->null_cond = cond;
1353 }
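
/*
 * Note the identity behind the !is_b path above: IN1 + ~IN2 + 1 equals
 * IN1 - IN2, and seeding the high word of the double-word subtraction
 * with 1 makes cb_msb come out as the carry rather than the borrow.
 * The carry chain itself is recovered as ~(in1 ^ in2) ^ dest via
 * tcg_gen_eqv_i64.
 */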
1354 
1355 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1356                        bool is_tsv, bool is_b, bool is_tc)
1357 {
1358     TCGv_i64 tcg_r1, tcg_r2;
1359 
1360     if (a->cf) {
1361         nullify_over(ctx);
1362     }
1363     tcg_r1 = load_gpr(ctx, a->r1);
1364     tcg_r2 = load_gpr(ctx, a->r2);
1365     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1366     return nullify_end(ctx);
1367 }
1368 
1369 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1370 {
1371     TCGv_i64 tcg_im, tcg_r2;
1372 
1373     if (a->cf) {
1374         nullify_over(ctx);
1375     }
1376     tcg_im = tcg_constant_i64(a->i);
1377     tcg_r2 = load_gpr(ctx, a->r);
1378     /* All SUBI conditions are 32-bit. */
1379     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1380     return nullify_end(ctx);
1381 }
1382 
1383 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1384                       TCGv_i64 in2, unsigned cf, bool d)
1385 {
1386     TCGv_i64 dest, sv;
1387     DisasCond cond;
1388 
1389     dest = tcg_temp_new_i64();
1390     tcg_gen_sub_i64(dest, in1, in2);
1391 
1392     /* Compute signed overflow if required.  */
1393     sv = NULL;
1394     if (cond_need_sv(cf >> 1)) {
1395         sv = do_sub_sv(ctx, dest, in1, in2);
1396     }
1397 
1398     /* Form the condition for the compare.  */
1399     cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1400 
1401     /* Clear.  */
1402     tcg_gen_movi_i64(dest, 0);
1403     save_gpr(ctx, rt, dest);
1404 
1405     /* Install the new nullification.  */
1406     ctx->null_cond = cond;
1407 }
1408 
1409 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1410                    TCGv_i64 in2, unsigned cf, bool d,
1411                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1412 {
1413     TCGv_i64 dest = dest_gpr(ctx, rt);
1414 
1415     /* Perform the operation, and writeback.  */
1416     fn(dest, in1, in2);
1417     save_gpr(ctx, rt, dest);
1418 
1419     /* Install the new nullification.  */
1420     ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1421 }
1422 
1423 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1424                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1425 {
1426     TCGv_i64 tcg_r1, tcg_r2;
1427 
1428     if (a->cf) {
1429         nullify_over(ctx);
1430     }
1431     tcg_r1 = load_gpr(ctx, a->r1);
1432     tcg_r2 = load_gpr(ctx, a->r2);
1433     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1434     return nullify_end(ctx);
1435 }
1436 
1437 static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1438                            TCGv_i64 in2, unsigned cf, bool d,
1439                            bool is_tc, bool is_add)
1440 {
1441     TCGv_i64 dest = tcg_temp_new_i64();
1442     uint64_t test_cb = 0;
1443     DisasCond cond;
1444 
1445     /* Select which carry-out bits to test. */
1446     switch (cf >> 1) {
1447     case 4: /* NDC / SDC -- 4-bit carries */
1448         test_cb = dup_const(MO_8, 0x88);
1449         break;
1450     case 5: /* NWC / SWC -- 32-bit carries */
1451         if (d) {
1452             test_cb = dup_const(MO_32, INT32_MIN);
1453         } else {
1454             cf &= 1; /* undefined -- map to never/always */
1455         }
1456         break;
1457     case 6: /* NBC / SBC -- 8-bit carries */
1458         test_cb = dup_const(MO_8, INT8_MIN);
1459         break;
1460     case 7: /* NHC / SHC -- 16-bit carries */
1461         test_cb = dup_const(MO_16, INT16_MIN);
1462         break;
1463     }
1464     if (!d) {
1465         test_cb = (uint32_t)test_cb;
1466     }
1467 
1468     if (!test_cb) {
1469         /* No need to compute carries if we don't need to test them. */
1470         if (is_add) {
1471             tcg_gen_add_i64(dest, in1, in2);
1472         } else {
1473             tcg_gen_sub_i64(dest, in1, in2);
1474         }
1475         cond = do_unit_zero_cond(cf, d, dest);
1476     } else {
1477         TCGv_i64 cb = tcg_temp_new_i64();
1478 
1479         if (d) {
1480             TCGv_i64 cb_msb = tcg_temp_new_i64();
1481             if (is_add) {
1482                 tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1483                 tcg_gen_xor_i64(cb, in1, in2);
1484             } else {
1485                 /* See do_sub, !is_b. */
1486                 TCGv_i64 one = tcg_constant_i64(1);
1487                 tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1488                 tcg_gen_eqv_i64(cb, in1, in2);
1489             }
1490             tcg_gen_xor_i64(cb, cb, dest);
1491             tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
1492         } else {
1493             if (is_add) {
1494                 tcg_gen_add_i64(dest, in1, in2);
1495                 tcg_gen_xor_i64(cb, in1, in2);
1496             } else {
1497                 tcg_gen_sub_i64(dest, in1, in2);
1498                 tcg_gen_eqv_i64(cb, in1, in2);
1499             }
1500             tcg_gen_xor_i64(cb, cb, dest);
1501             tcg_gen_shri_i64(cb, cb, 1);
1502         }
1503 
1504         cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
1505                             cb, test_cb);
1506     }
1507 
1508     if (is_tc) {
1509         gen_tc(ctx, &cond);
1510     }
1511     save_gpr(ctx, rt, dest);
1512 
1513     ctx->null_cond = cond;
1514 }
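
/*
 * In the masks above, the carry chain is shifted so that bit i holds
 * the carry *out of* bit i, and test_cb then watches each sub-unit
 * boundary: 0x88 per byte covers every nibble, while the replicated
 * INT8_MIN/INT16_MIN/INT32_MIN cover the top bit of each byte,
 * halfword or word.
 */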
1515 
1516 #ifndef CONFIG_USER_ONLY
1517 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1518    from the top 2 bits of the base register.  There are a few system
1519    instructions that have a 3-bit space specifier, for which SR0 is
1520    not special.  To handle this, pass ~SP.  */
1521 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1522 {
1523     TCGv_ptr ptr;
1524     TCGv_i64 tmp;
1525     TCGv_i64 spc;
1526 
1527     if (sp != 0) {
1528         if (sp < 0) {
1529             sp = ~sp;
1530         }
1531         spc = tcg_temp_new_i64();
1532         load_spr(ctx, spc, sp);
1533         return spc;
1534     }
1535     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1536         return cpu_srH;
1537     }
1538 
1539     ptr = tcg_temp_new_ptr();
1540     tmp = tcg_temp_new_i64();
1541     spc = tcg_temp_new_i64();
1542 
1543     /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1544     tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1545     tcg_gen_andi_i64(tmp, tmp, 030);
1546     tcg_gen_trunc_i64_ptr(ptr, tmp);
1547 
1548     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1549     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1550 
1551     return spc;
1552 }
1553 #endif
1554 
1555 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1556                      unsigned rb, unsigned rx, int scale, int64_t disp,
1557                      unsigned sp, int modify, bool is_phys)
1558 {
1559     TCGv_i64 base = load_gpr(ctx, rb);
1560     TCGv_i64 ofs;
1561     TCGv_i64 addr;
1562 
1563     set_insn_breg(ctx, rb);
1564 
1565     /* Note that RX is mutually exclusive with DISP.  */
1566     if (rx) {
1567         ofs = tcg_temp_new_i64();
1568         tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1569         tcg_gen_add_i64(ofs, ofs, base);
1570     } else if (disp || modify) {
1571         ofs = tcg_temp_new_i64();
1572         tcg_gen_addi_i64(ofs, base, disp);
1573     } else {
1574         ofs = base;
1575     }
1576 
1577     *pofs = ofs;
1578     *pgva = addr = tcg_temp_new_i64();
1579     tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
1580                      gva_offset_mask(ctx->tb_flags));
1581 #ifndef CONFIG_USER_ONLY
1582     if (!is_phys) {
1583         tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1584     }
1585 #endif
1586 }
1587 
1588 /* Emit a memory load.  The modify parameter should be
1589  * < 0 for pre-modify,
1590  * > 0 for post-modify,
1591  * = 0 for no base register update.
1592  */
1593 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1594                        unsigned rx, int scale, int64_t disp,
1595                        unsigned sp, int modify, MemOp mop)
1596 {
1597     TCGv_i64 ofs;
1598     TCGv_i64 addr;
1599 
1600     /* Caller uses nullify_over/nullify_end.  */
1601     assert(ctx->null_cond.c == TCG_COND_NEVER);
1602 
1603     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1604              MMU_DISABLED(ctx));
1605     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1606     if (modify) {
1607         save_gpr(ctx, rb, ofs);
1608     }
1609 }
1610 
1611 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1612                        unsigned rx, int scale, int64_t disp,
1613                        unsigned sp, int modify, MemOp mop)
1614 {
1615     TCGv_i64 ofs;
1616     TCGv_i64 addr;
1617 
1618     /* Caller uses nullify_over/nullify_end.  */
1619     assert(ctx->null_cond.c == TCG_COND_NEVER);
1620 
1621     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1622              MMU_DISABLED(ctx));
1623     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1624     if (modify) {
1625         save_gpr(ctx, rb, ofs);
1626     }
1627 }
1628 
1629 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1630                         unsigned rx, int scale, int64_t disp,
1631                         unsigned sp, int modify, MemOp mop)
1632 {
1633     TCGv_i64 ofs;
1634     TCGv_i64 addr;
1635 
1636     /* Caller uses nullify_over/nullify_end.  */
1637     assert(ctx->null_cond.c == TCG_COND_NEVER);
1638 
1639     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1640              MMU_DISABLED(ctx));
1641     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1642     if (modify) {
1643         save_gpr(ctx, rb, ofs);
1644     }
1645 }
1646 
1647 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1648                         unsigned rx, int scale, int64_t disp,
1649                         unsigned sp, int modify, MemOp mop)
1650 {
1651     TCGv_i64 ofs;
1652     TCGv_i64 addr;
1653 
1654     /* Caller uses nullify_over/nullify_end.  */
1655     assert(ctx->null_cond.c == TCG_COND_NEVER);
1656 
1657     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1658              MMU_DISABLED(ctx));
1659     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1660     if (modify) {
1661         save_gpr(ctx, rb, ofs);
1662     }
1663 }
1664 
1665 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1666                     unsigned rx, int scale, int64_t disp,
1667                     unsigned sp, int modify, MemOp mop)
1668 {
1669     TCGv_i64 dest;
1670 
1671     nullify_over(ctx);
1672 
1673     if (modify == 0) {
1674         /* No base register update.  */
1675         dest = dest_gpr(ctx, rt);
1676     } else {
1677         /* Make sure if RT == RB, we see the result of the load.  */
1678         dest = tcg_temp_new_i64();
1679     }
1680     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1681     save_gpr(ctx, rt, dest);
1682 
1683     return nullify_end(ctx);
1684 }
1685 
1686 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1687                       unsigned rx, int scale, int64_t disp,
1688                       unsigned sp, int modify)
1689 {
1690     TCGv_i32 tmp;
1691 
1692     nullify_over(ctx);
1693 
1694     tmp = tcg_temp_new_i32();
1695     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1696     save_frw_i32(rt, tmp);
1697 
1698     if (rt == 0) {
1699         gen_helper_loaded_fr0(tcg_env);
1700     }
1701 
1702     return nullify_end(ctx);
1703 }
1704 
1705 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1706 {
1707     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1708                      a->disp, a->sp, a->m);
1709 }
1710 
1711 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1712                       unsigned rx, int scale, int64_t disp,
1713                       unsigned sp, int modify)
1714 {
1715     TCGv_i64 tmp;
1716 
1717     nullify_over(ctx);
1718 
1719     tmp = tcg_temp_new_i64();
1720     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1721     save_frd(rt, tmp);
1722 
1723     if (rt == 0) {
1724         gen_helper_loaded_fr0(tcg_env);
1725     }
1726 
1727     return nullify_end(ctx);
1728 }
1729 
1730 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1731 {
1732     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1733                      a->disp, a->sp, a->m);
1734 }
1735 
1736 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1737                      int64_t disp, unsigned sp,
1738                      int modify, MemOp mop)
1739 {
1740     nullify_over(ctx);
1741     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1742     return nullify_end(ctx);
1743 }
1744 
1745 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1746                        unsigned rx, int scale, int64_t disp,
1747                        unsigned sp, int modify)
1748 {
1749     TCGv_i32 tmp;
1750 
1751     nullify_over(ctx);
1752 
1753     tmp = load_frw_i32(rt);
1754     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1755 
1756     return nullify_end(ctx);
1757 }
1758 
1759 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1760 {
1761     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1762                       a->disp, a->sp, a->m);
1763 }
1764 
1765 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1766                        unsigned rx, int scale, int64_t disp,
1767                        unsigned sp, int modify)
1768 {
1769     TCGv_i64 tmp;
1770 
1771     nullify_over(ctx);
1772 
1773     tmp = load_frd(rt);
1774     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1775 
1776     return nullify_end(ctx);
1777 }
1778 
1779 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1780 {
1781     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1782                       a->disp, a->sp, a->m);
1783 }
1784 
1785 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1786                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1787 {
1788     TCGv_i32 tmp;
1789 
1790     nullify_over(ctx);
1791     tmp = load_frw0_i32(ra);
1792 
1793     func(tmp, tcg_env, tmp);
1794 
1795     save_frw_i32(rt, tmp);
1796     return nullify_end(ctx);
1797 }
1798 
1799 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1800                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1801 {
1802     TCGv_i32 dst;
1803     TCGv_i64 src;
1804 
1805     nullify_over(ctx);
1806     src = load_frd(ra);
1807     dst = tcg_temp_new_i32();
1808 
1809     func(dst, tcg_env, src);
1810 
1811     save_frw_i32(rt, dst);
1812     return nullify_end(ctx);
1813 }
1814 
1815 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1816                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1817 {
1818     TCGv_i64 tmp;
1819 
1820     nullify_over(ctx);
1821     tmp = load_frd0(ra);
1822 
1823     func(tmp, tcg_env, tmp);
1824 
1825     save_frd(rt, tmp);
1826     return nullify_end(ctx);
1827 }
1828 
1829 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1830                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1831 {
1832     TCGv_i32 src;
1833     TCGv_i64 dst;
1834 
1835     nullify_over(ctx);
1836     src = load_frw0_i32(ra);
1837     dst = tcg_temp_new_i64();
1838 
1839     func(dst, tcg_env, src);
1840 
1841     save_frd(rt, dst);
1842     return nullify_end(ctx);
1843 }
1844 
1845 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1846                         unsigned ra, unsigned rb,
1847                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1848 {
1849     TCGv_i32 a, b;
1850 
1851     nullify_over(ctx);
1852     a = load_frw0_i32(ra);
1853     b = load_frw0_i32(rb);
1854 
1855     func(a, tcg_env, a, b);
1856 
1857     save_frw_i32(rt, a);
1858     return nullify_end(ctx);
1859 }
1860 
1861 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1862                         unsigned ra, unsigned rb,
1863                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1864 {
1865     TCGv_i64 a, b;
1866 
1867     nullify_over(ctx);
1868     a = load_frd0(ra);
1869     b = load_frd0(rb);
1870 
1871     func(a, tcg_env, a, b);
1872 
1873     save_frd(rt, a);
1874     return nullify_end(ctx);
1875 }
1876 
1877 /* Emit an unconditional branch to a direct target, which may or may not
1878    have already had nullification handled.  */
1879 static bool do_dbranch(DisasContext *ctx, int64_t disp,
1880                        unsigned link, bool is_n)
1881 {
1882     ctx->iaq_j = iaqe_branchi(ctx, disp);
1883 
1884     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1885         install_link(ctx, link, false);
1886         if (is_n) {
1887             if (use_nullify_skip(ctx)) {
1888                 nullify_set(ctx, 0);
1889                 store_psw_xb(ctx, 0);
1890                 gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1891                 ctx->base.is_jmp = DISAS_NORETURN;
1892                 return true;
1893             }
1894             ctx->null_cond.c = TCG_COND_ALWAYS;
1895         }
1896         ctx->iaq_n = &ctx->iaq_j;
1897         ctx->psw_b_next = true;
1898     } else {
1899         nullify_over(ctx);
1900 
1901         install_link(ctx, link, false);
1902         if (is_n && use_nullify_skip(ctx)) {
1903             nullify_set(ctx, 0);
1904             store_psw_xb(ctx, 0);
1905             gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1906         } else {
1907             nullify_set(ctx, is_n);
1908             store_psw_xb(ctx, PSW_B);
1909             gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
1910         }
1911         nullify_end(ctx);
1912 
1913         nullify_set(ctx, 0);
1914         store_psw_xb(ctx, 0);
1915         gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
1916         ctx->base.is_jmp = DISAS_NORETURN;
1917     }
1918     return true;
1919 }
1920 
1921 /* Emit a conditional branch to a direct target.  If the branch itself
1922    is nullified, we should have already used nullify_over.  */
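/*
 * Editor's note: PA-RISC nullification for conditional branches is
 * direction sensitive.  With the ,n completer, the following
 * instruction is nullified when a forward branch is taken or when a
 * backward branch is not taken, which is why both arms below test
 * the sign of disp.
 */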
1923 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1924                        DisasCond *cond)
1925 {
1926     DisasIAQE next;
1927     TCGLabel *taken = NULL;
1928     TCGCond c = cond->c;
1929     bool n;
1930 
1931     assert(ctx->null_cond.c == TCG_COND_NEVER);
1932 
1933     /* Handle TRUE as a direct branch.  */
1934     if (c == TCG_COND_ALWAYS) {
1935         return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
1936     }
1937 
1938     taken = gen_new_label();
1939     tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1940 
1941     /* Not taken: Condition not satisfied; nullify on backward branches. */
1942     n = is_n && disp < 0;
1943     if (n && use_nullify_skip(ctx)) {
1944         nullify_set(ctx, 0);
1945         store_psw_xb(ctx, 0);
1946         next = iaqe_incr(&ctx->iaq_b, 4);
1947         gen_goto_tb(ctx, 0, &next, NULL);
1948     } else {
1949         if (!n && ctx->null_lab) {
1950             gen_set_label(ctx->null_lab);
1951             ctx->null_lab = NULL;
1952         }
1953         nullify_set(ctx, n);
1954         store_psw_xb(ctx, 0);
1955         gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
1956     }
1957 
1958     gen_set_label(taken);
1959 
1960     /* Taken: Condition satisfied; nullify on forward branches.  */
1961     n = is_n && disp >= 0;
1962 
1963     next = iaqe_branchi(ctx, disp);
1964     if (n && use_nullify_skip(ctx)) {
1965         nullify_set(ctx, 0);
1966         store_psw_xb(ctx, 0);
1967         gen_goto_tb(ctx, 1, &next, NULL);
1968     } else {
1969         nullify_set(ctx, n);
1970         store_psw_xb(ctx, PSW_B);
1971         gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
1972     }
1973 
1974     /* Not taken: the branch itself was nullified.  */
1975     if (ctx->null_lab) {
1976         gen_set_label(ctx->null_lab);
1977         ctx->null_lab = NULL;
1978         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1979     } else {
1980         ctx->base.is_jmp = DISAS_NORETURN;
1981     }
1982     return true;
1983 }
1984 
1985 /*
1986  * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
1987  * This handles nullification of the branch itself.
1988  */
1989 static bool do_ibranch(DisasContext *ctx, unsigned link,
1990                        bool with_sr0, bool is_n)
1991 {
1992     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1993         install_link(ctx, link, with_sr0);
1994         if (is_n) {
1995             if (use_nullify_skip(ctx)) {
1996                 install_iaq_entries(ctx, &ctx->iaq_j, NULL);
1997                 nullify_set(ctx, 0);
1998                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1999                 return true;
2000             }
2001             ctx->null_cond.c = TCG_COND_ALWAYS;
2002         }
2003         ctx->iaq_n = &ctx->iaq_j;
2004         ctx->psw_b_next = true;
2005         return true;
2006     }
2007 
2008     nullify_over(ctx);
2009 
2010     install_link(ctx, link, with_sr0);
2011     if (is_n && use_nullify_skip(ctx)) {
2012         install_iaq_entries(ctx, &ctx->iaq_j, NULL);
2013         nullify_set(ctx, 0);
2014         store_psw_xb(ctx, 0);
2015     } else {
2016         install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
2017         nullify_set(ctx, is_n);
2018         store_psw_xb(ctx, PSW_B);
2019     }
2020 
2021     tcg_gen_lookup_and_goto_ptr();
2022     ctx->base.is_jmp = DISAS_NORETURN;
2023     return nullify_end(ctx);
2024 }
2025 
2026 /* Implement
2027  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
2028  *      IAOQ_Next{30..31} ← GR[b]{30..31};
2029  *    else
2030  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2031  * which keeps the privilege level from being increased.
2032  */
2033 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
2034 {
2035     TCGv_i64 dest = tcg_temp_new_i64();
2036     switch (ctx->privilege) {
2037     case 0:
2038         /* Privilege 0 is maximum and is allowed to decrease.  */
2039         tcg_gen_mov_i64(dest, offset);
2040         break;
2041     case 3:
2042         /* Privilege 3 is minimum and is never allowed to increase.  */
2043         tcg_gen_ori_i64(dest, offset, 3);
2044         break;
2045     default:
2046         tcg_gen_andi_i64(dest, offset, -4);
2047         tcg_gen_ori_i64(dest, dest, ctx->privilege);
2048         tcg_gen_umax_i64(dest, dest, offset);
2049         break;
2050     }
2051     return dest;
2052 }
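/*
 * Worked example (editor's note): at privilege 1, a target whose low
 * two bits are 0 (requesting the more privileged level 0) becomes
 * (offset & -4) | 1 and umax selects it, clamping execution to
 * level 1.  Low bits of 2 or 3 name a less privileged level; there
 * umax keeps the original offset, allowing the decrease.
 */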
2053 
2054 #ifdef CONFIG_USER_ONLY
2055 /* On Linux, page zero is normally marked execute only + gateway.
2056    Therefore normal read or write is supposed to fail, but specific
2057    offsets have kernel code mapped to raise permissions to implement
2058    system calls.  Handling this via an explicit check here, rather
2059    than in the "be disp(sr2,r0)" instruction that probably sent us
2060    here, is the easiest way to handle the branch delay slot on the
2061    aforementioned BE.  */
2062 static void do_page_zero(DisasContext *ctx)
2063 {
2064     assert(ctx->iaq_f.disp == 0);
2065 
2066     /* If by some means we get here with PSW[N]=1, that implies that
2067        the B,GATE instruction would be skipped, and we'd fault on the
2068        next insn within the privileged page.  */
2069     switch (ctx->null_cond.c) {
2070     case TCG_COND_NEVER:
2071         break;
2072     case TCG_COND_ALWAYS:
2073         tcg_gen_movi_i64(cpu_psw_n, 0);
2074         goto do_sigill;
2075     default:
2076         /* Since this is always the first (and only) insn within the
2077            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2078         g_assert_not_reached();
2079     }
2080 
2081     /* If PSW[B] is set, the B,GATE insn would trap. */
2082     if (ctx->psw_xb & PSW_B) {
2083         goto do_sigill;
2084     }
2085 
2086     switch (ctx->base.pc_first) {
2087     case 0x00: /* Null pointer call */
2088         gen_excp_1(EXCP_IMP);
2089         ctx->base.is_jmp = DISAS_NORETURN;
2090         break;
2091 
2092     case 0xb0: /* LWS */
2093         gen_excp_1(EXCP_SYSCALL_LWS);
2094         ctx->base.is_jmp = DISAS_NORETURN;
2095         break;
2096 
2097     case 0xe0: /* SET_THREAD_POINTER */
2098         {
2099             DisasIAQE next = { .base = tcg_temp_new_i64() };
2100 
2101             tcg_gen_st_i64(cpu_gr[26], tcg_env,
2102                            offsetof(CPUHPPAState, cr[27]));
2103             tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
2104             install_iaq_entries(ctx, &next, NULL);
2105             ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2106         }
2107         break;
2108 
2109     case 0x100: /* SYSCALL */
2110         gen_excp_1(EXCP_SYSCALL);
2111         ctx->base.is_jmp = DISAS_NORETURN;
2112         break;
2113 
2114     default:
2115     do_sigill:
2116         gen_excp_1(EXCP_ILL);
2117         ctx->base.is_jmp = DISAS_NORETURN;
2118         break;
2119     }
2120 }
2121 #endif
2122 
2123 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2124 {
2125     ctx->null_cond = cond_make_f();
2126     return true;
2127 }
2128 
2129 static bool trans_break(DisasContext *ctx, arg_break *a)
2130 {
2131     return gen_excp_iir(ctx, EXCP_BREAK);
2132 }
2133 
2134 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2135 {
2136     /* No point in nullifying the memory barrier.  */
2137     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2138 
2139     ctx->null_cond = cond_make_f();
2140     return true;
2141 }
2142 
2143 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2144 {
2145     TCGv_i64 dest = dest_gpr(ctx, a->t);
2146 
2147     copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
2148     tcg_gen_andi_i64(dest, dest, -4);
2149 
2150     save_gpr(ctx, a->t, dest);
2151     ctx->null_cond = cond_make_f();
2152     return true;
2153 }
2154 
2155 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2156 {
2157     unsigned rt = a->t;
2158     unsigned rs = a->sp;
2159     TCGv_i64 t0 = tcg_temp_new_i64();
2160 
2161     load_spr(ctx, t0, rs);
2162     tcg_gen_shri_i64(t0, t0, 32);
2163 
2164     save_gpr(ctx, rt, t0);
2165 
2166     ctx->null_cond = cond_make_f();
2167     return true;
2168 }
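/*
 * Editor's note: space registers are held pre-shifted into the upper
 * 32 bits internally, so they can be OR'd directly with a 32-bit
 * offset to form a global virtual address; hence the shift right by
 * 32 above and the matching shift left in trans_mtsp below.
 */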
2169 
2170 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2171 {
2172     unsigned rt = a->t;
2173     unsigned ctl = a->r;
2174     TCGv_i64 tmp;
2175 
2176     switch (ctl) {
2177     case CR_SAR:
2178         if (a->e == 0) {
2179             /* MFSAR without ,W masks low 5 bits.  */
2180             tmp = dest_gpr(ctx, rt);
2181             tcg_gen_andi_i64(tmp, cpu_sar, 31);
2182             save_gpr(ctx, rt, tmp);
2183             goto done;
2184         }
2185         save_gpr(ctx, rt, cpu_sar);
2186         goto done;
2187     case CR_IT: /* Interval Timer */
2188         /* FIXME: Respect PSW_S bit.  */
2189         nullify_over(ctx);
2190         tmp = dest_gpr(ctx, rt);
2191         if (translator_io_start(&ctx->base)) {
2192             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2193         }
2194         gen_helper_read_interval_timer(tmp);
2195         save_gpr(ctx, rt, tmp);
2196         return nullify_end(ctx);
2197     case 26:
2198     case 27:
2199         break;
2200     default:
2201         /* All other control registers are privileged.  */
2202         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2203         break;
2204     }
2205 
2206     tmp = tcg_temp_new_i64();
2207     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2208     save_gpr(ctx, rt, tmp);
2209 
2210  done:
2211     ctx->null_cond = cond_make_f();
2212     return true;
2213 }
2214 
2215 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2216 {
2217     unsigned rr = a->r;
2218     unsigned rs = a->sp;
2219     TCGv_i64 tmp;
2220 
2221     if (rs >= 5) {
2222         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2223     }
2224     nullify_over(ctx);
2225 
2226     tmp = tcg_temp_new_i64();
2227     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2228 
2229     if (rs >= 4) {
2230         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2231         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2232     } else {
2233         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2234     }
2235 
2236     return nullify_end(ctx);
2237 }
2238 
2239 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2240 {
2241     unsigned ctl = a->t;
2242     TCGv_i64 reg;
2243     TCGv_i64 tmp;
2244 
2245     if (ctl == CR_SAR) {
2246         reg = load_gpr(ctx, a->r);
2247         tmp = tcg_temp_new_i64();
2248         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2249         save_or_nullify(ctx, cpu_sar, tmp);
2250 
2251         ctx->null_cond = cond_make_f();
2252         return true;
2253     }
2254 
2255     /* All other control registers are privileged or read-only.  */
2256     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2257 
2258 #ifndef CONFIG_USER_ONLY
2259     nullify_over(ctx);
2260 
2261     if (ctx->is_pa20) {
2262         reg = load_gpr(ctx, a->r);
2263     } else {
2264         reg = tcg_temp_new_i64();
2265         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2266     }
2267 
2268     switch (ctl) {
2269     case CR_IT:
2270         if (translator_io_start(&ctx->base)) {
2271             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2272         }
2273         gen_helper_write_interval_timer(tcg_env, reg);
2274         break;
2275     case CR_EIRR:
2276         /* Helper modifies interrupt lines and is therefore IO. */
2277         translator_io_start(&ctx->base);
2278         gen_helper_write_eirr(tcg_env, reg);
2279         /* Exit to re-evaluate interrupts in the main loop. */
2280         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2281         break;
2282 
2283     case CR_IIASQ:
2284     case CR_IIAOQ:
2285         /* FIXME: Respect PSW_Q bit */
2286         /* The write advances the queue and stores to the back element.  */
2287         tmp = tcg_temp_new_i64();
2288         tcg_gen_ld_i64(tmp, tcg_env,
2289                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2290         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2291         tcg_gen_st_i64(reg, tcg_env,
2292                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2293         break;
2294 
2295     case CR_PID1:
2296     case CR_PID2:
2297     case CR_PID3:
2298     case CR_PID4:
2299         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2301         gen_helper_change_prot_id(tcg_env);
2303         break;
2304 
2305     case CR_EIEM:
2306         /* Exit to re-evaluate interrupts in the main loop. */
2307         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2308         /* FALLTHRU */
2309     default:
2310         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2311         break;
2312     }
2313     return nullify_end(ctx);
2314 #endif
2315 }
2316 
2317 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2318 {
2319     TCGv_i64 tmp = tcg_temp_new_i64();
2320 
2321     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2322     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2323     save_or_nullify(ctx, cpu_sar, tmp);
2324 
2325     ctx->null_cond = cond_make_f();
2326     return true;
2327 }
2328 
2329 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2330 {
2331     TCGv_i64 dest = dest_gpr(ctx, a->t);
2332 
2333 #ifdef CONFIG_USER_ONLY
2334     /* We don't implement space registers in user mode. */
2335     tcg_gen_movi_i64(dest, 0);
2336 #else
2337     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2338     tcg_gen_shri_i64(dest, dest, 32);
2339 #endif
2340     save_gpr(ctx, a->t, dest);
2341 
2342     ctx->null_cond = cond_make_f();
2343     return true;
2344 }
2345 
2346 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2347 {
2348 #ifdef CONFIG_USER_ONLY
2349     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2350 #else
2351     TCGv_i64 tmp;
2352 
2353     /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
2354     if (a->i) {
2355         CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2356     }
2357 
2358     nullify_over(ctx);
2359 
2360     tmp = tcg_temp_new_i64();
2361     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2362     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2363     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2364     save_gpr(ctx, a->t, tmp);
2365 
2366     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2367     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2368     return nullify_end(ctx);
2369 #endif
2370 }
2371 
2372 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2373 {
2374     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2375 #ifndef CONFIG_USER_ONLY
2376     TCGv_i64 tmp;
2377 
2378     nullify_over(ctx);
2379 
2380     tmp = tcg_temp_new_i64();
2381     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2382     tcg_gen_ori_i64(tmp, tmp, a->i);
2383     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2384     save_gpr(ctx, a->t, tmp);
2385 
2386     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2387     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2388     return nullify_end(ctx);
2389 #endif
2390 }
2391 
2392 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2393 {
2394     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2395 #ifndef CONFIG_USER_ONLY
2396     TCGv_i64 tmp, reg;
2397     nullify_over(ctx);
2398 
2399     reg = load_gpr(ctx, a->r);
2400     tmp = tcg_temp_new_i64();
2401     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2402 
2403     /* Exit the TB to recognize new interrupts.  */
2404     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2405     return nullify_end(ctx);
2406 #endif
2407 }
2408 
2409 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2410 {
2411     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2412 #ifndef CONFIG_USER_ONLY
2413     nullify_over(ctx);
2414 
2415     if (rfi_r) {
2416         gen_helper_rfi_r(tcg_env);
2417     } else {
2418         gen_helper_rfi(tcg_env);
2419     }
2420     /* Exit the TB to recognize new interrupts.  */
2421     tcg_gen_exit_tb(NULL, 0);
2422     ctx->base.is_jmp = DISAS_NORETURN;
2423 
2424     return nullify_end(ctx);
2425 #endif
2426 }
2427 
2428 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2429 {
2430     return do_rfi(ctx, false);
2431 }
2432 
2433 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2434 {
2435     return do_rfi(ctx, true);
2436 }
2437 
2438 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2439 {
2440     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2441 #ifndef CONFIG_USER_ONLY
2442     set_psw_xb(ctx, 0);
2443     nullify_over(ctx);
2444     gen_helper_halt(tcg_env);
2445     ctx->base.is_jmp = DISAS_NORETURN;
2446     return nullify_end(ctx);
2447 #endif
2448 }
2449 
2450 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2451 {
2452     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2453 #ifndef CONFIG_USER_ONLY
2454     set_psw_xb(ctx, 0);
2455     nullify_over(ctx);
2456     gen_helper_reset(tcg_env);
2457     ctx->base.is_jmp = DISAS_NORETURN;
2458     return nullify_end(ctx);
2459 #endif
2460 }
2461 
2462 static bool do_getshadowregs(DisasContext *ctx)
2463 {
2464     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2465     nullify_over(ctx);
2466     tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2467     tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2468     tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2469     tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2470     tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2471     tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2472     tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2473     return nullify_end(ctx);
2474 }
2475 
2476 static bool do_putshadowregs(DisasContext *ctx)
2477 {
2478     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2479     nullify_over(ctx);
2480     tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2481     tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2482     tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2483     tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2484     tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2485     tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2486     tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2487     return nullify_end(ctx);
2488 }
2489 
2490 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2491 {
2492     return do_getshadowregs(ctx);
2493 }
2494 
2495 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2496 {
2497     if (a->m) {
2498         TCGv_i64 dest = dest_gpr(ctx, a->b);
2499         TCGv_i64 src1 = load_gpr(ctx, a->b);
2500         TCGv_i64 src2 = load_gpr(ctx, a->x);
2501 
2502         /* The only thing we need to do is the base register modification.  */
2503         tcg_gen_add_i64(dest, src1, src2);
2504         save_gpr(ctx, a->b, dest);
2505     }
2506     ctx->null_cond = cond_make_f();
2507     return true;
2508 }
2509 
2510 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2511 {
2512     /* End the TB on an instruction cache flush, so we pick up new insns. */
2513     ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2514     return trans_nop_addrx(ctx, a);
2515 }
2516 
2517 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2518 {
2519     TCGv_i64 dest, ofs;
2520     TCGv_i32 level, want;
2521     TCGv_i64 addr;
2522 
2523     nullify_over(ctx);
2524 
2525     dest = dest_gpr(ctx, a->t);
2526     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2527 
2528     if (a->imm) {
2529         level = tcg_constant_i32(a->ri & 3);
2530     } else {
2531         level = tcg_temp_new_i32();
2532         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2533         tcg_gen_andi_i32(level, level, 3);
2534     }
2535     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2536 
2537     gen_helper_probe(dest, tcg_env, addr, level, want);
2538 
2539     save_gpr(ctx, a->t, dest);
2540     return nullify_end(ctx);
2541 }
2542 
2543 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2544 {
2545     if (ctx->is_pa20) {
2546         return false;
2547     }
2548     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2549 #ifndef CONFIG_USER_ONLY
2550     TCGv_i64 addr;
2551     TCGv_i64 ofs, reg;
2552 
2553     nullify_over(ctx);
2554 
2555     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2556     reg = load_gpr(ctx, a->r);
2557     if (a->addr) {
2558         gen_helper_itlba_pa11(tcg_env, addr, reg);
2559     } else {
2560         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2561     }
2562 
2563     /* Exit TB for TLB change if mmu is enabled.  */
2564     if (ctx->tb_flags & PSW_C) {
2565         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2566     }
2567     return nullify_end(ctx);
2568 #endif
2569 }
2570 
2571 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2572 {
2573     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2574 #ifndef CONFIG_USER_ONLY
2575     TCGv_i64 addr;
2576     TCGv_i64 ofs;
2577 
2578     nullify_over(ctx);
2579 
2580     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2581 
2582     /*
2583      * Page align now, rather than later, so that we can add in the
2584      * pa2.0 page_size field from the low 4 bits of GR[b].
2585      */
2586     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2587     if (ctx->is_pa20) {
2588         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2589     }
2590 
2591     if (local) {
2592         gen_helper_ptlb_l(tcg_env, addr);
2593     } else {
2594         gen_helper_ptlb(tcg_env, addr);
2595     }
2596 
2597     if (a->m) {
2598         save_gpr(ctx, a->b, ofs);
2599     }
2600 
2601     /* Exit TB for TLB change if mmu is enabled.  */
2602     if (ctx->tb_flags & PSW_C) {
2603         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2604     }
2605     return nullify_end(ctx);
2606 #endif
2607 }
2608 
2609 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2610 {
2611     return do_pxtlb(ctx, a, false);
2612 }
2613 
2614 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2615 {
2616     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2617 }
2618 
2619 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2620 {
2621     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2622 #ifndef CONFIG_USER_ONLY
2623     nullify_over(ctx);
2624 
2625     trans_nop_addrx(ctx, a);
2626     gen_helper_ptlbe(tcg_env);
2627 
2628     /* Exit TB for TLB change if mmu is enabled.  */
2629     if (ctx->tb_flags & PSW_C) {
2630         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2631     }
2632     return nullify_end(ctx);
2633 #endif
2634 }
2635 
2636 /*
2637  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2638  * See
2639  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2640  *     page 13-9 (195/206)
2641  */
2642 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2643 {
2644     if (ctx->is_pa20) {
2645         return false;
2646     }
2647     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2648 #ifndef CONFIG_USER_ONLY
2649     TCGv_i64 addr, atl, stl;
2650     TCGv_i64 reg;
2651 
2652     nullify_over(ctx);
2653 
2654     /*
2655      * FIXME:
2656      *  if (not (pcxl or pcxl2))
2657      *    return gen_illegal(ctx);
2658      */
2659 
2660     atl = tcg_temp_new_i64();
2661     stl = tcg_temp_new_i64();
2662     addr = tcg_temp_new_i64();
2663 
2664     tcg_gen_ld32u_i64(stl, tcg_env,
2665                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2666                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2667     tcg_gen_ld32u_i64(atl, tcg_env,
2668                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2669                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2670     tcg_gen_shli_i64(stl, stl, 32);
2671     tcg_gen_or_i64(addr, atl, stl);
2672 
2673     reg = load_gpr(ctx, a->r);
2674     if (a->addr) {
2675         gen_helper_itlba_pa11(tcg_env, addr, reg);
2676     } else {
2677         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2678     }
2679 
2680     /* Exit TB for TLB change if mmu is enabled.  */
2681     if (ctx->tb_flags & PSW_C) {
2682         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2683     }
2684     return nullify_end(ctx);
2685 #endif
2686 }
2687 
2688 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2689 {
2690     if (!ctx->is_pa20) {
2691         return false;
2692     }
2693     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2694 #ifndef CONFIG_USER_ONLY
2695     nullify_over(ctx);
2696     {
2697         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2698         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2699 
2700         if (a->data) {
2701             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2702         } else {
2703             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2704         }
2705     }
2706     /* Exit TB for TLB change if mmu is enabled.  */
2707     if (ctx->tb_flags & PSW_C) {
2708         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2709     }
2710     return nullify_end(ctx);
2711 #endif
2712 }
2713 
2714 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2715 {
2716     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2717 #ifndef CONFIG_USER_ONLY
2718     TCGv_i64 vaddr;
2719     TCGv_i64 ofs, paddr;
2720 
2721     nullify_over(ctx);
2722 
2723     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2724 
2725     paddr = tcg_temp_new_i64();
2726     gen_helper_lpa(paddr, tcg_env, vaddr);
2727 
2728     /* Note that physical address result overrides base modification.  */
2729     if (a->m) {
2730         save_gpr(ctx, a->b, ofs);
2731     }
2732     save_gpr(ctx, a->t, paddr);
2733 
2734     return nullify_end(ctx);
2735 #endif
2736 }
2737 
2738 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2739 {
2740     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2741 
2742     /* The Coherence Index is an implementation-defined function of the
2743        physical address.  Two addresses with the same CI have a coherent
2744        view of the cache.  Our implementation is to return 0 for all,
2745        since the entire address space is coherent.  */
2746     save_gpr(ctx, a->t, ctx->zero);
2747 
2748     ctx->null_cond = cond_make_f();
2749     return true;
2750 }
2751 
2752 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2753 {
2754     return do_add_reg(ctx, a, false, false, false, false);
2755 }
2756 
2757 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2758 {
2759     return do_add_reg(ctx, a, true, false, false, false);
2760 }
2761 
2762 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2763 {
2764     return do_add_reg(ctx, a, false, true, false, false);
2765 }
2766 
2767 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2768 {
2769     return do_add_reg(ctx, a, false, false, false, true);
2770 }
2771 
2772 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2773 {
2774     return do_add_reg(ctx, a, false, true, false, true);
2775 }
2776 
2777 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2778 {
2779     return do_sub_reg(ctx, a, false, false, false);
2780 }
2781 
2782 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2783 {
2784     return do_sub_reg(ctx, a, true, false, false);
2785 }
2786 
2787 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2788 {
2789     return do_sub_reg(ctx, a, false, false, true);
2790 }
2791 
2792 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2793 {
2794     return do_sub_reg(ctx, a, true, false, true);
2795 }
2796 
2797 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2798 {
2799     return do_sub_reg(ctx, a, false, true, false);
2800 }
2801 
2802 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2803 {
2804     return do_sub_reg(ctx, a, true, true, false);
2805 }
2806 
2807 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2808 {
2809     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2810 }
2811 
2812 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2813 {
2814     return do_log_reg(ctx, a, tcg_gen_and_i64);
2815 }
2816 
2817 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2818 {
2819     if (a->cf == 0) {
2820         unsigned r2 = a->r2;
2821         unsigned r1 = a->r1;
2822         unsigned rt = a->t;
2823 
2824         if (rt == 0) { /* NOP */
2825             ctx->null_cond = cond_make_f();
2826             return true;
2827         }
2828         if (r2 == 0) { /* COPY */
2829             if (r1 == 0) {
2830                 TCGv_i64 dest = dest_gpr(ctx, rt);
2831                 tcg_gen_movi_i64(dest, 0);
2832                 save_gpr(ctx, rt, dest);
2833             } else {
2834                 save_gpr(ctx, rt, cpu_gr[r1]);
2835             }
2836             ctx->null_cond = cond_make_f();
2837             return true;
2838         }
2839 #ifndef CONFIG_USER_ONLY
2840         /* These are QEMU extensions and are nops in the real architecture:
2841          *
2842          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2843          * or %r31,%r31,%r31 -- death loop; offline cpu
2844          *                      currently implemented as idle.
2845          */
2846         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2847             /* No need to check for supervisor, as userland can only pause
2848                until the next timer interrupt.  */
2849 
2850             set_psw_xb(ctx, 0);
2851 
2852             nullify_over(ctx);
2853 
2854             /* Advance the instruction queue.  */
2855             install_iaq_entries(ctx, &ctx->iaq_b, NULL);
2856             nullify_set(ctx, 0);
2857 
2858             /* Tell the qemu main loop to halt until this cpu has work.  */
2859             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2860                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2861             gen_excp_1(EXCP_HALTED);
2862             ctx->base.is_jmp = DISAS_NORETURN;
2863 
2864             return nullify_end(ctx);
2865         }
2866 #endif
2867     }
2868     return do_log_reg(ctx, a, tcg_gen_or_i64);
2869 }
2870 
2871 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2872 {
2873     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2874 }
2875 
2876 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2877 {
2878     TCGv_i64 tcg_r1, tcg_r2;
2879 
2880     if (a->cf) {
2881         nullify_over(ctx);
2882     }
2883     tcg_r1 = load_gpr(ctx, a->r1);
2884     tcg_r2 = load_gpr(ctx, a->r2);
2885     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2886     return nullify_end(ctx);
2887 }
2888 
2889 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2890 {
2891     TCGv_i64 tcg_r1, tcg_r2, dest;
2892 
2893     if (a->cf) {
2894         nullify_over(ctx);
2895     }
2896 
2897     tcg_r1 = load_gpr(ctx, a->r1);
2898     tcg_r2 = load_gpr(ctx, a->r2);
2899     dest = dest_gpr(ctx, a->t);
2900 
2901     tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2902     save_gpr(ctx, a->t, dest);
2903 
2904     ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2905     return nullify_end(ctx);
2906 }
2907 
2908 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2909 {
2910     TCGv_i64 tcg_r1, tcg_r2, tmp;
2911 
2912     if (a->cf == 0) {
2913         tcg_r2 = load_gpr(ctx, a->r2);
2914         tmp = dest_gpr(ctx, a->t);
2915 
2916         if (a->r1 == 0) {
2917             /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
2918             tcg_gen_not_i64(tmp, tcg_r2);
2919         } else {
2920             /*
2921              * Recall that r1 - r2 == r1 + ~r2 + 1.
2922              * Thus r1 + ~r2 == r1 - r2 - 1,
2923              * which does not require an extra temporary.
2924              */
2925             tcg_r1 = load_gpr(ctx, a->r1);
2926             tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
2927             tcg_gen_subi_i64(tmp, tmp, 1);
2928         }
2929         save_gpr(ctx, a->t, tmp);
2930         ctx->null_cond = cond_make_f();
2931         return true;
2932     }
2933 
2934     nullify_over(ctx);
2935     tcg_r1 = load_gpr(ctx, a->r1);
2936     tcg_r2 = load_gpr(ctx, a->r2);
2937     tmp = tcg_temp_new_i64();
2938     tcg_gen_not_i64(tmp, tcg_r2);
2939     do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
2940     return nullify_end(ctx);
2941 }
2942 
2943 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2944 {
2945     return do_uaddcm(ctx, a, false);
2946 }
2947 
2948 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2949 {
2950     return do_uaddcm(ctx, a, true);
2951 }
2952 
2953 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2954 {
2955     TCGv_i64 tmp;
2956 
2957     nullify_over(ctx);
2958 
2959     tmp = tcg_temp_new_i64();
2960     tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
2961     if (!is_i) {
2962         tcg_gen_not_i64(tmp, tmp);
2963     }
2964     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2965     tcg_gen_muli_i64(tmp, tmp, 6);
2966     do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
2967                    a->cf, a->d, false, is_i);
2968     return nullify_end(ctx);
2969 }
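/*
 * Worked example (editor's sketch) of the classic BCD flow DCOR
 * supports: to add 0x28 + 0x39 in BCD, pre-add 0x66 (0x28 + 0x66 =
 * 0x8e), binary-add the other operand (0x8e + 0x39 = 0xc7; the low
 * digit carried, the high digit did not), then DCOR subtracts 6 from
 * each digit that produced no nibble carry: 0xc7 - 0x60 = 0x67, the
 * correct BCD sum of 28 + 39.
 */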
2970 
2971 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2972 {
2973     return do_dcor(ctx, a, false);
2974 }
2975 
2976 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2977 {
2978     return do_dcor(ctx, a, true);
2979 }
2980 
2981 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2982 {
2983     TCGv_i64 dest, add1, add2, addc, in1, in2;
2984 
2985     nullify_over(ctx);
2986 
2987     in1 = load_gpr(ctx, a->r1);
2988     in2 = load_gpr(ctx, a->r2);
2989 
2990     add1 = tcg_temp_new_i64();
2991     add2 = tcg_temp_new_i64();
2992     addc = tcg_temp_new_i64();
2993     dest = tcg_temp_new_i64();
2994 
2995     /* Form R1 << 1 | PSW[CB]{8}.  */
2996     tcg_gen_add_i64(add1, in1, in1);
2997     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2998 
2999     /*
3000      * Add or subtract R2, depending on PSW[V].  Proper computation of
3001      * carry requires that we subtract via + ~R2 + 1, as described in
3002      * the manual.  By extracting and masking V, we can produce the
3003      * proper inputs to the addition without movcond.
3004      */
3005     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
3006     tcg_gen_xor_i64(add2, in2, addc);
3007     tcg_gen_andi_i64(addc, addc, 1);
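    /*
     * At this point addc is 0 or 1 and add2 is in2 or ~in2: with
     * PSW[V] set, the sums below compute add1 + ~in2 + 1 = add1 - in2;
     * with PSW[V] clear, they compute add1 + in2.
     */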
3008 
3009     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
3010     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
3011                      addc, ctx->zero);
3012 
3013     /* Write back the result register.  */
3014     save_gpr(ctx, a->t, dest);
3015 
3016     /* Write back PSW[CB].  */
3017     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
3018     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
3019 
3020     /*
3021      * Write back PSW[V] for the division step.
3022      * Shift cb{8} from where it lives in bit 32 to bit 31,
3023      * so that it overlaps r2{32} in bit 31.
3024      */
3025     tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
3026     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
3027 
3028     /* Install the new nullification.  */
3029     if (a->cf) {
3030         TCGv_i64 sv = NULL, uv = NULL;
3031         if (cond_need_sv(a->cf >> 1)) {
3032             sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
3033         } else if (cond_need_cb(a->cf >> 1)) {
3034             uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
3035         }
3036         ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
3037     }
3038 
3039     return nullify_end(ctx);
3040 }
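/*
 * Editor's note: DS is the building block of the PA-RISC software
 * division sequence -- one DS per quotient bit, with PSW[V] steering
 * each step between adding and subtracting the divisor.
 */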
3041 
3042 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
3043 {
3044     return do_add_imm(ctx, a, false, false);
3045 }
3046 
3047 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
3048 {
3049     return do_add_imm(ctx, a, true, false);
3050 }
3051 
3052 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
3053 {
3054     return do_add_imm(ctx, a, false, true);
3055 }
3056 
3057 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
3058 {
3059     return do_add_imm(ctx, a, true, true);
3060 }
3061 
3062 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
3063 {
3064     return do_sub_imm(ctx, a, false);
3065 }
3066 
3067 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
3068 {
3069     return do_sub_imm(ctx, a, true);
3070 }
3071 
3072 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3073 {
3074     TCGv_i64 tcg_im, tcg_r2;
3075 
3076     if (a->cf) {
3077         nullify_over(ctx);
3078     }
3079 
3080     tcg_im = tcg_constant_i64(a->i);
3081     tcg_r2 = load_gpr(ctx, a->r);
3082     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3083 
3084     return nullify_end(ctx);
3085 }
3086 
3087 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3088                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3089 {
3090     TCGv_i64 r1, r2, dest;
3091 
3092     if (!ctx->is_pa20) {
3093         return false;
3094     }
3095 
3096     nullify_over(ctx);
3097 
3098     r1 = load_gpr(ctx, a->r1);
3099     r2 = load_gpr(ctx, a->r2);
3100     dest = dest_gpr(ctx, a->t);
3101 
3102     fn(dest, r1, r2);
3103     save_gpr(ctx, a->t, dest);
3104 
3105     return nullify_end(ctx);
3106 }
3107 
3108 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3109                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3110 {
3111     TCGv_i64 r, dest;
3112 
3113     if (!ctx->is_pa20) {
3114         return false;
3115     }
3116 
3117     nullify_over(ctx);
3118 
3119     r = load_gpr(ctx, a->r);
3120     dest = dest_gpr(ctx, a->t);
3121 
3122     fn(dest, r, a->i);
3123     save_gpr(ctx, a->t, dest);
3124 
3125     return nullify_end(ctx);
3126 }
3127 
3128 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3129                                 void (*fn)(TCGv_i64, TCGv_i64,
3130                                            TCGv_i64, TCGv_i32))
3131 {
3132     TCGv_i64 r1, r2, dest;
3133 
3134     if (!ctx->is_pa20) {
3135         return false;
3136     }
3137 
3138     nullify_over(ctx);
3139 
3140     r1 = load_gpr(ctx, a->r1);
3141     r2 = load_gpr(ctx, a->r2);
3142     dest = dest_gpr(ctx, a->t);
3143 
3144     fn(dest, r1, r2, tcg_constant_i32(a->sh));
3145     save_gpr(ctx, a->t, dest);
3146 
3147     return nullify_end(ctx);
3148 }
3149 
3150 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
3151 {
3152     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
3153 }
3154 
3155 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
3156 {
3157     return do_multimedia(ctx, a, gen_helper_hadd_ss);
3158 }
3159 
3160 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
3161 {
3162     return do_multimedia(ctx, a, gen_helper_hadd_us);
3163 }
3164 
3165 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
3166 {
3167     return do_multimedia(ctx, a, gen_helper_havg);
3168 }
3169 
3170 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
3171 {
3172     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
3173 }
3174 
3175 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
3176 {
3177     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
3178 }
3179 
3180 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
3181 {
3182     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
3183 }
3184 
3185 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
3186 {
3187     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
3188 }
3189 
3190 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
3191 {
3192     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
3193 }
3194 
3195 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3196 {
3197     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3198 }
3199 
3200 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3201 {
3202     return do_multimedia(ctx, a, gen_helper_hsub_ss);
3203 }
3204 
3205 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3206 {
3207     return do_multimedia(ctx, a, gen_helper_hsub_us);
3208 }
3209 
3210 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3211 {
3212     uint64_t mask = 0xffff0000ffff0000ull;
3213     TCGv_i64 tmp = tcg_temp_new_i64();
3214 
3215     tcg_gen_andi_i64(tmp, r2, mask);
3216     tcg_gen_andi_i64(dst, r1, mask);
3217     tcg_gen_shri_i64(tmp, tmp, 16);
3218     tcg_gen_or_i64(dst, dst, tmp);
3219 }
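/*
 * Editor's note: numbering halfwords 0..3 from the most significant
 * end, mixh,l above interleaves the even halfwords of both operands,
 * dst = { r1.h0, r2.h0, r1.h2, r2.h2 }; mixh,r below is the analog
 * for the odd halfwords.
 */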
3220 
3221 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3222 {
3223     return do_multimedia(ctx, a, gen_mixh_l);
3224 }
3225 
3226 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3227 {
3228     uint64_t mask = 0x0000ffff0000ffffull;
3229     TCGv_i64 tmp = tcg_temp_new_i64();
3230 
3231     tcg_gen_andi_i64(tmp, r1, mask);
3232     tcg_gen_andi_i64(dst, r2, mask);
3233     tcg_gen_shli_i64(tmp, tmp, 16);
3234     tcg_gen_or_i64(dst, dst, tmp);
3235 }
3236 
3237 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3238 {
3239     return do_multimedia(ctx, a, gen_mixh_r);
3240 }
3241 
3242 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3243 {
3244     TCGv_i64 tmp = tcg_temp_new_i64();
3245 
3246     tcg_gen_shri_i64(tmp, r2, 32);
3247     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3248 }
3249 
3250 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3251 {
3252     return do_multimedia(ctx, a, gen_mixw_l);
3253 }
3254 
3255 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3256 {
3257     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3258 }
3259 
3260 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3261 {
3262     return do_multimedia(ctx, a, gen_mixw_r);
3263 }
3264 
3265 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3266 {
3267     TCGv_i64 r, t0, t1, t2, t3;
3268 
3269     if (!ctx->is_pa20) {
3270         return false;
3271     }
3272 
3273     nullify_over(ctx);
3274 
3275     r = load_gpr(ctx, a->r1);
3276     t0 = tcg_temp_new_i64();
3277     t1 = tcg_temp_new_i64();
3278     t2 = tcg_temp_new_i64();
3279     t3 = tcg_temp_new_i64();
3280 
3281     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3282     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3283     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3284     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3285 
3286     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3287     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3288     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3289 
3290     save_gpr(ctx, a->t, t0);
3291     return nullify_end(ctx);
3292 }
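/*
 * Editor's note: with halfwords numbered 0..3 from the most
 * significant end, the extract/deposit sequence above produces
 * t = { r.h[c0], r.h[c1], r.h[c2], r.h[c3] }.
 */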
3293 
3294 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3295 {
3296     if (ctx->is_pa20) {
3297        /*
3298         * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
3299         * Any base modification still occurs.
3300         */
3301         if (a->t == 0) {
3302             return trans_nop_addrx(ctx, a);
3303         }
3304     } else if (a->size > MO_32) {
3305         return gen_illegal(ctx);
3306     }
3307     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3308                    a->disp, a->sp, a->m, a->size | MO_TE);
3309 }
3310 
3311 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3312 {
3313     assert(a->x == 0 && a->scale == 0);
3314     if (!ctx->is_pa20 && a->size > MO_32) {
3315         return gen_illegal(ctx);
3316     }
3317     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3318 }
3319 
3320 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3321 {
3322     MemOp mop = MO_TE | MO_ALIGN | a->size;
3323     TCGv_i64 dest, ofs;
3324     TCGv_i64 addr;
3325 
3326     if (!ctx->is_pa20 && a->size > MO_32) {
3327         return gen_illegal(ctx);
3328     }
3329 
3330     nullify_over(ctx);
3331 
3332     if (a->m) {
3333         /* Base register modification.  Make sure if RT == RB,
3334            we see the result of the load.  */
3335         dest = tcg_temp_new_i64();
3336     } else {
3337         dest = dest_gpr(ctx, a->t);
3338     }
3339 
3340     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3341              a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3342 
3343     /*
3344      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3345      * However, actual hardware succeeds when merely aligned mod 4.
3346      * Detect this case and log a GUEST_ERROR.
3347      *
3348      * TODO: HPPA64 relaxes the over-alignment requirement
3349      * with the ,co completer.
3350      */
3351     gen_helper_ldc_check(addr);
3352 
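    /* LDCW loads the old value and atomically clears the word, which
       maps directly onto an atomic exchange with zero.  */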
3353     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3354 
3355     if (a->m) {
3356         save_gpr(ctx, a->b, ofs);
3357     }
3358     save_gpr(ctx, a->t, dest);
3359 
3360     return nullify_end(ctx);
3361 }
3362 
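/*
 * Editor's note: STBY stores only the bytes of the word selected by
 * the low bits of the address, for assembling unaligned stores
 * piecewise: the begin form covers from the addressed byte to the
 * end of the word, the ending (,e) form the bytes before it.
 * Parallel contexts use dedicated helpers so the partial store
 * appears atomic to other vCPUs.
 */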
3363 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3364 {
3365     TCGv_i64 ofs, val;
3366     TCGv_i64 addr;
3367 
3368     nullify_over(ctx);
3369 
3370     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3371              MMU_DISABLED(ctx));
3372     val = load_gpr(ctx, a->r);
3373     if (a->a) {
3374         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3375             gen_helper_stby_e_parallel(tcg_env, addr, val);
3376         } else {
3377             gen_helper_stby_e(tcg_env, addr, val);
3378         }
3379     } else {
3380         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3381             gen_helper_stby_b_parallel(tcg_env, addr, val);
3382         } else {
3383             gen_helper_stby_b(tcg_env, addr, val);
3384         }
3385     }
3386     if (a->m) {
3387         tcg_gen_andi_i64(ofs, ofs, ~3);
3388         save_gpr(ctx, a->b, ofs);
3389     }
3390 
3391     return nullify_end(ctx);
3392 }
3393 
3394 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3395 {
3396     TCGv_i64 ofs, val;
3397     TCGv_i64 addr;
3398 
3399     if (!ctx->is_pa20) {
3400         return false;
3401     }
3402     nullify_over(ctx);
3403 
3404     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3405              MMU_DISABLED(ctx));
3406     val = load_gpr(ctx, a->r);
3407     if (a->a) {
3408         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3409             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3410         } else {
3411             gen_helper_stdby_e(tcg_env, addr, val);
3412         }
3413     } else {
3414         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3415             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3416         } else {
3417             gen_helper_stdby_b(tcg_env, addr, val);
3418         }
3419     }
3420     if (a->m) {
3421         tcg_gen_andi_i64(ofs, ofs, ~7);
3422         save_gpr(ctx, a->b, ofs);
3423     }
3424 
3425     return nullify_end(ctx);
3426 }
3427 
3428 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3429 {
3430     int hold_mmu_idx = ctx->mmu_idx;
3431 
3432     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3433     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3434     trans_ld(ctx, a);
3435     ctx->mmu_idx = hold_mmu_idx;
3436     return true;
3437 }
3438 
3439 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3440 {
3441     int hold_mmu_idx = ctx->mmu_idx;
3442 
3443     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3444     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3445     trans_st(ctx, a);
3446     ctx->mmu_idx = hold_mmu_idx;
3447     return true;
3448 }
3449 
3450 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3451 {
3452     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3453 
3454     tcg_gen_movi_i64(tcg_rt, a->i);
3455     save_gpr(ctx, a->t, tcg_rt);
3456     ctx->null_cond = cond_make_f();
3457     return true;
3458 }
3459 
3460 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3461 {
3462     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3463     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3464 
3465     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3466     save_gpr(ctx, 1, tcg_r1);
3467     ctx->null_cond = cond_make_f();
3468     return true;
3469 }
3470 
3471 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3472 {
3473     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3474 
3475     /* Special case rb == 0, for the LDI pseudo-op.
3476        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3477     if (a->b == 0) {
3478         tcg_gen_movi_i64(tcg_rt, a->i);
3479     } else {
3480         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3481     }
3482     save_gpr(ctx, a->t, tcg_rt);
3483     ctx->null_cond = cond_make_f();
3484     return true;
3485 }
3486 
3487 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3488                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3489 {
3490     TCGv_i64 dest, in2, sv;
3491     DisasCond cond;
3492 
3493     in2 = load_gpr(ctx, r);
3494     dest = tcg_temp_new_i64();
3495 
3496     tcg_gen_sub_i64(dest, in1, in2);
3497 
3498     sv = NULL;
3499     if (cond_need_sv(c)) {
3500         sv = do_sub_sv(ctx, dest, in1, in2);
3501     }
3502 
3503     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3504     return do_cbranch(ctx, disp, n, &cond);
3505 }
3506 
3507 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3508 {
3509     if (!ctx->is_pa20 && a->d) {
3510         return false;
3511     }
3512     nullify_over(ctx);
3513     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3514                    a->c, a->f, a->d, a->n, a->disp);
3515 }
3516 
3517 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3518 {
3519     if (!ctx->is_pa20 && a->d) {
3520         return false;
3521     }
3522     nullify_over(ctx);
3523     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3524                    a->c, a->f, a->d, a->n, a->disp);
3525 }
3526 
3527 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3528                     unsigned c, unsigned f, unsigned n, int disp)
3529 {
3530     TCGv_i64 dest, in2, sv, cb_cond;
3531     DisasCond cond;
3532     bool d = false;
3533 
3534     /*
3535      * For hppa64, the ADDB conditions change with PSW.W,
3536      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3537      */
3538     if (ctx->tb_flags & PSW_W) {
3539         d = c >= 5;
3540         if (d) {
3541             c &= 3;
3542         }
3543     }
3544 
3545     in2 = load_gpr(ctx, r);
3546     dest = tcg_temp_new_i64();
3547     sv = NULL;
3548     cb_cond = NULL;
3549 
3550     if (cond_need_cb(c)) {
3551         TCGv_i64 cb = tcg_temp_new_i64();
3552         TCGv_i64 cb_msb = tcg_temp_new_i64();
3553 
3554         tcg_gen_movi_i64(cb_msb, 0);
3555         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3556         tcg_gen_xor_i64(cb, in1, in2);
3557         tcg_gen_xor_i64(cb, cb, dest);
3558         cb_cond = get_carry(ctx, d, cb, cb_msb);
3559     } else {
3560         tcg_gen_add_i64(dest, in1, in2);
3561     }
3562     if (cond_need_sv(c)) {
3563         sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
3564     }
3565 
3566     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3567     save_gpr(ctx, r, dest);
3568     return do_cbranch(ctx, disp, n, &cond);
3569 }
3570 
3571 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3572 {
3573     nullify_over(ctx);
3574     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3575 }
3576 
3577 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3578 {
3579     nullify_over(ctx);
3580     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3581 }
3582 
3583 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3584 {
3585     TCGv_i64 tmp, tcg_r;
3586     DisasCond cond;
3587 
3588     nullify_over(ctx);
3589 
3590     tmp = tcg_temp_new_i64();
3591     tcg_r = load_gpr(ctx, a->r);
3592     if (a->d) {
3593         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3594     } else {
3595         /* Force shift into [32,63] */
3596         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3597         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3598     }
3599 
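    /* The bit selected by SAR is now in bit 63; test it via the sign. */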
3600     cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
3601     return do_cbranch(ctx, a->disp, a->n, &cond);
3602 }
3603 
3604 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3605 {
3606     DisasCond cond;
3607     int p = a->p | (a->d ? 0 : 32);
3608 
3609     nullify_over(ctx);
3610     cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
3611                         load_gpr(ctx, a->r), 1ull << (63 - p));
3612     return do_cbranch(ctx, a->disp, a->n, &cond);
3613 }
3614 
3615 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3616 {
3617     TCGv_i64 dest;
3618     DisasCond cond;
3619 
3620     nullify_over(ctx);
3621 
3622     dest = dest_gpr(ctx, a->r2);
3623     if (a->r1 == 0) {
3624         tcg_gen_movi_i64(dest, 0);
3625     } else {
3626         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3627     }
3628 
3629     /* All MOVB conditions are 32-bit. */
3630     cond = do_sed_cond(ctx, a->c, false, dest);
3631     return do_cbranch(ctx, a->disp, a->n, &cond);
3632 }
3633 
3634 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3635 {
3636     TCGv_i64 dest;
3637     DisasCond cond;
3638 
3639     nullify_over(ctx);
3640 
3641     dest = dest_gpr(ctx, a->r);
3642     tcg_gen_movi_i64(dest, a->i);
3643 
3644     /* All MOVBI conditions are 32-bit. */
3645     cond = do_sed_cond(ctx, a->c, false, dest);
3646     return do_cbranch(ctx, a->disp, a->n, &cond);
3647 }
3648 
3649 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3650 {
3651     TCGv_i64 dest, src2;
3652 
3653     if (!ctx->is_pa20 && a->d) {
3654         return false;
3655     }
3656     if (a->c) {
3657         nullify_over(ctx);
3658     }
3659 
3660     dest = dest_gpr(ctx, a->t);
3661     src2 = load_gpr(ctx, a->r2);
3662     if (a->r1 == 0) {
3663         if (a->d) {
3664             tcg_gen_shr_i64(dest, src2, cpu_sar);
3665         } else {
3666             TCGv_i64 tmp = tcg_temp_new_i64();
3667 
3668             tcg_gen_ext32u_i64(dest, src2);
3669             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3670             tcg_gen_shr_i64(dest, dest, tmp);
3671         }
3672     } else if (a->r1 == a->r2) {
3673         if (a->d) {
3674             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3675         } else {
3676             TCGv_i32 t32 = tcg_temp_new_i32();
3677             TCGv_i32 s32 = tcg_temp_new_i32();
3678 
3679             tcg_gen_extrl_i64_i32(t32, src2);
3680             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3681             tcg_gen_andi_i32(s32, s32, 31);
3682             tcg_gen_rotr_i32(t32, t32, s32);
3683             tcg_gen_extu_i32_i64(dest, t32);
3684         }
3685     } else {
3686         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3687 
3688         if (a->d) {
3689             TCGv_i64 t = tcg_temp_new_i64();
3690             TCGv_i64 n = tcg_temp_new_i64();
3691 
3692             tcg_gen_xori_i64(n, cpu_sar, 63);
3693             tcg_gen_shl_i64(t, src1, n);
3694             tcg_gen_shli_i64(t, t, 1);
3695             tcg_gen_shr_i64(dest, src2, cpu_sar);
3696             tcg_gen_or_i64(dest, dest, t);
3697         } else {
3698             TCGv_i64 t = tcg_temp_new_i64();
3699             TCGv_i64 s = tcg_temp_new_i64();
3700 
3701             tcg_gen_concat32_i64(t, src2, src1);
3702             tcg_gen_andi_i64(s, cpu_sar, 31);
3703             tcg_gen_shr_i64(dest, t, s);
3704         }
3705     }
3706     save_gpr(ctx, a->t, dest);
3707 
3708     /* Install the new nullification.  */
3709     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3710     return nullify_end(ctx);
3711 }
3712 
3713 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3714 {
3715     unsigned width, sa;
3716     TCGv_i64 dest, t2;
3717 
3718     if (!ctx->is_pa20 && a->d) {
3719         return false;
3720     }
3721     if (a->c) {
3722         nullify_over(ctx);
3723     }
3724 
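    /* The architected bit position counts from the MSB; convert it
       to the equivalent right-shift amount.  */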
3725     width = a->d ? 64 : 32;
3726     sa = width - 1 - a->cpos;
3727 
3728     dest = dest_gpr(ctx, a->t);
3729     t2 = load_gpr(ctx, a->r2);
3730     if (a->r1 == 0) {
3731         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3732     } else if (width == TARGET_LONG_BITS) {
3733         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3734     } else {
3735         assert(!a->d);
3736         if (a->r1 == a->r2) {
3737             TCGv_i32 t32 = tcg_temp_new_i32();
3738             tcg_gen_extrl_i64_i32(t32, t2);
3739             tcg_gen_rotri_i32(t32, t32, sa);
3740             tcg_gen_extu_i32_i64(dest, t32);
3741         } else {
3742             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3743             tcg_gen_extract_i64(dest, dest, sa, 32);
3744         }
3745     }
3746     save_gpr(ctx, a->t, dest);
3747 
3748     /* Install the new nullification.  */
3749     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3750     return nullify_end(ctx);
3751 }
3752 
3753 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3754 {
3755     unsigned widthm1 = a->d ? 63 : 31;
3756     TCGv_i64 dest, src, tmp;
3757 
3758     if (!ctx->is_pa20 && a->d) {
3759         return false;
3760     }
3761     if (a->c) {
3762         nullify_over(ctx);
3763     }
3764 
3765     dest = dest_gpr(ctx, a->t);
3766     src = load_gpr(ctx, a->r);
3767     tmp = tcg_temp_new_i64();
3768 
3769     /* Recall that SAR is using big-endian bit numbering.  */
3770     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3771     tcg_gen_xori_i64(tmp, tmp, widthm1);
3772 
3773     if (a->se) {
3774         if (!a->d) {
3775             tcg_gen_ext32s_i64(dest, src);
3776             src = dest;
3777         }
3778         tcg_gen_sar_i64(dest, src, tmp);
3779         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3780     } else {
3781         if (!a->d) {
3782             tcg_gen_ext32u_i64(dest, src);
3783             src = dest;
3784         }
3785         tcg_gen_shr_i64(dest, src, tmp);
3786         tcg_gen_extract_i64(dest, dest, 0, a->len);
3787     }
3788     save_gpr(ctx, a->t, dest);
3789 
3790     /* Install the new nullification.  */
3791     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3792     return nullify_end(ctx);
3793 }
3794 
3795 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3796 {
3797     unsigned len, cpos, width;
3798     TCGv_i64 dest, src;
3799 
3800     if (!ctx->is_pa20 && a->d) {
3801         return false;
3802     }
3803     if (a->c) {
3804         nullify_over(ctx);
3805     }
3806 
3807     len = a->len;
3808     width = a->d ? 64 : 32;
3809     cpos = width - 1 - a->pos;
3810     if (cpos + len > width) {
3811         len = width - cpos;
3812     }
3813 
3814     dest = dest_gpr(ctx, a->t);
3815     src = load_gpr(ctx, a->r);
3816     if (a->se) {
3817         tcg_gen_sextract_i64(dest, src, cpos, len);
3818     } else {
3819         tcg_gen_extract_i64(dest, src, cpos, len);
3820     }
3821     save_gpr(ctx, a->t, dest);
3822 
3823     /* Install the new nullification.  */
3824     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3825     return nullify_end(ctx);
3826 }
3827 
3828 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3829 {
3830     unsigned len, width;
3831     uint64_t mask0, mask1;
3832     TCGv_i64 dest;
3833 
3834     if (!ctx->is_pa20 && a->d) {
3835         return false;
3836     }
3837     if (a->c) {
3838         nullify_over(ctx);
3839     }
3840 
3841     len = a->len;
3842     width = a->d ? 64 : 32;
3843     if (a->cpos + len > width) {
3844         len = width - a->cpos;
3845     }
3846 
3847     dest = dest_gpr(ctx, a->t);
3848     mask0 = deposit64(0, a->cpos, len, a->i);
3849     mask1 = deposit64(-1, a->cpos, len, a->i);
3850 
3851     if (a->nz) {
3852         TCGv_i64 src = load_gpr(ctx, a->t);
3853         tcg_gen_andi_i64(dest, src, mask1);
3854         tcg_gen_ori_i64(dest, dest, mask0);
3855     } else {
3856         tcg_gen_movi_i64(dest, mask0);
3857     }
3858     save_gpr(ctx, a->t, dest);
3859 
3860     /* Install the new nullification.  */
3861     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3862     return nullify_end(ctx);
3863 }
3864 
3865 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3866 {
3867     unsigned rs = a->nz ? a->t : 0;
3868     unsigned len, width;
3869     TCGv_i64 dest, val;
3870 
3871     if (!ctx->is_pa20 && a->d) {
3872         return false;
3873     }
3874     if (a->c) {
3875         nullify_over(ctx);
3876     }
3877 
3878     len = a->len;
3879     width = a->d ? 64 : 32;
3880     if (a->cpos + len > width) {
3881         len = width - a->cpos;
3882     }
3883 
3884     dest = dest_gpr(ctx, a->t);
3885     val = load_gpr(ctx, a->r);
3886     if (rs == 0) {
3887         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3888     } else {
3889         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3890     }
3891     save_gpr(ctx, a->t, dest);
3892 
3893     /* Install the new nullification.  */
3894     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3895     return nullify_end(ctx);
3896 }
3897 
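/*
 * Variable deposit: insert the low LEN bits of VAL into the target
 * register at the position named by SAR.  SAR counts from the MSB,
 * so the left-shift amount is widthm1 - SAR.  With NZ set, bits
 * outside the field keep their previous value; otherwise zero.
 */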
3898 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3899                        bool d, bool nz, unsigned len, TCGv_i64 val)
3900 {
3901     unsigned rs = nz ? rt : 0;
3902     unsigned widthm1 = d ? 63 : 31;
3903     TCGv_i64 mask, tmp, shift, dest;
3904     uint64_t msb = 1ULL << (len - 1);
3905 
3906     dest = dest_gpr(ctx, rt);
3907     shift = tcg_temp_new_i64();
3908     tmp = tcg_temp_new_i64();
3909 
3910     /* Convert big-endian bit numbering in SAR to left-shift.  */
3911     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3912     tcg_gen_xori_i64(shift, shift, widthm1);
3913 
3914     mask = tcg_temp_new_i64();
3915     tcg_gen_movi_i64(mask, msb + (msb - 1));
3916     tcg_gen_and_i64(tmp, val, mask);
3917     if (rs) {
3918         tcg_gen_shl_i64(mask, mask, shift);
3919         tcg_gen_shl_i64(tmp, tmp, shift);
3920         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3921         tcg_gen_or_i64(dest, dest, tmp);
3922     } else {
3923         tcg_gen_shl_i64(dest, tmp, shift);
3924     }
3925     save_gpr(ctx, rt, dest);
3926 
3927     /* Install the new nullification.  */
3928     ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3929     return nullify_end(ctx);
3930 }
3931 
3932 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3933 {
3934     if (!ctx->is_pa20 && a->d) {
3935         return false;
3936     }
3937     if (a->c) {
3938         nullify_over(ctx);
3939     }
3940     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3941                       load_gpr(ctx, a->r));
3942 }
3943 
3944 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3945 {
3946     if (!ctx->is_pa20 && a->d) {
3947         return false;
3948     }
3949     if (a->c) {
3950         nullify_over(ctx);
3951     }
3952     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3953                       tcg_constant_i64(a->i));
3954 }
3955 
3956 static bool trans_be(DisasContext *ctx, arg_be *a)
3957 {
3958 #ifndef CONFIG_USER_ONLY
3959     ctx->iaq_j.space = tcg_temp_new_i64();
3960     load_spr(ctx, ctx->iaq_j.space, a->sp);
3961 #endif
3962 
3963     ctx->iaq_j.base = tcg_temp_new_i64();
3964     ctx->iaq_j.disp = 0;
3965 
3966     tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
3967     ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);
3968 
3969     return do_ibranch(ctx, a->l, true, a->n);
3970 }
3971 
3972 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3973 {
3974     return do_dbranch(ctx, a->disp, a->l, a->n);
3975 }
3976 
3977 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3978 {
3979     int64_t disp = a->disp;
3980     bool indirect = false;
3981 
3982     /* Trap if PSW[B] is set. */
3983     if (ctx->psw_xb & PSW_B) {
3984         return gen_illegal(ctx);
3985     }
3986 
3987     nullify_over(ctx);
3988 
3989 #ifndef CONFIG_USER_ONLY
3990     if (ctx->privilege == 0) {
3991         /* Privilege cannot decrease. */
3992     } else if (!(ctx->tb_flags & PSW_C)) {
3993         /* With paging disabled, priv becomes 0. */
3994         disp -= ctx->privilege;
3995     } else {
3996         /* Adjust the dest offset for the privilege change from the PTE. */
3997         TCGv_i64 off = tcg_temp_new_i64();
3998 
3999         copy_iaoq_entry(ctx, off, &ctx->iaq_f);
4000         gen_helper_b_gate_priv(off, tcg_env, off);
4001 
4002         ctx->iaq_j.base = off;
4003         ctx->iaq_j.disp = disp + 8;
4004         indirect = true;
4005     }
4006 #endif
4007 
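    /* Deposit the current privilege level into the low bits of the
       link register, as architected for gateway branches.  */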
4008     if (a->l) {
4009         TCGv_i64 tmp = dest_gpr(ctx, a->l);
4010         if (ctx->privilege < 3) {
4011             tcg_gen_andi_i64(tmp, tmp, -4);
4012         }
4013         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
4014         save_gpr(ctx, a->l, tmp);
4015     }
4016 
4017     if (indirect) {
4018         return do_ibranch(ctx, 0, false, a->n);
4019     }
4020     return do_dbranch(ctx, disp, 0, a->n);
4021 }
4022 
4023 static bool trans_blr(DisasContext *ctx, arg_blr *a)
4024 {
4025     if (a->x) {
4026         DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
4027         TCGv_i64 t0 = tcg_temp_new_i64();
4028         TCGv_i64 t1 = tcg_temp_new_i64();
4029 
4030         /* The computation here never changes privilege level.  */
4031         copy_iaoq_entry(ctx, t0, &next);
4032         tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
4033         tcg_gen_add_i64(t0, t0, t1);
4034 
4035         ctx->iaq_j = iaqe_next_absv(ctx, t0);
4036         return do_ibranch(ctx, a->l, false, a->n);
4037     } else {
4038         /* BLR R0,RX is a good way to load PC+8 into RX.  */
4039         return do_dbranch(ctx, 0, a->l, a->n);
4040     }
4041 }
4042 
4043 static bool trans_bv(DisasContext *ctx, arg_bv *a)
4044 {
4045     TCGv_i64 dest;
4046 
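    /* The index register is scaled by 8, per the architected
       shift-by-3 of BV.  */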
4047     if (a->x == 0) {
4048         dest = load_gpr(ctx, a->b);
4049     } else {
4050         dest = tcg_temp_new_i64();
4051         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
4052         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
4053     }
4054     dest = do_ibranch_priv(ctx, dest);
4055     ctx->iaq_j = iaqe_next_absv(ctx, dest);
4056 
4057     return do_ibranch(ctx, 0, false, a->n);
4058 }
4059 
4060 static bool trans_bve(DisasContext *ctx, arg_bve *a)
4061 {
4062     TCGv_i64 b = load_gpr(ctx, a->b);
4063 
4064 #ifndef CONFIG_USER_ONLY
4065     ctx->iaq_j.space = space_select(ctx, 0, b);
4066 #endif
4067     ctx->iaq_j.base = do_ibranch_priv(ctx, b);
4068     ctx->iaq_j.disp = 0;
4069 
4070     return do_ibranch(ctx, a->l, false, a->n);
4071 }
4072 
4073 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
4074 {
4075     /* All branch target stack instructions are implemented as nops. */
4076     return ctx->is_pa20;
4077 }
4078 
4079 /*
4080  * Float class 0
4081  */
4082 
4083 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4084 {
4085     tcg_gen_mov_i32(dst, src);
4086 }
4087 
4088 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4089 {
4090     uint64_t ret;
4091 
4092     if (ctx->is_pa20) {
4093         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4094     } else {
4095         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4096     }
4097 
4098     nullify_over(ctx);
4099     save_frd(0, tcg_constant_i64(ret));
4100     return nullify_end(ctx);
4101 }
4102 
4103 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
4104 {
4105     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
4106 }
4107 
4108 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4109 {
4110     tcg_gen_mov_i64(dst, src);
4111 }
4112 
4113 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
4114 {
4115     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
4116 }
4117 
4118 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4119 {
4120     tcg_gen_andi_i32(dst, src, INT32_MAX);
4121 }
4122 
4123 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
4124 {
4125     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
4126 }
4127 
4128 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4129 {
4130     tcg_gen_andi_i64(dst, src, INT64_MAX);
4131 }
4132 
4133 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
4134 {
4135     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
4136 }
4137 
4138 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
4139 {
4140     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
4141 }
4142 
4143 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
4144 {
4145     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
4146 }
4147 
4148 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
4149 {
4150     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
4151 }
4152 
4153 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
4154 {
4155     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
4156 }
4157 
4158 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4159 {
4160     tcg_gen_xori_i32(dst, src, INT32_MIN);
4161 }
4162 
4163 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
4164 {
4165     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
4166 }
4167 
4168 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4169 {
4170     tcg_gen_xori_i64(dst, src, INT64_MIN);
4171 }
4172 
4173 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
4174 {
4175     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
4176 }
4177 
4178 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4179 {
4180     tcg_gen_ori_i32(dst, src, INT32_MIN);
4181 }
4182 
4183 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4184 {
4185     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4186 }
4187 
4188 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4189 {
4190     tcg_gen_ori_i64(dst, src, INT64_MIN);
4191 }
4192 
4193 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4194 {
4195     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4196 }
4197 
4198 /*
4199  * Float class 1
4200  */
4201 
4202 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4203 {
4204     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4205 }
4206 
4207 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4208 {
4209     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4210 }
4211 
4212 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4213 {
4214     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4215 }
4216 
4217 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4218 {
4219     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4220 }
4221 
4222 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4223 {
4224     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4225 }
4226 
4227 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4228 {
4229     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4230 }
4231 
4232 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4233 {
4234     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4235 }
4236 
4237 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4238 {
4239     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4240 }
4241 
4242 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4243 {
4244     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4245 }
4246 
4247 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4248 {
4249     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4250 }
4251 
4252 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4253 {
4254     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4255 }
4256 
4257 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4258 {
4259     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4260 }
4261 
4262 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4263 {
4264     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4265 }
4266 
4267 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4268 {
4269     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4270 }
4271 
4272 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4273 {
4274     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4275 }
4276 
4277 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4278 {
4279     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4280 }
4281 
4282 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4283 {
4284     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4285 }
4286 
4287 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4288 {
4289     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4290 }
4291 
4292 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4293 {
4294     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4295 }
4296 
4297 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4298 {
4299     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4300 }
4301 
4302 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4303 {
4304     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4305 }
4306 
4307 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4308 {
4309     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4310 }
4311 
4312 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4313 {
4314     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4315 }
4316 
4317 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4318 {
4319     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4320 }
4321 
4322 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4323 {
4324     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4325 }
4326 
4327 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4328 {
4329     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4330 }
4331 
4332 /*
4333  * Float class 2
4334  */
4335 
4336 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4337 {
4338     TCGv_i32 ta, tb, tc, ty;
4339 
4340     nullify_over(ctx);
4341 
4342     ta = load_frw0_i32(a->r1);
4343     tb = load_frw0_i32(a->r2);
4344     ty = tcg_constant_i32(a->y);
4345     tc = tcg_constant_i32(a->c);
4346 
4347     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4348 
4349     return nullify_end(ctx);
4350 }
4351 
4352 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4353 {
4354     TCGv_i64 ta, tb;
4355     TCGv_i32 tc, ty;
4356 
4357     nullify_over(ctx);
4358 
4359     ta = load_frd0(a->r1);
4360     tb = load_frd0(a->r2);
4361     ty = tcg_constant_i32(a->y);
4362     tc = tcg_constant_i32(a->c);
4363 
4364     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4365 
4366     return nullify_end(ctx);
4367 }
4368 
4369 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4370 {
4371     TCGCond tc = TCG_COND_TSTNE;
4372     uint32_t mask;
4373     TCGv_i64 t;
4374 
4375     nullify_over(ctx);
4376 
4377     t = tcg_temp_new_i64();
4378     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4379 
4380     if (a->y == 1) {
4381         switch (a->c) {
4382         case 0: /* simple */
4383             mask = R_FPSR_C_MASK;
4384             break;
4385         case 2: /* rej */
4386             tc = TCG_COND_TSTEQ;
4387             /* fallthru */
4388         case 1: /* acc */
4389             mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
4390             break;
4391         case 6: /* rej8 */
4392             tc = TCG_COND_TSTEQ;
4393             /* fallthru */
4394         case 5: /* acc8 */
4395             mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
4396             break;
4397         case 9: /* acc6 */
4398             mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
4399             break;
4400         case 13: /* acc4 */
4401             mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
4402             break;
4403         case 17: /* acc2 */
4404             mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
4405             break;
4406         default:
4407             gen_illegal(ctx);
4408             return true;
4409         }
4410     } else {
4411         unsigned cbit = (a->y ^ 1) - 1;
4412         mask = R_FPSR_CA0_MASK >> cbit;
4413     }
4414 
4415     ctx->null_cond = cond_make_ti(tc, t, mask);
4416     return nullify_end(ctx);
4417 }
4418 
4419 /*
4420  * Float class 3
4421  */
4422 
4423 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4424 {
4425     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4426 }
4427 
4428 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4429 {
4430     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4431 }
4432 
4433 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4434 {
4435     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4436 }
4437 
4438 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4439 {
4440     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4441 }
4442 
4443 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4444 {
4445     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4446 }
4447 
4448 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4449 {
4450     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4451 }
4452 
4453 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4454 {
4455     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4456 }
4457 
4458 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4459 {
4460     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4461 }
4462 
4463 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4464 {
4465     TCGv_i64 x, y;
4466 
4467     nullify_over(ctx);
4468 
4469     x = load_frw0_i64(a->r1);
4470     y = load_frw0_i64(a->r2);
4471     tcg_gen_mul_i64(x, x, y);
4472     save_frd(a->t, x);
4473 
4474     return nullify_end(ctx);
4475 }
4476 
4477 /* Convert the fmpyadd single-precision register encodings to standard.  */
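/* Field values 0-15 map to 16-31 (fr16-fr31); values 16-31 map to
   48-63, i.e. the right halves of those same registers.  */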
4478 static inline int fmpyadd_s_reg(unsigned r)
4479 {
4480     return (r & 16) * 2 + 16 + (r & 15);
4481 }
4482 
4483 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4484 {
4485     int tm = fmpyadd_s_reg(a->tm);
4486     int ra = fmpyadd_s_reg(a->ra);
4487     int ta = fmpyadd_s_reg(a->ta);
4488     int rm2 = fmpyadd_s_reg(a->rm2);
4489     int rm1 = fmpyadd_s_reg(a->rm1);
4490 
4491     nullify_over(ctx);
4492 
4493     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4494     do_fop_weww(ctx, ta, ta, ra,
4495                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4496 
4497     return nullify_end(ctx);
4498 }
4499 
4500 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4501 {
4502     return do_fmpyadd_s(ctx, a, false);
4503 }
4504 
4505 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4506 {
4507     return do_fmpyadd_s(ctx, a, true);
4508 }
4509 
4510 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4511 {
4512     nullify_over(ctx);
4513 
4514     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4515     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4516                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4517 
4518     return nullify_end(ctx);
4519 }
4520 
4521 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4522 {
4523     return do_fmpyadd_d(ctx, a, false);
4524 }
4525 
4526 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4527 {
4528     return do_fmpyadd_d(ctx, a, true);
4529 }
4530 
4531 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4532 {
4533     TCGv_i32 x, y, z;
4534 
4535     nullify_over(ctx);
4536     x = load_frw0_i32(a->rm1);
4537     y = load_frw0_i32(a->rm2);
4538     z = load_frw0_i32(a->ra3);
4539 
4540     if (a->neg) {
4541         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4542     } else {
4543         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4544     }
4545 
4546     save_frw_i32(a->t, x);
4547     return nullify_end(ctx);
4548 }
4549 
4550 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4551 {
4552     TCGv_i64 x, y, z;
4553 
4554     nullify_over(ctx);
4555     x = load_frd0(a->rm1);
4556     y = load_frd0(a->rm2);
4557     z = load_frd0(a->ra3);
4558 
4559     if (a->neg) {
4560         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4561     } else {
4562         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4563     }
4564 
4565     save_frd(a->t, x);
4566     return nullify_end(ctx);
4567 }
4568 
4569 /* Emulate PDC BTLB, called by SeaBIOS-hppa */
4570 static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
4571 {
4572     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4573 #ifndef CONFIG_USER_ONLY
4574     nullify_over(ctx);
4575     gen_helper_diag_btlb(tcg_env);
4576     return nullify_end(ctx);
4577 #endif
4578 }
4579 
4580 /* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
4581 static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
4582 {
4583     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4584 #ifndef CONFIG_USER_ONLY
4585     nullify_over(ctx);
4586     gen_helper_diag_console_output(tcg_env);
4587     return nullify_end(ctx);
4588 #endif
4589 }
4590 
4591 static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4592 {
4593     return !ctx->is_pa20 && do_getshadowregs(ctx);
4594 }
4595 
4596 static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
4597 {
4598     return ctx->is_pa20 && do_getshadowregs(ctx);
4599 }
4600 
4601 static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4602 {
4603     return !ctx->is_pa20 && do_putshadowregs(ctx);
4604 }
4605 
4606 static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
4607 {
4608     return ctx->is_pa20 && do_putshadowregs(ctx);
4609 }
4610 
4611 static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
4612 {
4613     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4614     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4615     return true;
4616 }
4617 
4618 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4619 {
4620     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4621     uint64_t cs_base;
4622     int bound;
4623 
4624     ctx->cs = cs;
4625     ctx->tb_flags = ctx->base.tb->flags;
4626     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4627     ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
4628 
4629 #ifdef CONFIG_USER_ONLY
4630     ctx->privilege = PRIV_USER;
4631     ctx->mmu_idx = MMU_USER_IDX;
4632     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4633 #else
4634     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4635     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4636                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4637                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
4638 #endif
4639 
4640     cs_base = ctx->base.tb->cs_base;
4641     ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;
4642 
4643     if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
4644         ctx->iaq_b.space = cpu_iasq_b;
4645         ctx->iaq_b.base = cpu_iaoq_b;
4646     } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
4647         ctx->iaq_b.base = cpu_iaoq_b;
4648     } else {
4649         uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
4650         uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
4651         ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
4652     }
4653 
4654     ctx->zero = tcg_constant_i64(0);
4655 
4656     /* Bound the number of instructions by those left on the page.  */
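    /* -(pc_first | TARGET_PAGE_MASK) is the number of bytes
       remaining on the current page.  */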
4657     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4658     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4659 }
4660 
4661 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4662 {
4663     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4664 
4665     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4666     ctx->null_cond = cond_make_f();
4667     ctx->psw_n_nonzero = false;
4668     if (ctx->tb_flags & PSW_N) {
4669         ctx->null_cond.c = TCG_COND_ALWAYS;
4670         ctx->psw_n_nonzero = true;
4671     }
4672     ctx->null_lab = NULL;
4673 }
4674 
4675 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4676 {
4677     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4678     uint64_t iaoq_f, iaoq_b;
4679     int64_t diff;
4680 
4681     tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));
4682 
4683     iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
4684     if (iaqe_variable(&ctx->iaq_b)) {
4685         diff = INT32_MIN;
4686     } else {
4687         iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
4688         diff = iaoq_b - iaoq_f;
4689         /* Direct branches can only produce a 24-bit displacement. */
4690         tcg_debug_assert(diff == (int32_t)diff);
4691         tcg_debug_assert(diff != INT32_MIN);
4692     }
4693 
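    /* Record the page offset of IAOQ_F and the distance to IAOQ_B,
       for restoring state when unwinding from an exception.  */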
4694     tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
4695     ctx->insn_start_updated = false;
4696 }
4697 
4698 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4699 {
4700     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4701     CPUHPPAState *env = cpu_env(cs);
4702     DisasJumpType ret;
4703 
4704     /* Execute one insn.  */
4705 #ifdef CONFIG_USER_ONLY
4706     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4707         do_page_zero(ctx);
4708         ret = ctx->base.is_jmp;
4709         assert(ret != DISAS_NEXT);
4710     } else
4711 #endif
4712     {
4713         /* Always fetch the insn, even if nullified, so that we check
4714            the page permissions for execute.  */
4715         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4716 
4717         /*
4718          * Set up the IA queue for the next insn.
4719          * This will be overwritten by a branch.
4720          */
4721         ctx->iaq_n = NULL;
4722         memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
4723         ctx->psw_b_next = false;
4724 
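        /*
         * PSW[N] is known to be set: this insn is nullified.
         * Consume the nullification and skip decode entirely.
         */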
4725         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4726             ctx->null_cond.c = TCG_COND_NEVER;
4727             ret = DISAS_NEXT;
4728         } else {
4729             ctx->insn = insn;
4730             if (!decode(ctx, insn)) {
4731                 gen_illegal(ctx);
4732             }
4733             ret = ctx->base.is_jmp;
4734             assert(ctx->null_lab == NULL);
4735         }
4736 
4737         if (ret != DISAS_NORETURN) {
4738             set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
4739         }
4740     }
4741 
4742     /* If the TranslationBlock must end, do so. */
4743     ctx->base.pc_next += 4;
4744     if (ret != DISAS_NEXT) {
4745         return;
4746     }
4747     /* Note this also detects a privilege change. */
4748     if (iaqe_variable(&ctx->iaq_b)
4749         || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
4750         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
4751         return;
4752     }
4753 
4754     /*
4755      * Advance the insn queue.
4756      * The only exit now is DISAS_TOO_MANY from the translator loop.
4757      */
4758     ctx->iaq_f.disp = ctx->iaq_b.disp;
4759     if (!ctx->iaq_n) {
4760         ctx->iaq_b.disp += 4;
4761         return;
4762     }
4763     /*
4764      * If IAQ_Next is variable in any way, we need to copy into the
4765      * IAQ_Back globals, in case the next insn raises an exception.
4766      */
4767     if (ctx->iaq_n->base) {
4768         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
4769         ctx->iaq_b.base = cpu_iaoq_b;
4770         ctx->iaq_b.disp = 0;
4771     } else {
4772         ctx->iaq_b.disp = ctx->iaq_n->disp;
4773     }
4774     if (ctx->iaq_n->space) {
4775         tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
4776         ctx->iaq_b.space = cpu_iasq_b;
4777     }
4778 }
4779 
4780 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4781 {
4782     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4783     DisasJumpType is_jmp = ctx->base.is_jmp;
4784     /* Assume the insn queue has not been advanced. */
4785     DisasIAQE *f = &ctx->iaq_b;
4786     DisasIAQE *b = ctx->iaq_n;
4787 
4788     switch (is_jmp) {
4789     case DISAS_NORETURN:
4790         break;
4791     case DISAS_TOO_MANY:
4792         /* The insn queue has not been advanced. */
4793         f = &ctx->iaq_f;
4794         b = &ctx->iaq_b;
4795         /* FALLTHRU */
4796     case DISAS_IAQ_N_STALE:
4797         if (use_goto_tb(ctx, f, b)
4798             && (ctx->null_cond.c == TCG_COND_NEVER
4799                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4800             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4801             gen_goto_tb(ctx, 0, f, b);
4802             break;
4803         }
4804         /* FALLTHRU */
4805     case DISAS_IAQ_N_STALE_EXIT:
4806         install_iaq_entries(ctx, f, b);
4807         nullify_save(ctx);
4808         if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
4809             tcg_gen_exit_tb(NULL, 0);
4810             break;
4811         }
4812         /* FALLTHRU */
4813     case DISAS_IAQ_N_UPDATED:
4814         tcg_gen_lookup_and_goto_ptr();
4815         break;
4816     case DISAS_EXIT:
4817         tcg_gen_exit_tb(NULL, 0);
4818         break;
4819     default:
4820         g_assert_not_reached();
4821     }
4822 
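    /* Emit the out-of-line exception paths queued during translation. */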
4823     for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
4824         gen_set_label(e->lab);
4825         if (e->set_n >= 0) {
4826             tcg_gen_movi_i64(cpu_psw_n, e->set_n);
4827         }
4828         if (e->set_iir) {
4829             tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
4830                            offsetof(CPUHPPAState, cr[CR_IIR]));
4831         }
4832         install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
4833         gen_excp_1(e->excp);
4834     }
4835 }
4836 
4837 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4838                               CPUState *cs, FILE *logfile)
4839 {
4840     target_ulong pc = dcbase->pc_first;
4841 
4842 #ifdef CONFIG_USER_ONLY
4843     switch (pc) {
4844     case 0x00:
4845         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4846         return;
4847     case 0xb0:
4848         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4849         return;
4850     case 0xe0:
4851         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4852         return;
4853     case 0x100:
4854         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4855         return;
4856     }
4857 #endif
4858 
4859     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4860     target_disas(logfile, cs, pc, dcbase->tb->size);
4861 }
4862 
4863 static const TranslatorOps hppa_tr_ops = {
4864     .init_disas_context = hppa_tr_init_disas_context,
4865     .tb_start           = hppa_tr_tb_start,
4866     .insn_start         = hppa_tr_insn_start,
4867     .translate_insn     = hppa_tr_translate_insn,
4868     .tb_stop            = hppa_tr_tb_stop,
4869     .disas_log          = hppa_tr_disas_log,
4870 };
4871 
4872 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4873                            vaddr pc, void *host_pc)
4874 {
4875     DisasContext ctx = { };
4876     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4877 }
4878