/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasIAQE {
    /* IASQ; may be null for no change from TB. */
    TCGv_i64 space;
    /* IAOQ base; may be null for relative address. */
    TCGv_i64 base;
    /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
    int64_t disp;
} DisasIAQE;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    uint32_t insn;
    bool set_iir;
    int8_t set_n;
    uint8_t excp;
    /* Saved state at parent insn. */
    DisasIAQE iaq_f, iaq_b;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* IAQ_Front, IAQ_Back. */
    DisasIAQE iaq_f, iaq_b;
    /* IAQ_Next, for jumps, otherwise null for simple advance. */
    DisasIAQE iaq_j, *iaq_n;

    /* IAOQ_Front at entry to TB. */
    uint64_t iaoq_first;
    uint64_t gva_offset_mask;

    DisasCond null_cond;
    TCGLabel *null_lab;

    DisasDelayException *delay_excp_list;
    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    uint32_t psw_xb;
    bool psw_n_nonzero;
    bool psw_b_next;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}

/* The space register field is stored inverted: 0 selects sr0 explicitly,
   rather than a space inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
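
/*
 * Illustrative mapping for the expanders above (m = M bit, a = A bit):
 *   m:a = 0x -> 0    no base register update
 *   m:a = 10 -> 1    post-modify (update the base after the access)
 *   m:a = 11 -> -1   pre-modify  (update the base before the access)
 * This is the same tri-state encoding consumed by do_load/do_store below.
 */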

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
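
/*
 * Worked example (illustrative): for x = 1, y = 3 the field encodes
 * val = 0b100011 = 0x23, and (val ^ 31) + 1 = (0x23 ^ 0x1f) + 1
 * = 0x3c + 1 = 61, matching the official 32 * 1 + 32 - 3 = 61.
 */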

/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
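
/*
 * Worked example for expand_16 (illustrative): with the sign bit set,
 * im13 = 0 and s = 2, narrow mode yields i = -1 << 13 = -8192; with
 * PSW.W set the s field folds into the immediate, i ^= 2 << 13, giving
 * -24576 (0xffffa000 as 32 bits).  This matches assemble_16, where the
 * space-register selection bits extend the displacement in wide mode.
 */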

/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}

/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    return ctx->is_pa20 & val;
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
static TCGv_i32 cpu_psw_xb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    return cond_make_tt(c, a0, tcg_constant_i64(imm));
}

static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_ti(c, tmp, imm);
}

static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tt(c, t0, t1);
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/*
 * Write a value to psw_xb, bearing in mind the known value.
 * To be used just before exiting the TB, so do not update the known value.
 */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}

/* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        ctx->null_cond = cond_make_f();
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static bool iaqe_variable(const DisasIAQE *e)
{
    return e->base || e->space;
}

static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
{
    return (DisasIAQE){
        .space = e->space,
        .base = e->base,
        .disp = e->disp + disp,
    };
}

static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .disp = ctx->iaq_f.disp + 8 + disp,
    };
}

static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .base = var,
    };
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            const DisasIAQE *src)
{
    tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
}

static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}

static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        DisasDelayException *e = delay_excp(ctx, exc);
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        const DisasIAQE *f, const DisasIAQE *b)
{
    install_iaq_entries(ctx, f, b);
    if (use_goto_tb(ctx, f, b)) {
        tcg_gen_goto_tb(which);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    uint64_t imm;

    switch (cf >> 1) {
    case 0:  /* never / always */
    case 4:  /* undef, C */
    case 5:  /* undef, C & !Z */
    case 6:  /* undef, V */
        return cf & 1 ? cond_make_t() : cond_make_f();
    case 1:  /* == / <> */
        tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
        imm = d ? 0 : UINT32_MAX;
        break;
    case 2:  /* < / >= */
        tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
        imm = d ? 0 : 1ull << 31;
        break;
    case 3:  /* <= / > */
        tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
        if (!d) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, res);
            return cond_make_ti(tc, tmp, 0);
        }
        return cond_make_vi(tc, res, 0);
    case 7: /* OD / EV */
        tc = TCG_COND_TSTNE;
        imm = 1;
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    return cond_make_vi(tc, res, imm);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit zero conditions.  */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);

    return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}
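
/*
 * Illustrative check of the bithack above, SBZ (some byte zero) with
 * 32-bit res = 0x11223300: ones = 0x01010101, sgns = 0x80808080.
 *   res - ones          = 0x102131ff
 *   (res - ones) & ~res = 0x000100ff
 *   ... & sgns          = 0x00000080 != 0, detecting the zero byte.
 */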

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (!d) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

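    /*
     * A sketch of the usual two's-complement rule (illustrative):
     * signed overflow occurs iff in1 and in2 have the same sign and
     * res has the opposite sign, i.e. in the sign bit of
     *     (res ^ in1) & ~(in1 ^ in2).
     * E.g. for 8-bit values, 0x7f + 0x01 = 0x80 gives
     *     (0x80 ^ 0x7f) & ~(0x7f ^ 0x01) = 0xff & 0x81 = 0x81 (sign set).
     */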
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}

/* Compute unsigned overflow for addition.  */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    if (shift == 0) {
        return get_carry(ctx, d, cb, cb_msb);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
        tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
        return tmp;
    }
}

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void gen_tc(DisasContext *ctx, DisasCond *cond)
{
    DisasDelayException *e;

    switch (cond->c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        gen_excp_iir(ctx, EXCP_COND);
        break;
    default:
        e = delay_excp(ctx, EXCP_COND);
        tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
        /* In the non-trap path, the condition is known false. */
        *cond = cond_make_f();
        break;
    }
}

static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required.  */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }
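
    /*
     * Illustrative check of the seeding trick above (not part of the
     * translator): the 128-bit subtraction (1:in1) - (0:in2) yields
     * low = in1 - in2 and high = 1 - borrow, so the high word equals
     * the carry-out that computing in1 + ~in2 + 1 with add2 would
     * have produced.
     */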

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
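/*
 * For example (illustrative): a 3-bit space specifier of 5 arrives here
 * as ~5 = -6; sp < 0 is detected, sp = ~sp recovers 5, and SR[5] is
 * loaded directly instead of treating 0 as "infer from the base".
 */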
1523 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1524 {
1525     TCGv_ptr ptr;
1526     TCGv_i64 tmp;
1527     TCGv_i64 spc;
1528 
1529     if (sp != 0) {
1530         if (sp < 0) {
1531             sp = ~sp;
1532         }
1533         spc = tcg_temp_new_i64();
1534         load_spr(ctx, spc, sp);
1535         return spc;
1536     }
1537     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1538         return cpu_srH;
1539     }
1540 
1541     ptr = tcg_temp_new_ptr();
1542     tmp = tcg_temp_new_i64();
1543     spc = tcg_temp_new_i64();
1544 
1545     /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1546     tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1547     tcg_gen_andi_i64(tmp, tmp, 030);
1548     tcg_gen_trunc_i64_ptr(ptr, tmp);
1549 
1550     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1551     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1552 
1553     return spc;
1554 }
1555 #endif
1556 
1557 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1558                      unsigned rb, unsigned rx, int scale, int64_t disp,
1559                      unsigned sp, int modify, bool is_phys)
1560 {
1561     TCGv_i64 base = load_gpr(ctx, rb);
1562     TCGv_i64 ofs;
1563     TCGv_i64 addr;
1564 
1565     set_insn_breg(ctx, rb);
1566 
1567     /* Note that RX is mutually exclusive with DISP.  */
1568     if (rx) {
1569         ofs = tcg_temp_new_i64();
1570         tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1571         tcg_gen_add_i64(ofs, ofs, base);
1572     } else if (disp || modify) {
1573         ofs = tcg_temp_new_i64();
1574         tcg_gen_addi_i64(ofs, base, disp);
1575     } else {
1576         ofs = base;
1577     }
1578 
1579     *pofs = ofs;
1580     *pgva = addr = tcg_temp_new_i64();
1581     tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
1582                      ctx->gva_offset_mask);
1583 #ifndef CONFIG_USER_ONLY
1584     if (!is_phys) {
1585         tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1586     }
1587 #endif
1588 }
1589 
1590 /* Emit a memory load.  The modify parameter should be
1591  * < 0 for pre-modify,
1592  * > 0 for post-modify,
1593  * = 0 for no base register update.
1594  */
1595 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1596                        unsigned rx, int scale, int64_t disp,
1597                        unsigned sp, int modify, MemOp mop)
1598 {
1599     TCGv_i64 ofs;
1600     TCGv_i64 addr;
1601 
1602     /* Caller uses nullify_over/nullify_end.  */
1603     assert(ctx->null_cond.c == TCG_COND_NEVER);
1604 
1605     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1606              MMU_DISABLED(ctx));
1607     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1608     if (modify) {
1609         save_gpr(ctx, rb, ofs);
1610     }
1611 }
1612 
1613 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1614                        unsigned rx, int scale, int64_t disp,
1615                        unsigned sp, int modify, MemOp mop)
1616 {
1617     TCGv_i64 ofs;
1618     TCGv_i64 addr;
1619 
1620     /* Caller uses nullify_over/nullify_end.  */
1621     assert(ctx->null_cond.c == TCG_COND_NEVER);
1622 
1623     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1624              MMU_DISABLED(ctx));
1625     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1626     if (modify) {
1627         save_gpr(ctx, rb, ofs);
1628     }
1629 }
1630 
1631 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1632                         unsigned rx, int scale, int64_t disp,
1633                         unsigned sp, int modify, MemOp mop)
1634 {
1635     TCGv_i64 ofs;
1636     TCGv_i64 addr;
1637 
1638     /* Caller uses nullify_over/nullify_end.  */
1639     assert(ctx->null_cond.c == TCG_COND_NEVER);
1640 
1641     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1642              MMU_DISABLED(ctx));
1643     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1644     if (modify) {
1645         save_gpr(ctx, rb, ofs);
1646     }
1647 }
1648 
1649 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1650                         unsigned rx, int scale, int64_t disp,
1651                         unsigned sp, int modify, MemOp mop)
1652 {
1653     TCGv_i64 ofs;
1654     TCGv_i64 addr;
1655 
1656     /* Caller uses nullify_over/nullify_end.  */
1657     assert(ctx->null_cond.c == TCG_COND_NEVER);
1658 
1659     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1660              MMU_DISABLED(ctx));
1661     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1662     if (modify) {
1663         save_gpr(ctx, rb, ofs);
1664     }
1665 }
1666 
1667 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1668                     unsigned rx, int scale, int64_t disp,
1669                     unsigned sp, int modify, MemOp mop)
1670 {
1671     TCGv_i64 dest;
1672 
1673     nullify_over(ctx);
1674 
1675     if (modify == 0) {
1676         /* No base register update.  */
1677         dest = dest_gpr(ctx, rt);
1678     } else {
1679         /* Make sure if RT == RB, we see the result of the load.  */
1680         dest = tcg_temp_new_i64();
1681     }
1682     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1683     save_gpr(ctx, rt, dest);
1684 
1685     return nullify_end(ctx);
1686 }
1687 
1688 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1689                       unsigned rx, int scale, int64_t disp,
1690                       unsigned sp, int modify)
1691 {
1692     TCGv_i32 tmp;
1693 
1694     nullify_over(ctx);
1695 
1696     tmp = tcg_temp_new_i32();
1697     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1698     save_frw_i32(rt, tmp);
1699 
1700     if (rt == 0) {
1701         gen_helper_loaded_fr0(tcg_env);
1702     }
1703 
1704     return nullify_end(ctx);
1705 }
1706 
1707 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1708 {
1709     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1710                      a->disp, a->sp, a->m);
1711 }
1712 
1713 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1714                       unsigned rx, int scale, int64_t disp,
1715                       unsigned sp, int modify)
1716 {
1717     TCGv_i64 tmp;
1718 
1719     nullify_over(ctx);
1720 
1721     tmp = tcg_temp_new_i64();
1722     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1723     save_frd(rt, tmp);
1724 
1725     if (rt == 0) {
1726         gen_helper_loaded_fr0(tcg_env);
1727     }
1728 
1729     return nullify_end(ctx);
1730 }
1731 
1732 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1733 {
1734     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1735                      a->disp, a->sp, a->m);
1736 }
1737 
1738 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1739                      int64_t disp, unsigned sp,
1740                      int modify, MemOp mop)
1741 {
1742     nullify_over(ctx);
1743     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1744     return nullify_end(ctx);
1745 }
1746 
1747 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1748                        unsigned rx, int scale, int64_t disp,
1749                        unsigned sp, int modify)
1750 {
1751     TCGv_i32 tmp;
1752 
1753     nullify_over(ctx);
1754 
1755     tmp = load_frw_i32(rt);
1756     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1757 
1758     return nullify_end(ctx);
1759 }
1760 
1761 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1762 {
1763     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1764                       a->disp, a->sp, a->m);
1765 }
1766 
1767 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1768                        unsigned rx, int scale, int64_t disp,
1769                        unsigned sp, int modify)
1770 {
1771     TCGv_i64 tmp;
1772 
1773     nullify_over(ctx);
1774 
1775     tmp = load_frd(rt);
1776     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1777 
1778     return nullify_end(ctx);
1779 }
1780 
1781 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1782 {
1783     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1784                       a->disp, a->sp, a->m);
1785 }
1786 
1787 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1788                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1789 {
1790     TCGv_i32 tmp;
1791 
1792     nullify_over(ctx);
1793     tmp = load_frw0_i32(ra);
1794 
1795     func(tmp, tcg_env, tmp);
1796 
1797     save_frw_i32(rt, tmp);
1798     return nullify_end(ctx);
1799 }
1800 
1801 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1802                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1803 {
1804     TCGv_i32 dst;
1805     TCGv_i64 src;
1806 
1807     nullify_over(ctx);
1808     src = load_frd(ra);
1809     dst = tcg_temp_new_i32();
1810 
1811     func(dst, tcg_env, src);
1812 
1813     save_frw_i32(rt, dst);
1814     return nullify_end(ctx);
1815 }
1816 
1817 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1818                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1819 {
1820     TCGv_i64 tmp;
1821 
1822     nullify_over(ctx);
1823     tmp = load_frd0(ra);
1824 
1825     func(tmp, tcg_env, tmp);
1826 
1827     save_frd(rt, tmp);
1828     return nullify_end(ctx);
1829 }
1830 
1831 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1832                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1833 {
1834     TCGv_i32 src;
1835     TCGv_i64 dst;
1836 
1837     nullify_over(ctx);
1838     src = load_frw0_i32(ra);
1839     dst = tcg_temp_new_i64();
1840 
1841     func(dst, tcg_env, src);
1842 
1843     save_frd(rt, dst);
1844     return nullify_end(ctx);
1845 }
1846 
1847 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1848                         unsigned ra, unsigned rb,
1849                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1850 {
1851     TCGv_i32 a, b;
1852 
1853     nullify_over(ctx);
1854     a = load_frw0_i32(ra);
1855     b = load_frw0_i32(rb);
1856 
1857     func(a, tcg_env, a, b);
1858 
1859     save_frw_i32(rt, a);
1860     return nullify_end(ctx);
1861 }
1862 
1863 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1864                         unsigned ra, unsigned rb,
1865                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1866 {
1867     TCGv_i64 a, b;
1868 
1869     nullify_over(ctx);
1870     a = load_frd0(ra);
1871     b = load_frd0(rb);
1872 
1873     func(a, tcg_env, a, b);
1874 
1875     save_frd(rt, a);
1876     return nullify_end(ctx);
1877 }
1878 
1879 /* Emit an unconditional branch to a direct target, which may or may not
1880    have already had nullification handled.  */
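/* A summary of the two shapes below (illustrative, drawn from the code
   rather than the manual): if no nullification is pending, the target
   simply becomes IAQ_Next, possibly via the nullify-skip fast path;
   otherwise both the branched exit and the fall-through (branch
   nullified) exit are emitted as separate goto_tb paths.  */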
1881 static bool do_dbranch(DisasContext *ctx, int64_t disp,
1882                        unsigned link, bool is_n)
1883 {
1884     ctx->iaq_j = iaqe_branchi(ctx, disp);
1885 
1886     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1887         install_link(ctx, link, false);
1888         if (is_n) {
1889             if (use_nullify_skip(ctx)) {
1890                 nullify_set(ctx, 0);
1891                 store_psw_xb(ctx, 0);
1892                 gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1893                 ctx->base.is_jmp = DISAS_NORETURN;
1894                 return true;
1895             }
1896             ctx->null_cond.c = TCG_COND_ALWAYS;
1897         }
1898         ctx->iaq_n = &ctx->iaq_j;
1899         ctx->psw_b_next = true;
1900     } else {
1901         nullify_over(ctx);
1902 
1903         install_link(ctx, link, false);
1904         if (is_n && use_nullify_skip(ctx)) {
1905             nullify_set(ctx, 0);
1906             store_psw_xb(ctx, 0);
1907             gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1908         } else {
1909             nullify_set(ctx, is_n);
1910             store_psw_xb(ctx, PSW_B);
1911             gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
1912         }
1913         nullify_end(ctx);
1914 
1915         nullify_set(ctx, 0);
1916         store_psw_xb(ctx, 0);
1917         gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
1918         ctx->base.is_jmp = DISAS_NORETURN;
1919     }
1920     return true;
1921 }
1922 
1923 /* Emit a conditional branch to a direct target.  If the branch itself
1924    is nullified, we should have already used nullify_over.  */
1925 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1926                        DisasCond *cond)
1927 {
1928     DisasIAQE next;
1929     TCGLabel *taken = NULL;
1930     TCGCond c = cond->c;
1931     bool n;
1932 
1933     assert(ctx->null_cond.c == TCG_COND_NEVER);
1934 
1935     /* Handle TRUE as a direct branch.  */
1936     if (c == TCG_COND_ALWAYS) {
1937         return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
1938     }
1939 
1940     taken = gen_new_label();
1941     tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1942 
1943     /* Not taken: Condition not satisfied; nullify on backward branches. */
1944     n = is_n && disp < 0;
1945     if (n && use_nullify_skip(ctx)) {
1946         nullify_set(ctx, 0);
1947         store_psw_xb(ctx, 0);
1948         next = iaqe_incr(&ctx->iaq_b, 4);
1949         gen_goto_tb(ctx, 0, &next, NULL);
1950     } else {
1951         if (!n && ctx->null_lab) {
1952             gen_set_label(ctx->null_lab);
1953             ctx->null_lab = NULL;
1954         }
1955         nullify_set(ctx, n);
1956         store_psw_xb(ctx, 0);
1957         gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
1958     }
1959 
1960     gen_set_label(taken);
1961 
1962     /* Taken: Condition satisfied; nullify on forward branches.  */
1963     n = is_n && disp >= 0;
1964 
1965     next = iaqe_branchi(ctx, disp);
1966     if (n && use_nullify_skip(ctx)) {
1967         nullify_set(ctx, 0);
1968         store_psw_xb(ctx, 0);
1969         gen_goto_tb(ctx, 1, &next, NULL);
1970     } else {
1971         nullify_set(ctx, n);
1972         store_psw_xb(ctx, PSW_B);
1973         gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
1974     }
1975 
1976     /* Not taken: the branch itself was nullified.  */
1977     if (ctx->null_lab) {
1978         gen_set_label(ctx->null_lab);
1979         ctx->null_lab = NULL;
1980         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1981     } else {
1982         ctx->base.is_jmp = DISAS_NORETURN;
1983     }
1984     return true;
1985 }
1986 
1987 /*
1988  * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
1989  * This handles nullification of the branch itself.
1990  */
1991 static bool do_ibranch(DisasContext *ctx, unsigned link,
1992                        bool with_sr0, bool is_n)
1993 {
1994     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1995         install_link(ctx, link, with_sr0);
1996         if (is_n) {
1997             if (use_nullify_skip(ctx)) {
1998                 install_iaq_entries(ctx, &ctx->iaq_j, NULL);
1999                 nullify_set(ctx, 0);
2000                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2001                 return true;
2002             }
2003             ctx->null_cond.c = TCG_COND_ALWAYS;
2004         }
2005         ctx->iaq_n = &ctx->iaq_j;
2006         ctx->psw_b_next = true;
2007         return true;
2008     }
2009 
2010     nullify_over(ctx);
2011 
2012     install_link(ctx, link, with_sr0);
2013     if (is_n && use_nullify_skip(ctx)) {
2014         install_iaq_entries(ctx, &ctx->iaq_j, NULL);
2015         nullify_set(ctx, 0);
2016         store_psw_xb(ctx, 0);
2017     } else {
2018         install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
2019         nullify_set(ctx, is_n);
2020         store_psw_xb(ctx, PSW_B);
2021     }
2022 
2023     tcg_gen_lookup_and_goto_ptr();
2024     ctx->base.is_jmp = DISAS_NORETURN;
2025     return nullify_end(ctx);
2026 }
2027 
2028 /* Implement
2029  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
2030  *      IAOQ_Next{30..31} ← GR[b]{30..31};
2031  *    else
2032  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2033  * which keeps the privilege level from being increased.
2034  */
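/* A worked example (illustrative values): PA-RISC numbers bits from the
   MSB, so IAOQ{30..31} are the two low-order bits, with 0 the most
   privileged level.  With ctx->privilege == 2, a target ending in 0b01
   is clamped: (offset & -4) | 2 is the larger value, so umax selects
   it.  A target ending in 0b11 is untouched, since the original value
   is already the maximum.  */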
2035 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
2036 {
2037     TCGv_i64 dest = tcg_temp_new_i64();
2038     switch (ctx->privilege) {
2039     case 0:
2040         /* Privilege 0 is maximum and is allowed to decrease.  */
2041         tcg_gen_mov_i64(dest, offset);
2042         break;
2043     case 3:
2044         /* Privilege 3 is minimum and is never allowed to increase.  */
2045         tcg_gen_ori_i64(dest, offset, 3);
2046         break;
2047     default:
2048         tcg_gen_andi_i64(dest, offset, -4);
2049         tcg_gen_ori_i64(dest, dest, ctx->privilege);
2050         tcg_gen_umax_i64(dest, dest, offset);
2051         break;
2052     }
2053     return dest;
2054 }
2055 
2056 #ifdef CONFIG_USER_ONLY
2057 /* On Linux, page zero is normally marked execute only + gateway.
2058    Therefore normal read or write is supposed to fail, but specific
2059    offsets have kernel code mapped to raise permissions to implement
2060    system calls.  Handling this via an explicit check here, rather
2061    than in the "be disp(sr2,r0)" instruction that probably sent us
2062    here, is the easiest way to handle the branch delay slot on the
2063    aforementioned BE.  */
2064 static void do_page_zero(DisasContext *ctx)
2065 {
2066     assert(ctx->iaq_f.disp == 0);
2067 
2068     /* If by some means we get here with PSW[N]=1, that implies that
2069        the B,GATE instruction would be skipped, and we'd fault on the
2070        next insn within the privileged page.  */
2071     switch (ctx->null_cond.c) {
2072     case TCG_COND_NEVER:
2073         break;
2074     case TCG_COND_ALWAYS:
2075         tcg_gen_movi_i64(cpu_psw_n, 0);
2076         goto do_sigill;
2077     default:
2078         /* Since this is always the first (and only) insn within the
2079            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2080         g_assert_not_reached();
2081     }
2082 
2083     /* If PSW[B] is set, the B,GATE insn would trap. */
2084     if (ctx->psw_xb & PSW_B) {
2085         goto do_sigill;
2086     }
2087 
2088     switch (ctx->base.pc_first) {
2089     case 0x00: /* Null pointer call */
2090         gen_excp_1(EXCP_IMP);
2091         ctx->base.is_jmp = DISAS_NORETURN;
2092         break;
2093 
2094     case 0xb0: /* LWS */
2095         gen_excp_1(EXCP_SYSCALL_LWS);
2096         ctx->base.is_jmp = DISAS_NORETURN;
2097         break;
2098 
2099     case 0xe0: /* SET_THREAD_POINTER */
2100         {
2101             DisasIAQE next = { .base = tcg_temp_new_i64() };
2102 
2103             tcg_gen_st_i64(cpu_gr[26], tcg_env,
2104                            offsetof(CPUHPPAState, cr[27]));
2105             tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
2106             install_iaq_entries(ctx, &next, NULL);
2107             ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2108         }
2109         break;
2110 
2111     case 0x100: /* SYSCALL */
2112         gen_excp_1(EXCP_SYSCALL);
2113         ctx->base.is_jmp = DISAS_NORETURN;
2114         break;
2115 
2116     default:
2117     do_sigill:
2118         gen_excp_1(EXCP_ILL);
2119         ctx->base.is_jmp = DISAS_NORETURN;
2120         break;
2121     }
2122 }
2123 #endif
2124 
2125 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2126 {
2127     ctx->null_cond = cond_make_f();
2128     return true;
2129 }
2130 
2131 static bool trans_break(DisasContext *ctx, arg_break *a)
2132 {
2133     return gen_excp_iir(ctx, EXCP_BREAK);
2134 }
2135 
2136 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2137 {
2138     /* No point in nullifying the memory barrier.  */
2139     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2140 
2141     ctx->null_cond = cond_make_f();
2142     return true;
2143 }
2144 
2145 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2146 {
2147     TCGv_i64 dest = dest_gpr(ctx, a->t);
2148 
2149     copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
2150     tcg_gen_andi_i64(dest, dest, -4);
2151 
2152     save_gpr(ctx, a->t, dest);
2153     ctx->null_cond = cond_make_f();
2154     return true;
2155 }
2156 
2157 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2158 {
2159     unsigned rt = a->t;
2160     unsigned rs = a->sp;
2161     TCGv_i64 t0 = tcg_temp_new_i64();
2162 
2163     load_spr(ctx, t0, rs);
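    /* Space registers are kept left-justified in their 64-bit slots;
       the 32-bit space id comes back with a right shift (compare the
       matching left shift in trans_mtsp below).  */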
2164     tcg_gen_shri_i64(t0, t0, 32);
2165 
2166     save_gpr(ctx, rt, t0);
2167 
2168     ctx->null_cond = cond_make_f();
2169     return true;
2170 }
2171 
2172 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2173 {
2174     unsigned rt = a->t;
2175     unsigned ctl = a->r;
2176     TCGv_i64 tmp;
2177 
2178     switch (ctl) {
2179     case CR_SAR:
2180         if (a->e == 0) {
2181             /* MFSAR without ,W masks low 5 bits.  */
2182             tmp = dest_gpr(ctx, rt);
2183             tcg_gen_andi_i64(tmp, cpu_sar, 31);
2184             save_gpr(ctx, rt, tmp);
2185             goto done;
2186         }
2187         save_gpr(ctx, rt, cpu_sar);
2188         goto done;
2189     case CR_IT: /* Interval Timer */
2190         /* FIXME: Respect PSW_S bit.  */
2191         nullify_over(ctx);
2192         tmp = dest_gpr(ctx, rt);
2193         if (translator_io_start(&ctx->base)) {
2194             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2195         }
2196         gen_helper_read_interval_timer(tmp);
2197         save_gpr(ctx, rt, tmp);
2198         return nullify_end(ctx);
2199     case 26: /* cr26 and cr27 are readable at any privilege level. */
2200     case 27:
2201         break;
2202     default:
2203         /* All other control registers are privileged.  */
2204         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2205         break;
2206     }
2207 
2208     tmp = tcg_temp_new_i64();
2209     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2210     save_gpr(ctx, rt, tmp);
2211 
2212  done:
2213     ctx->null_cond = cond_make_f();
2214     return true;
2215 }
2216 
2217 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2218 {
2219     unsigned rr = a->r;
2220     unsigned rs = a->sp;
2221     TCGv_i64 tmp;
2222 
2223     if (rs >= 5) {
2224         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2225     }
2226     nullify_over(ctx);
2227 
2228     tmp = tcg_temp_new_i64();
2229     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2230 
2231     if (rs >= 4) {
2232         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2233         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2234     } else {
2235         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2236     }
2237 
2238     return nullify_end(ctx);
2239 }
2240 
2241 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2242 {
2243     unsigned ctl = a->t;
2244     TCGv_i64 reg;
2245     TCGv_i64 tmp;
2246 
2247     if (ctl == CR_SAR) {
2248         reg = load_gpr(ctx, a->r);
2249         tmp = tcg_temp_new_i64();
2250         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2251         save_or_nullify(ctx, cpu_sar, tmp);
2252 
2253         ctx->null_cond = cond_make_f();
2254         return true;
2255     }
2256 
2257     /* All other control registers are privileged or read-only.  */
2258     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2259 
2260 #ifndef CONFIG_USER_ONLY
2261     nullify_over(ctx);
2262 
2263     if (ctx->is_pa20) {
2264         reg = load_gpr(ctx, a->r);
2265     } else {
2266         reg = tcg_temp_new_i64();
2267         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2268     }
2269 
2270     switch (ctl) {
2271     case CR_IT:
2272         if (translator_io_start(&ctx->base)) {
2273             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2274         }
2275         gen_helper_write_interval_timer(tcg_env, reg);
2276         break;
2277     case CR_EIRR:
2278         /* Helper modifies interrupt lines and is therefore IO. */
2279         translator_io_start(&ctx->base);
2280         gen_helper_write_eirr(tcg_env, reg);
2281         /* Exit to re-evaluate interrupts in the main loop. */
2282         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2283         break;
2284 
2285     case CR_IIASQ:
2286     case CR_IIAOQ:
2287         /* FIXME: Respect PSW_Q bit */
2288         /* The write advances the queue and stores to the back element.  */
2289         tmp = tcg_temp_new_i64();
2290         tcg_gen_ld_i64(tmp, tcg_env,
2291                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2292         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2293         tcg_gen_st_i64(reg, tcg_env,
2294                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2295         break;
2296 
2297     case CR_PID1:
2298     case CR_PID2:
2299     case CR_PID3:
2300     case CR_PID4:
2301         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2303         gen_helper_change_prot_id(tcg_env);
2305         break;
2306 
2307     case CR_EIEM:
2308         /* Exit to re-evaluate interrupts in the main loop. */
2309         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2310         /* FALLTHRU */
2311     default:
2312         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2313         break;
2314     }
2315     return nullify_end(ctx);
2316 #endif
2317 }
2318 
2319 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2320 {
2321     TCGv_i64 tmp = tcg_temp_new_i64();
2322 
2323     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2324     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2325     save_or_nullify(ctx, cpu_sar, tmp);
2326 
2327     ctx->null_cond = cond_make_f();
2328     return true;
2329 }
2330 
2331 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2332 {
2333     TCGv_i64 dest = dest_gpr(ctx, a->t);
2334 
2335 #ifdef CONFIG_USER_ONLY
2336     /* We don't implement space registers in user mode. */
2337     tcg_gen_movi_i64(dest, 0);
2338 #else
2339     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2340     tcg_gen_shri_i64(dest, dest, 32);
2341 #endif
2342     save_gpr(ctx, a->t, dest);
2343 
2344     ctx->null_cond = cond_make_f();
2345     return true;
2346 }
2347 
2348 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2349 {
2350 #ifdef CONFIG_USER_ONLY
2351     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2352 #else
2353     TCGv_i64 tmp;
2354 
2355     /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
2356     if (a->i) {
2357         CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2358     }
2359 
2360     nullify_over(ctx);
2361 
2362     tmp = tcg_temp_new_i64();
2363     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2364     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2365     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2366     save_gpr(ctx, a->t, tmp);
2367 
2368     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2369     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2370     return nullify_end(ctx);
2371 #endif
2372 }
2373 
2374 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2375 {
2376     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2377 #ifndef CONFIG_USER_ONLY
2378     TCGv_i64 tmp;
2379 
2380     nullify_over(ctx);
2381 
2382     tmp = tcg_temp_new_i64();
2383     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2384     tcg_gen_ori_i64(tmp, tmp, a->i);
2385     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2386     save_gpr(ctx, a->t, tmp);
2387 
2388     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2389     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2390     return nullify_end(ctx);
2391 #endif
2392 }
2393 
2394 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2395 {
2396     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2397 #ifndef CONFIG_USER_ONLY
2398     TCGv_i64 tmp, reg;
2399     nullify_over(ctx);
2400 
2401     reg = load_gpr(ctx, a->r);
2402     tmp = tcg_temp_new_i64();
2403     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2404 
2405     /* Exit the TB to recognize new interrupts.  */
2406     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2407     return nullify_end(ctx);
2408 #endif
2409 }
2410 
2411 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2412 {
2413     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2414 #ifndef CONFIG_USER_ONLY
2415     nullify_over(ctx);
2416 
2417     if (rfi_r) {
2418         gen_helper_rfi_r(tcg_env);
2419     } else {
2420         gen_helper_rfi(tcg_env);
2421     }
2422     /* Exit the TB to recognize new interrupts.  */
2423     tcg_gen_exit_tb(NULL, 0);
2424     ctx->base.is_jmp = DISAS_NORETURN;
2425 
2426     return nullify_end(ctx);
2427 #endif
2428 }
2429 
2430 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2431 {
2432     return do_rfi(ctx, false);
2433 }
2434 
2435 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2436 {
2437     return do_rfi(ctx, true);
2438 }
2439 
2440 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2441 {
2442     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2443 #ifndef CONFIG_USER_ONLY
2444     set_psw_xb(ctx, 0);
2445     nullify_over(ctx);
2446     gen_helper_halt(tcg_env);
2447     ctx->base.is_jmp = DISAS_NORETURN;
2448     return nullify_end(ctx);
2449 #endif
2450 }
2451 
2452 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2453 {
2454     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2455 #ifndef CONFIG_USER_ONLY
2456     set_psw_xb(ctx, 0);
2457     nullify_over(ctx);
2458     gen_helper_reset(tcg_env);
2459     ctx->base.is_jmp = DISAS_NORETURN;
2460     return nullify_end(ctx);
2461 #endif
2462 }
2463 
2464 static bool do_getshadowregs(DisasContext *ctx)
2465 {
2466     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467     nullify_over(ctx);
2468     tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2469     tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2470     tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2471     tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2472     tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2473     tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2474     tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2475     return nullify_end(ctx);
2476 }
2477 
2478 static bool do_putshadowregs(DisasContext *ctx)
2479 {
2480     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2481     nullify_over(ctx);
2482     tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2483     tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2484     tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2485     tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2486     tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2487     tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2488     tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2489     return nullify_end(ctx);
2490 }
2491 
2492 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2493 {
2494     return do_getshadowregs(ctx);
2495 }
2496 
2497 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2498 {
2499     if (a->m) {
2500         TCGv_i64 dest = dest_gpr(ctx, a->b);
2501         TCGv_i64 src1 = load_gpr(ctx, a->b);
2502         TCGv_i64 src2 = load_gpr(ctx, a->x);
2503 
2504         /* The only thing we need to do is the base register modification.  */
2505         tcg_gen_add_i64(dest, src1, src2);
2506         save_gpr(ctx, a->b, dest);
2507     }
2508     ctx->null_cond = cond_make_f();
2509     return true;
2510 }
2511 
2512 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2513 {
2514     /* End TB for flush instruction cache, so we pick up new insns. */
2515     ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2516     return trans_nop_addrx(ctx, a);
2517 }
2518 
2519 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2520 {
2521     TCGv_i64 dest, ofs;
2522     TCGv_i32 level, want;
2523     TCGv_i64 addr;
2524 
2525     nullify_over(ctx);
2526 
2527     dest = dest_gpr(ctx, a->t);
2528     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2529 
2530     if (a->imm) {
2531         level = tcg_constant_i32(a->ri & 3);
2532     } else {
2533         level = tcg_temp_new_i32();
2534         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2535         tcg_gen_andi_i32(level, level, 3);
2536     }
2537     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2538 
2539     gen_helper_probe(dest, tcg_env, addr, level, want);
2540 
2541     save_gpr(ctx, a->t, dest);
2542     return nullify_end(ctx);
2543 }
2544 
2545 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2546 {
2547     if (ctx->is_pa20) {
2548         return false;
2549     }
2550     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2551 #ifndef CONFIG_USER_ONLY
2552     TCGv_i64 addr;
2553     TCGv_i64 ofs, reg;
2554 
2555     nullify_over(ctx);
2556 
2557     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2558     reg = load_gpr(ctx, a->r);
2559     if (a->addr) {
2560         gen_helper_itlba_pa11(tcg_env, addr, reg);
2561     } else {
2562         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2563     }
2564 
2565     /* Exit TB for TLB change if mmu is enabled.  */
2566     if (ctx->tb_flags & PSW_C) {
2567         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2568     }
2569     return nullify_end(ctx);
2570 #endif
2571 }
2572 
2573 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2574 {
2575     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2576 #ifndef CONFIG_USER_ONLY
2577     TCGv_i64 addr;
2578     TCGv_i64 ofs;
2579 
2580     nullify_over(ctx);
2581 
2582     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2583 
2584     /*
2585      * Page align now, rather than later, so that we can add in the
2586      * pa2.0 page_size field from the low 4 bits of GR[b].
2587      */
2588     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2589     if (ctx->is_pa20) {
2590         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2591     }
2592 
2593     if (local) {
2594         gen_helper_ptlb_l(tcg_env, addr);
2595     } else {
2596         gen_helper_ptlb(tcg_env, addr);
2597     }
2598 
2599     if (a->m) {
2600         save_gpr(ctx, a->b, ofs);
2601     }
2602 
2603     /* Exit TB for TLB change if mmu is enabled.  */
2604     if (ctx->tb_flags & PSW_C) {
2605         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2606     }
2607     return nullify_end(ctx);
2608 #endif
2609 }
2610 
2611 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2612 {
2613     return do_pxtlb(ctx, a, false);
2614 }
2615 
2616 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2617 {
2618     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2619 }
2620 
2621 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2622 {
2623     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2624 #ifndef CONFIG_USER_ONLY
2625     nullify_over(ctx);
2626 
2627     trans_nop_addrx(ctx, a);
2628     gen_helper_ptlbe(tcg_env);
2629 
2630     /* Exit TB for TLB change if mmu is enabled.  */
2631     if (ctx->tb_flags & PSW_C) {
2632         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2633     }
2634     return nullify_end(ctx);
2635 #endif
2636 }
2637 
2638 /*
2639  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2640  * See
2641  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2642  *     page 13-9 (195/206)
2643  */
2644 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2645 {
2646     if (ctx->is_pa20) {
2647         return false;
2648     }
2649     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2650 #ifndef CONFIG_USER_ONLY
2651     TCGv_i64 addr, atl, stl;
2652     TCGv_i64 reg;
2653 
2654     nullify_over(ctx);
2655 
2656     /*
2657      * FIXME:
2658      *  if (not (pcxl or pcxl2))
2659      *    return gen_illegal(ctx);
2660      */
2661 
2662     atl = tcg_temp_new_i64();
2663     stl = tcg_temp_new_i64();
2664     addr = tcg_temp_new_i64();
2665 
2666     tcg_gen_ld32u_i64(stl, tcg_env,
2667                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2668                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2669     tcg_gen_ld32u_i64(atl, tcg_env,
2670                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2671                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2672     tcg_gen_shli_i64(stl, stl, 32);
2673     tcg_gen_or_i64(addr, atl, stl);
2674 
2675     reg = load_gpr(ctx, a->r);
2676     if (a->addr) {
2677         gen_helper_itlba_pa11(tcg_env, addr, reg);
2678     } else {
2679         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2680     }
2681 
2682     /* Exit TB for TLB change if mmu is enabled.  */
2683     if (ctx->tb_flags & PSW_C) {
2684         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2685     }
2686     return nullify_end(ctx);
2687 #endif
2688 }
2689 
2690 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2691 {
2692     if (!ctx->is_pa20) {
2693         return false;
2694     }
2695     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2696 #ifndef CONFIG_USER_ONLY
2697     nullify_over(ctx);
2698     {
2699         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2700         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2701 
2702         if (a->data) {
2703             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2704         } else {
2705             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2706         }
2707     }
2708     /* Exit TB for TLB change if mmu is enabled.  */
2709     if (ctx->tb_flags & PSW_C) {
2710         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2711     }
2712     return nullify_end(ctx);
2713 #endif
2714 }
2715 
2716 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2717 {
2718     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2719 #ifndef CONFIG_USER_ONLY
2720     TCGv_i64 vaddr;
2721     TCGv_i64 ofs, paddr;
2722 
2723     nullify_over(ctx);
2724 
2725     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2726 
2727     paddr = tcg_temp_new_i64();
2728     gen_helper_lpa(paddr, tcg_env, vaddr);
2729 
2730     /* Note that the physical address result overrides the base
2730        modification.  */
2731     if (a->m) {
2732         save_gpr(ctx, a->b, ofs);
2733     }
2734     save_gpr(ctx, a->t, paddr);
2735 
2736     return nullify_end(ctx);
2737 #endif
2738 }
2739 
2740 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2741 {
2742     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2743 
2744     /* The Coherence Index is an implementation-defined function of the
2745        physical address.  Two addresses with the same CI have a coherent
2746        view of the cache.  Our implementation is to return 0 for all,
2747        since the entire address space is coherent.  */
2748     save_gpr(ctx, a->t, ctx->zero);
2749 
2750     ctx->null_cond = cond_make_f();
2751     return true;
2752 }
2753 
2754 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2755 {
2756     return do_add_reg(ctx, a, false, false, false, false);
2757 }
2758 
2759 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2760 {
2761     return do_add_reg(ctx, a, true, false, false, false);
2762 }
2763 
2764 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2765 {
2766     return do_add_reg(ctx, a, false, true, false, false);
2767 }
2768 
2769 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2770 {
2771     return do_add_reg(ctx, a, false, false, false, true);
2772 }
2773 
2774 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2775 {
2776     return do_add_reg(ctx, a, false, true, false, true);
2777 }
2778 
2779 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2780 {
2781     return do_sub_reg(ctx, a, false, false, false);
2782 }
2783 
2784 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2785 {
2786     return do_sub_reg(ctx, a, true, false, false);
2787 }
2788 
2789 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2790 {
2791     return do_sub_reg(ctx, a, false, false, true);
2792 }
2793 
2794 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2795 {
2796     return do_sub_reg(ctx, a, true, false, true);
2797 }
2798 
2799 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2800 {
2801     return do_sub_reg(ctx, a, false, true, false);
2802 }
2803 
2804 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2805 {
2806     return do_sub_reg(ctx, a, true, true, false);
2807 }
2808 
2809 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2810 {
2811     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2812 }
2813 
2814 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2815 {
2816     return do_log_reg(ctx, a, tcg_gen_and_i64);
2817 }
2818 
2819 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2820 {
2821     if (a->cf == 0) {
2822         unsigned r2 = a->r2;
2823         unsigned r1 = a->r1;
2824         unsigned rt = a->t;
2825 
2826         if (rt == 0) { /* NOP */
2827             ctx->null_cond = cond_make_f();
2828             return true;
2829         }
2830         if (r2 == 0) { /* COPY */
2831             if (r1 == 0) {
2832                 TCGv_i64 dest = dest_gpr(ctx, rt);
2833                 tcg_gen_movi_i64(dest, 0);
2834                 save_gpr(ctx, rt, dest);
2835             } else {
2836                 save_gpr(ctx, rt, cpu_gr[r1]);
2837             }
2838             ctx->null_cond = cond_make_f();
2839             return true;
2840         }
2841 #ifndef CONFIG_USER_ONLY
2842         /* These are QEMU extensions and are nops in the real architecture:
2843          *
2844          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2845          * or %r31,%r31,%r31 -- death loop; offline cpu
2846          *                      currently implemented as idle.
2847          */
2848         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2849             /* No need to check for supervisor, as userland can only pause
2850                until the next timer interrupt.  */
2851 
2852             set_psw_xb(ctx, 0);
2853 
2854             nullify_over(ctx);
2855 
2856             /* Advance the instruction queue.  */
2857             install_iaq_entries(ctx, &ctx->iaq_b, NULL);
2858             nullify_set(ctx, 0);
2859 
2860             /* Tell the qemu main loop to halt until this cpu has work.  */
2861             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2862                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2863             gen_excp_1(EXCP_HALTED);
2864             ctx->base.is_jmp = DISAS_NORETURN;
2865 
2866             return nullify_end(ctx);
2867         }
2868 #endif
2869     }
2870     return do_log_reg(ctx, a, tcg_gen_or_i64);
2871 }
2872 
2873 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2874 {
2875     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2876 }
2877 
2878 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2879 {
2880     TCGv_i64 tcg_r1, tcg_r2;
2881 
2882     if (a->cf) {
2883         nullify_over(ctx);
2884     }
2885     tcg_r1 = load_gpr(ctx, a->r1);
2886     tcg_r2 = load_gpr(ctx, a->r2);
2887     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2888     return nullify_end(ctx);
2889 }
2890 
2891 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2892 {
2893     TCGv_i64 tcg_r1, tcg_r2, dest;
2894 
2895     if (a->cf) {
2896         nullify_over(ctx);
2897     }
2898 
2899     tcg_r1 = load_gpr(ctx, a->r1);
2900     tcg_r2 = load_gpr(ctx, a->r2);
2901     dest = dest_gpr(ctx, a->t);
2902 
2903     tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2904     save_gpr(ctx, a->t, dest);
2905 
2906     ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2907     return nullify_end(ctx);
2908 }
2909 
2910 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2911 {
2912     TCGv_i64 tcg_r1, tcg_r2, tmp;
2913 
2914     if (a->cf == 0) {
2915         tcg_r2 = load_gpr(ctx, a->r2);
2916         tmp = dest_gpr(ctx, a->t);
2917 
2918         if (a->r1 == 0) {
2919             /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
2920             tcg_gen_not_i64(tmp, tcg_r2);
2921         } else {
2922             /*
2923              * Recall that r1 - r2 == r1 + ~r2 + 1.
2924              * Thus r1 + ~r2 == r1 - r2 - 1,
2925              * which does not require an extra temporary.
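             * Example: r1 = 5, r2 = 3 gives 5 + ~3 == 5 - 3 - 1 == 1.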
2926              */
2927             tcg_r1 = load_gpr(ctx, a->r1);
2928             tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
2929             tcg_gen_subi_i64(tmp, tmp, 1);
2930         }
2931         save_gpr(ctx, a->t, tmp);
2932         ctx->null_cond = cond_make_f();
2933         return true;
2934     }
2935 
2936     nullify_over(ctx);
2937     tcg_r1 = load_gpr(ctx, a->r1);
2938     tcg_r2 = load_gpr(ctx, a->r2);
2939     tmp = tcg_temp_new_i64();
2940     tcg_gen_not_i64(tmp, tcg_r2);
2941     do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
2942     return nullify_end(ctx);
2943 }
2944 
2945 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2946 {
2947     return do_uaddcm(ctx, a, false);
2948 }
2949 
2950 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2951 {
2952     return do_uaddcm(ctx, a, true);
2953 }
2954 
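/* A sketch of the DCOR trick as implemented here (an illustrative
   reading of the code, not the manual): PSW[CB] keeps a carry bit in
   every 4th position, so extract2 by 4 drops each nibble's carry-out
   into that nibble, the 0x1111... mask keeps one bit per nibble, and
   multiplying by 6 expands each surviving bit into the BCD correction
   constant 6 for its nibble.  */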
2955 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2956 {
2957     TCGv_i64 tmp;
2958 
2959     nullify_over(ctx);
2960 
2961     tmp = tcg_temp_new_i64();
2962     tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
2963     if (!is_i) {
2964         tcg_gen_not_i64(tmp, tmp);
2965     }
2966     tcg_gen_andi_i64(tmp, tmp, 0x1111111111111111ull);
2967     tcg_gen_muli_i64(tmp, tmp, 6);
2968     do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
2969                    a->cf, a->d, false, is_i);
2970     return nullify_end(ctx);
2971 }
2972 
2973 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2974 {
2975     return do_dcor(ctx, a, false);
2976 }
2977 
2978 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2979 {
2980     return do_dcor(ctx, a, true);
2981 }
2982 
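/* DS: divide step.  Roughly, software issues one DS per quotient bit
   of a non-restoring divide, with PSW[V] selecting add versus subtract
   on each step, as described in the comments below.  (Summary only;
   the authoritative description is in the architecture manual.)  */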
2983 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2984 {
2985     TCGv_i64 dest, add1, add2, addc, in1, in2;
2986 
2987     nullify_over(ctx);
2988 
2989     in1 = load_gpr(ctx, a->r1);
2990     in2 = load_gpr(ctx, a->r2);
2991 
2992     add1 = tcg_temp_new_i64();
2993     add2 = tcg_temp_new_i64();
2994     addc = tcg_temp_new_i64();
2995     dest = tcg_temp_new_i64();
2996 
2997     /* Form R1 << 1 | PSW[CB]{8}.  */
2998     tcg_gen_add_i64(add1, in1, in1);
2999     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
3000 
3001     /*
3002      * Add or subtract R2, depending on PSW[V].  Proper computation of
3003      * carry requires that we subtract via + ~R2 + 1, as described in
3004      * the manual.  By extracting and masking V, we can produce the
3005      * proper inputs to the addition without movcond.
3006      */
3007     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
3008     tcg_gen_xor_i64(add2, in2, addc);
3009     tcg_gen_andi_i64(addc, addc, 1);
3010 
3011     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
3012     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
3013                      addc, ctx->zero);
3014 
3015     /* Write back the result register.  */
3016     save_gpr(ctx, a->t, dest);
3017 
3018     /* Write back PSW[CB].  */
3019     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
3020     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
3021 
3022     /*
3023      * Write back PSW[V] for the division step.
3024      * Shift cb{8} from where it lives in bit 32 to bit 31,
3025      * so that it overlaps r2{32} in bit 31.
3026      */
3027     tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
3028     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
3029 
3030     /* Install the new nullification.  */
3031     if (a->cf) {
3032         TCGv_i64 sv = NULL, uv = NULL;
3033         if (cond_need_sv(a->cf >> 1)) {
3034             sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
3035         } else if (cond_need_cb(a->cf >> 1)) {
3036             uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
3037         }
3038         ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
3039     }
3040 
3041     return nullify_end(ctx);
3042 }
3043 
3044 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
3045 {
3046     return do_add_imm(ctx, a, false, false);
3047 }
3048 
3049 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
3050 {
3051     return do_add_imm(ctx, a, true, false);
3052 }
3053 
3054 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
3055 {
3056     return do_add_imm(ctx, a, false, true);
3057 }
3058 
3059 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
3060 {
3061     return do_add_imm(ctx, a, true, true);
3062 }
3063 
3064 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
3065 {
3066     return do_sub_imm(ctx, a, false);
3067 }
3068 
3069 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
3070 {
3071     return do_sub_imm(ctx, a, true);
3072 }
3073 
3074 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3075 {
3076     TCGv_i64 tcg_im, tcg_r2;
3077 
3078     if (a->cf) {
3079         nullify_over(ctx);
3080     }
3081 
3082     tcg_im = tcg_constant_i64(a->i);
3083     tcg_r2 = load_gpr(ctx, a->r);
3084     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3085 
3086     return nullify_end(ctx);
3087 }
3088 
3089 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3090                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3091 {
3092     TCGv_i64 r1, r2, dest;
3093 
3094     if (!ctx->is_pa20) {
3095         return false;
3096     }
3097 
3098     nullify_over(ctx);
3099 
3100     r1 = load_gpr(ctx, a->r1);
3101     r2 = load_gpr(ctx, a->r2);
3102     dest = dest_gpr(ctx, a->t);
3103 
3104     fn(dest, r1, r2);
3105     save_gpr(ctx, a->t, dest);
3106 
3107     return nullify_end(ctx);
3108 }
3109 
3110 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3111                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3112 {
3113     TCGv_i64 r, dest;
3114 
3115     if (!ctx->is_pa20) {
3116         return false;
3117     }
3118 
3119     nullify_over(ctx);
3120 
3121     r = load_gpr(ctx, a->r);
3122     dest = dest_gpr(ctx, a->t);
3123 
3124     fn(dest, r, a->i);
3125     save_gpr(ctx, a->t, dest);
3126 
3127     return nullify_end(ctx);
3128 }
3129 
3130 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3131                                 void (*fn)(TCGv_i64, TCGv_i64,
3132                                            TCGv_i64, TCGv_i32))
3133 {
3134     TCGv_i64 r1, r2, dest;
3135 
3136     if (!ctx->is_pa20) {
3137         return false;
3138     }
3139 
3140     nullify_over(ctx);
3141 
3142     r1 = load_gpr(ctx, a->r1);
3143     r2 = load_gpr(ctx, a->r2);
3144     dest = dest_gpr(ctx, a->t);
3145 
3146     fn(dest, r1, r2, tcg_constant_i32(a->sh));
3147     save_gpr(ctx, a->t, dest);
3148 
3149     return nullify_end(ctx);
3150 }
3151 
3152 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
3153 {
3154     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
3155 }
3156 
3157 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
3158 {
3159     return do_multimedia(ctx, a, gen_helper_hadd_ss);
3160 }
3161 
3162 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
3163 {
3164     return do_multimedia(ctx, a, gen_helper_hadd_us);
3165 }
3166 
3167 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
3168 {
3169     return do_multimedia(ctx, a, gen_helper_havg);
3170 }
3171 
3172 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
3173 {
3174     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
3175 }
3176 
3177 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
3178 {
3179     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
3180 }
3181 
3182 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
3183 {
3184     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
3185 }
3186 
3187 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
3188 {
3189     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
3190 }
3191 
3192 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
3193 {
3194     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
3195 }
3196 
3197 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3198 {
3199     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3200 }
3201 
3202 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3203 {
3204     return do_multimedia(ctx, a, gen_helper_hsub_ss);
3205 }
3206 
3207 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3208 {
3209     return do_multimedia(ctx, a, gen_helper_hsub_us);
3210 }
3211 
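/* MIX examples (illustrative): viewing each register as four 16-bit
   fields {0,1,2,3} from most to least significant, MIXH,L yields
   {r1.0, r2.0, r1.2, r2.2} and MIXH,R yields {r1.1, r2.1, r1.3, r2.3};
   the MIXW forms below do the same with two 32-bit fields.  */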
3212 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3213 {
3214     uint64_t mask = 0xffff0000ffff0000ull;
3215     TCGv_i64 tmp = tcg_temp_new_i64();
3216 
3217     tcg_gen_andi_i64(tmp, r2, mask);
3218     tcg_gen_andi_i64(dst, r1, mask);
3219     tcg_gen_shri_i64(tmp, tmp, 16);
3220     tcg_gen_or_i64(dst, dst, tmp);
3221 }
3222 
3223 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3224 {
3225     return do_multimedia(ctx, a, gen_mixh_l);
3226 }
3227 
3228 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3229 {
3230     uint64_t mask = 0x0000ffff0000ffffull;
3231     TCGv_i64 tmp = tcg_temp_new_i64();
3232 
3233     tcg_gen_andi_i64(tmp, r1, mask);
3234     tcg_gen_andi_i64(dst, r2, mask);
3235     tcg_gen_shli_i64(tmp, tmp, 16);
3236     tcg_gen_or_i64(dst, dst, tmp);
3237 }
3238 
3239 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3240 {
3241     return do_multimedia(ctx, a, gen_mixh_r);
3242 }
3243 
3244 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3245 {
3246     TCGv_i64 tmp = tcg_temp_new_i64();
3247 
3248     tcg_gen_shri_i64(tmp, r2, 32);
3249     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3250 }
3251 
3252 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3253 {
3254     return do_multimedia(ctx, a, gen_mixw_l);
3255 }
3256 
3257 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3258 {
3259     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3260 }
3261 
3262 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3263 {
3264     return do_multimedia(ctx, a, gen_mixw_r);
3265 }
3266 
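/* PERMH: halfword permute.  With halfwords numbered {0,1,2,3} from the
   most significant end, result field i is source field c_i; e.g.
   c0..c3 = 1,0,3,2 swaps adjacent halfwords, and c0..c3 = 0,0,0,0
   broadcasts the leftmost halfword (an illustrative reading of the
   extract/deposit sequence below).  */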
3267 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3268 {
3269     TCGv_i64 r, t0, t1, t2, t3;
3270 
3271     if (!ctx->is_pa20) {
3272         return false;
3273     }
3274 
3275     nullify_over(ctx);
3276 
3277     r = load_gpr(ctx, a->r1);
3278     t0 = tcg_temp_new_i64();
3279     t1 = tcg_temp_new_i64();
3280     t2 = tcg_temp_new_i64();
3281     t3 = tcg_temp_new_i64();
3282 
3283     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3284     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3285     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3286     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3287 
3288     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3289     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3290     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3291 
3292     save_gpr(ctx, a->t, t0);
3293     return nullify_end(ctx);
3294 }
3295 
3296 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3297 {
3298     if (ctx->is_pa20) {
3299        /*
3300         * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3301         * Any base modification still occurs.
3302         */
3303         if (a->t == 0) {
3304             return trans_nop_addrx(ctx, a);
3305         }
3306     } else if (a->size > MO_32) {
3307         return gen_illegal(ctx);
3308     }
3309     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3310                    a->disp, a->sp, a->m, a->size | MO_TE);
3311 }
3312 
3313 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3314 {
3315     assert(a->x == 0 && a->scale == 0);
3316     if (!ctx->is_pa20 && a->size > MO_32) {
3317         return gen_illegal(ctx);
3318     }
3319     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3320 }
3321 
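/* LDCW: load and clear word, the PA-RISC semaphore primitive.  The old
   memory value is returned and zero is stored, which maps directly
   onto an atomic exchange against ctx->zero.  */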
3322 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3323 {
3324     MemOp mop = MO_TE | MO_ALIGN | a->size;
3325     TCGv_i64 dest, ofs;
3326     TCGv_i64 addr;
3327 
3328     if (!ctx->is_pa20 && a->size > MO_32) {
3329         return gen_illegal(ctx);
3330     }
3331 
3332     nullify_over(ctx);
3333 
3334     if (a->m) {
3335         /* Base register modification.  Make sure that if RT == RB,
3336            we see the result of the load.  */
3337         dest = tcg_temp_new_i64();
3338     } else {
3339         dest = dest_gpr(ctx, a->t);
3340     }
3341 
3342     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3343              a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3344 
3345     /*
3346      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3347      * However actual hardware succeeds with aligned mod 4.
3348      * Detect this case and log a GUEST_ERROR.
3349      *
3350      * TODO: HPPA64 relaxes the over-alignment requirement
3351      * with the ,co completer.
3352      */
3353     gen_helper_ldc_check(addr);
3354 
3355     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3356 
3357     if (a->m) {
3358         save_gpr(ctx, a->b, ofs);
3359     }
3360     save_gpr(ctx, a->t, dest);
3361 
3362     return nullify_end(ctx);
3363 }
3364 
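/* STBY: store bytes, for the unaligned edges of a block move.
   Roughly, the ,b (begin) form covers from the addressed byte to the
   end of the word and the ,e (end) form covers the bytes before it;
   the precise byte selection lives in the stby_* helpers.  */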
3365 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3366 {
3367     TCGv_i64 ofs, val;
3368     TCGv_i64 addr;
3369 
3370     nullify_over(ctx);
3371 
3372     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3373              MMU_DISABLED(ctx));
3374     val = load_gpr(ctx, a->r);
3375     if (a->a) {
3376         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3377             gen_helper_stby_e_parallel(tcg_env, addr, val);
3378         } else {
3379             gen_helper_stby_e(tcg_env, addr, val);
3380         }
3381     } else {
3382         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3383             gen_helper_stby_b_parallel(tcg_env, addr, val);
3384         } else {
3385             gen_helper_stby_b(tcg_env, addr, val);
3386         }
3387     }
3388     if (a->m) {
3389         tcg_gen_andi_i64(ofs, ofs, ~3);
3390         save_gpr(ctx, a->b, ofs);
3391     }
3392 
3393     return nullify_end(ctx);
3394 }
3395 
3396 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3397 {
3398     TCGv_i64 ofs, val;
3399     TCGv_i64 addr;
3400 
3401     if (!ctx->is_pa20) {
3402         return false;
3403     }
3404     nullify_over(ctx);
3405 
3406     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3407              MMU_DISABLED(ctx));
3408     val = load_gpr(ctx, a->r);
3409     if (a->a) {
3410         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3411             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3412         } else {
3413             gen_helper_stdby_e(tcg_env, addr, val);
3414         }
3415     } else {
3416         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3417             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3418         } else {
3419             gen_helper_stdby_b(tcg_env, addr, val);
3420         }
3421     }
3422     if (a->m) {
3423         tcg_gen_andi_i64(ofs, ofs, ~7);
3424         save_gpr(ctx, a->b, ofs);
3425     }
3426 
3427     return nullify_end(ctx);
3428 }
3429 
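/* The absolute (LDWA/STWA-style) forms: translation is bypassed by
   temporarily swapping in an MMU-disabled index for the duration of
   the access.  */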
3430 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3431 {
3432     int hold_mmu_idx = ctx->mmu_idx;
3433 
3434     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3435     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3436     trans_ld(ctx, a);
3437     ctx->mmu_idx = hold_mmu_idx;
3438     return true;
3439 }
3440 
3441 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3442 {
3443     int hold_mmu_idx = ctx->mmu_idx;
3444 
3445     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3446     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3447     trans_st(ctx, a);
3448     ctx->mmu_idx = hold_mmu_idx;
3449     return true;
3450 }
3451 
3452 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3453 {
3454     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3455 
3456     tcg_gen_movi_i64(tcg_rt, a->i);
3457     save_gpr(ctx, a->t, tcg_rt);
3458     ctx->null_cond = cond_make_f();
3459     return true;
3460 }
3461 
3462 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3463 {
3464     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3465     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3466 
3467     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3468     save_gpr(ctx, 1, tcg_r1);
3469     ctx->null_cond = cond_make_f();
3470     return true;
3471 }
3472 
3473 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3474 {
3475     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3476 
3477     /* Special case rb == 0, for the LDI pseudo-op.
3478        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3479     if (a->b == 0) {
3480         tcg_gen_movi_i64(tcg_rt, a->i);
3481     } else {
3482         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3483     }
3484     save_gpr(ctx, a->t, tcg_rt);
3485     ctx->null_cond = cond_make_f();
3486     return true;
3487 }
3488 
3489 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3490                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3491 {
3492     TCGv_i64 dest, in2, sv;
3493     DisasCond cond;
3494 
3495     in2 = load_gpr(ctx, r);
3496     dest = tcg_temp_new_i64();
3497 
3498     tcg_gen_sub_i64(dest, in1, in2);
3499 
3500     sv = NULL;
3501     if (cond_need_sv(c)) {
3502         sv = do_sub_sv(ctx, dest, in1, in2);
3503     }
3504 
3505     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3506     return do_cbranch(ctx, disp, n, &cond);
3507 }
3508 
3509 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3510 {
3511     if (!ctx->is_pa20 && a->d) {
3512         return false;
3513     }
3514     nullify_over(ctx);
3515     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3516                    a->c, a->f, a->d, a->n, a->disp);
3517 }
3518 
3519 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3520 {
3521     if (!ctx->is_pa20 && a->d) {
3522         return false;
3523     }
3524     nullify_over(ctx);
3525     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3526                    a->c, a->f, a->d, a->n, a->disp);
3527 }
3528 
3529 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3530                     unsigned c, unsigned f, unsigned n, int disp)
3531 {
3532     TCGv_i64 dest, in2, sv, cb_cond;
3533     DisasCond cond;
3534     bool d = false;
3535 
3536     /*
3537      * For hppa64, the ADDB conditions change with PSW.W,
3538      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3539      */
3540     if (ctx->tb_flags & PSW_W) {
3541         d = c >= 5;
3542         if (d) {
3543             c &= 3;
3544         }
3545     }
3546 
3547     in2 = load_gpr(ctx, r);
3548     dest = tcg_temp_new_i64();
3549     sv = NULL;
3550     cb_cond = NULL;
3551 
3552     if (cond_need_cb(c)) {
3553         TCGv_i64 cb = tcg_temp_new_i64();
3554         TCGv_i64 cb_msb = tcg_temp_new_i64();
3555 
3556         tcg_gen_movi_i64(cb_msb, 0);
3557         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3558         tcg_gen_xor_i64(cb, in1, in2);
3559         tcg_gen_xor_i64(cb, cb, dest);
3560         cb_cond = get_carry(ctx, d, cb, cb_msb);
3561     } else {
3562         tcg_gen_add_i64(dest, in1, in2);
3563     }
3564     if (cond_need_sv(c)) {
3565         sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
3566     }
3567 
3568     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3569     save_gpr(ctx, r, dest);
3570     return do_cbranch(ctx, disp, n, &cond);
3571 }
3572 
3573 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3574 {
3575     nullify_over(ctx);
3576     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3577 }
3578 
3579 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3580 {
3581     nullify_over(ctx);
3582     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3583 }
3584 
3585 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3586 {
3587     TCGv_i64 tmp, tcg_r;
3588     DisasCond cond;
3589 
3590     nullify_over(ctx);
3591 
3592     tmp = tcg_temp_new_i64();
3593     tcg_r = load_gpr(ctx, a->r);
3594     if (a->d) {
3595         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3596     } else {
3597         /* Force shift into [32,63] */
3598         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3599         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3600     }
3601 
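         /*
          * The selected bit is now in the sign position, so LT tests
          * bit == 1 and GE tests bit == 0.
          */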
3602     cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
3603     return do_cbranch(ctx, a->disp, a->n, &cond);
3604 }
3605 
3606 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3607 {
3608     DisasCond cond;
3609     int p = a->p | (a->d ? 0 : 32);
3610 
3611     nullify_over(ctx);
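         /*
          * Bit numbering is big-endian (bit 0 is the MSB), so bit p is
          * the mask 1 << (63 - p); for word ops p has been offset into
          * [32,63] above.  TSTNE/TSTEQ select branch on 1 or on 0.
          */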
3612     cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
3613                         load_gpr(ctx, a->r), 1ull << (63 - p));
3614     return do_cbranch(ctx, a->disp, a->n, &cond);
3615 }
3616 
3617 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3618 {
3619     TCGv_i64 dest;
3620     DisasCond cond;
3621 
3622     nullify_over(ctx);
3623 
3624     dest = dest_gpr(ctx, a->r2);
3625     if (a->r1 == 0) {
3626         tcg_gen_movi_i64(dest, 0);
3627     } else {
3628         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3629     }
3630 
3631     /* All MOVB conditions are 32-bit. */
3632     cond = do_sed_cond(ctx, a->c, false, dest);
3633     return do_cbranch(ctx, a->disp, a->n, &cond);
3634 }
3635 
3636 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3637 {
3638     TCGv_i64 dest;
3639     DisasCond cond;
3640 
3641     nullify_over(ctx);
3642 
3643     dest = dest_gpr(ctx, a->r);
3644     tcg_gen_movi_i64(dest, a->i);
3645 
3646     /* All MOVBI conditions are 32-bit. */
3647     cond = do_sed_cond(ctx, a->c, false, dest);
3648     return do_cbranch(ctx, a->disp, a->n, &cond);
3649 }
3650 
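     /*
      * Shift Right Pair: the double-width value r1:r2 is shifted right
      * by SAR, keeping the low word or doubleword.  r1 == 0 reduces to
      * a plain shift of r2, and r1 == r2 to a rotate.
      */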
3651 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3652 {
3653     TCGv_i64 dest, src2;
3654 
3655     if (!ctx->is_pa20 && a->d) {
3656         return false;
3657     }
3658     if (a->c) {
3659         nullify_over(ctx);
3660     }
3661 
3662     dest = dest_gpr(ctx, a->t);
3663     src2 = load_gpr(ctx, a->r2);
3664     if (a->r1 == 0) {
3665         if (a->d) {
3666             tcg_gen_shr_i64(dest, src2, cpu_sar);
3667         } else {
3668             TCGv_i64 tmp = tcg_temp_new_i64();
3669 
3670             tcg_gen_ext32u_i64(dest, src2);
3671             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3672             tcg_gen_shr_i64(dest, dest, tmp);
3673         }
3674     } else if (a->r1 == a->r2) {
3675         if (a->d) {
3676             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3677         } else {
3678             TCGv_i32 t32 = tcg_temp_new_i32();
3679             TCGv_i32 s32 = tcg_temp_new_i32();
3680 
3681             tcg_gen_extrl_i64_i32(t32, src2);
3682             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3683             tcg_gen_andi_i32(s32, s32, 31);
3684             tcg_gen_rotr_i32(t32, t32, s32);
3685             tcg_gen_extu_i32_i64(dest, t32);
3686         }
3687     } else {
3688         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3689 
3690         if (a->d) {
3691             TCGv_i64 t = tcg_temp_new_i64();
3692             TCGv_i64 n = tcg_temp_new_i64();
3693 
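                 /*
                  * dest = (src1:src2) >> SAR.  The left shift of src1
                  * by 64 - SAR is split into a shift by SAR ^ 63 plus
                  * one more, avoiding an out-of-range shift count when
                  * SAR == 0.
                  */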
3694             tcg_gen_xori_i64(n, cpu_sar, 63);
3695             tcg_gen_shl_i64(t, src1, n);
3696             tcg_gen_shli_i64(t, t, 1);
3697             tcg_gen_shr_i64(dest, src2, cpu_sar);
3698             tcg_gen_or_i64(dest, dest, t);
3699         } else {
3700             TCGv_i64 t = tcg_temp_new_i64();
3701             TCGv_i64 s = tcg_temp_new_i64();
3702 
3703             tcg_gen_concat32_i64(t, src2, src1);
3704             tcg_gen_andi_i64(s, cpu_sar, 31);
3705             tcg_gen_shr_i64(dest, t, s);
3706         }
3707     }
3708     save_gpr(ctx, a->t, dest);
3709 
3710     /* Install the new nullification.  */
3711     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3712     return nullify_end(ctx);
3713 }
3714 
3715 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3716 {
3717     unsigned width, sa;
3718     TCGv_i64 dest, t2;
3719 
3720     if (!ctx->is_pa20 && a->d) {
3721         return false;
3722     }
3723     if (a->c) {
3724         nullify_over(ctx);
3725     }
3726 
3727     width = a->d ? 64 : 32;
3728     sa = width - 1 - a->cpos;
3729 
3730     dest = dest_gpr(ctx, a->t);
3731     t2 = load_gpr(ctx, a->r2);
3732     if (a->r1 == 0) {
3733         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3734     } else if (width == TARGET_LONG_BITS) {
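             /* Funnel shift: dest = low 64 bits of (r1:r2) >> sa. */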
3735         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3736     } else {
3737         assert(!a->d);
3738         if (a->r1 == a->r2) {
3739             TCGv_i32 t32 = tcg_temp_new_i32();
3740             tcg_gen_extrl_i64_i32(t32, t2);
3741             tcg_gen_rotri_i32(t32, t32, sa);
3742             tcg_gen_extu_i32_i64(dest, t32);
3743         } else {
3744             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3745             tcg_gen_extract_i64(dest, dest, sa, 32);
3746         }
3747     }
3748     save_gpr(ctx, a->t, dest);
3749 
3750     /* Install the new nullification.  */
3751     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3752     return nullify_end(ctx);
3753 }
3754 
3755 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3756 {
3757     unsigned widthm1 = a->d ? 63 : 31;
3758     TCGv_i64 dest, src, tmp;
3759 
3760     if (!ctx->is_pa20 && a->d) {
3761         return false;
3762     }
3763     if (a->c) {
3764         nullify_over(ctx);
3765     }
3766 
3767     dest = dest_gpr(ctx, a->t);
3768     src = load_gpr(ctx, a->r);
3769     tmp = tcg_temp_new_i64();
3770 
3771     /* Recall that SAR uses big-endian bit numbering.  */
3772     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3773     tcg_gen_xori_i64(tmp, tmp, widthm1);
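         /* That is, tmp = widthm1 - (SAR & widthm1), the right-shift count. */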
3774 
3775     if (a->se) {
3776         if (!a->d) {
3777             tcg_gen_ext32s_i64(dest, src);
3778             src = dest;
3779         }
3780         tcg_gen_sar_i64(dest, src, tmp);
3781         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3782     } else {
3783         if (!a->d) {
3784             tcg_gen_ext32u_i64(dest, src);
3785             src = dest;
3786         }
3787         tcg_gen_shr_i64(dest, src, tmp);
3788         tcg_gen_extract_i64(dest, dest, 0, a->len);
3789     }
3790     save_gpr(ctx, a->t, dest);
3791 
3792     /* Install the new nullification.  */
3793     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3794     return nullify_end(ctx);
3795 }
3796 
3797 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3798 {
3799     unsigned len, cpos, width;
3800     TCGv_i64 dest, src;
3801 
3802     if (!ctx->is_pa20 && a->d) {
3803         return false;
3804     }
3805     if (a->c) {
3806         nullify_over(ctx);
3807     }
3808 
3809     len = a->len;
3810     width = a->d ? 64 : 32;
3811     cpos = width - 1 - a->pos;
3812     if (cpos + len > width) {
3813         len = width - cpos;
3814     }
3815 
3816     dest = dest_gpr(ctx, a->t);
3817     src = load_gpr(ctx, a->r);
3818     if (a->se) {
3819         tcg_gen_sextract_i64(dest, src, cpos, len);
3820     } else {
3821         tcg_gen_extract_i64(dest, src, cpos, len);
3822     }
3823     save_gpr(ctx, a->t, dest);
3824 
3825     /* Install the new nullification.  */
3826     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3827     return nullify_end(ctx);
3828 }
3829 
3830 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3831 {
3832     unsigned len, width;
3833     uint64_t mask0, mask1;
3834     TCGv_i64 dest;
3835 
3836     if (!ctx->is_pa20 && a->d) {
3837         return false;
3838     }
3839     if (a->c) {
3840         nullify_over(ctx);
3841     }
3842 
3843     len = a->len;
3844     width = a->d ? 64 : 32;
3845     if (a->cpos + len > width) {
3846         len = width - a->cpos;
3847     }
3848 
3849     dest = dest_gpr(ctx, a->t);
3850     mask0 = deposit64(0, a->cpos, len, a->i);
3851     mask1 = deposit64(-1, a->cpos, len, a->i);
3852 
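         /*
          * mask0 places the immediate in the field over zeros, mask1
          * places it over ones: for the nz form, clearing with mask1
          * then setting with mask0 merges the field into the old value.
          */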
3853     if (a->nz) {
3854         TCGv_i64 src = load_gpr(ctx, a->t);
3855         tcg_gen_andi_i64(dest, src, mask1);
3856         tcg_gen_ori_i64(dest, dest, mask0);
3857     } else {
3858         tcg_gen_movi_i64(dest, mask0);
3859     }
3860     save_gpr(ctx, a->t, dest);
3861 
3862     /* Install the new nullification.  */
3863     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3864     return nullify_end(ctx);
3865 }
3866 
3867 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3868 {
3869     unsigned rs = a->nz ? a->t : 0;
3870     unsigned len, width;
3871     TCGv_i64 dest, val;
3872 
3873     if (!ctx->is_pa20 && a->d) {
3874         return false;
3875     }
3876     if (a->c) {
3877         nullify_over(ctx);
3878     }
3879 
3880     len = a->len;
3881     width = a->d ? 64 : 32;
3882     if (a->cpos + len > width) {
3883         len = width - a->cpos;
3884     }
3885 
3886     dest = dest_gpr(ctx, a->t);
3887     val = load_gpr(ctx, a->r);
3888     if (rs == 0) {
3889         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3890     } else {
3891         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3892     }
3893     save_gpr(ctx, a->t, dest);
3894 
3895     /* Install the new nullification.  */
3896     ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3897     return nullify_end(ctx);
3898 }
3899 
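     /*
      * Deposit into a variable position: build a len-bit mask, shift
      * the mask and field up to the position named by SAR, then either
      * merge into the previous value of rt (nz) or deposit over zeros.
      */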
3900 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3901                        bool d, bool nz, unsigned len, TCGv_i64 val)
3902 {
3903     unsigned rs = nz ? rt : 0;
3904     unsigned widthm1 = d ? 63 : 31;
3905     TCGv_i64 mask, tmp, shift, dest;
3906     uint64_t msb = 1ULL << (len - 1);
3907 
3908     dest = dest_gpr(ctx, rt);
3909     shift = tcg_temp_new_i64();
3910     tmp = tcg_temp_new_i64();
3911 
3912     /* Convert big-endian bit numbering in SAR to left-shift.  */
3913     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3914     tcg_gen_xori_i64(shift, shift, widthm1);
3915 
3916     mask = tcg_temp_new_i64();
3917     tcg_gen_movi_i64(mask, msb + (msb - 1));
3918     tcg_gen_and_i64(tmp, val, mask);
3919     if (rs) {
3920         tcg_gen_shl_i64(mask, mask, shift);
3921         tcg_gen_shl_i64(tmp, tmp, shift);
3922         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3923         tcg_gen_or_i64(dest, dest, tmp);
3924     } else {
3925         tcg_gen_shl_i64(dest, tmp, shift);
3926     }
3927     save_gpr(ctx, rt, dest);
3928 
3929     /* Install the new nullification.  */
3930     ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3931     return nullify_end(ctx);
3932 }
3933 
3934 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3935 {
3936     if (!ctx->is_pa20 && a->d) {
3937         return false;
3938     }
3939     if (a->c) {
3940         nullify_over(ctx);
3941     }
3942     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3943                       load_gpr(ctx, a->r));
3944 }
3945 
3946 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3947 {
3948     if (!ctx->is_pa20 && a->d) {
3949         return false;
3950     }
3951     if (a->c) {
3952         nullify_over(ctx);
3953     }
3954     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3955                       tcg_constant_i64(a->i));
3956 }
3957 
3958 static bool trans_be(DisasContext *ctx, arg_be *a)
3959 {
3960 #ifndef CONFIG_USER_ONLY
3961     ctx->iaq_j.space = tcg_temp_new_i64();
3962     load_spr(ctx, ctx->iaq_j.space, a->sp);
3963 #endif
3964 
3965     ctx->iaq_j.base = tcg_temp_new_i64();
3966     ctx->iaq_j.disp = 0;
3967 
3968     tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
3969     ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);
3970 
3971     return do_ibranch(ctx, a->l, true, a->n);
3972 }
3973 
3974 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3975 {
3976     return do_dbranch(ctx, a->disp, a->l, a->n);
3977 }
3978 
3979 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3980 {
3981     int64_t disp = a->disp;
3982     bool indirect = false;
3983 
3984     /* Trap if PSW[B] is set. */
3985     if (ctx->psw_xb & PSW_B) {
3986         return gen_illegal(ctx);
3987     }
3988 
3989     nullify_over(ctx);
3990 
3991 #ifndef CONFIG_USER_ONLY
3992     if (ctx->privilege == 0) {
3993         /* Privilege cannot decrease. */
3994     } else if (!(ctx->tb_flags & PSW_C)) {
3995         /* With paging disabled, priv becomes 0. */
3996         disp -= ctx->privilege;
3997     } else {
3998         /* Adjust the dest offset for the privilege change from the PTE. */
3999         TCGv_i64 off = tcg_temp_new_i64();
4000 
4001         copy_iaoq_entry(ctx, off, &ctx->iaq_f);
4002         gen_helper_b_gate_priv(off, tcg_env, off);
4003 
4004         ctx->iaq_j.base = off;
4005         ctx->iaq_j.disp = disp + 8;
4006         indirect = true;
4007     }
4008 #endif
4009 
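         /*
          * Architecturally, B,GATE does not write a full return link:
          * only the two low (privilege) bits of GR l are replaced with
          * the privilege level at which the branch executed, and the
          * rest of the register is unchanged.  When privilege == 3 the
          * OR alone sets both bits, so the AND may be skipped.
          */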
4010     if (a->l) {
4011         TCGv_i64 tmp = dest_gpr(ctx, a->l);
4012         if (ctx->privilege < 3) {
4013             tcg_gen_andi_i64(tmp, tmp, -4);
4014         }
4015         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
4016         save_gpr(ctx, a->l, tmp);
4017     }
4018 
4019     if (indirect) {
4020         return do_ibranch(ctx, 0, false, a->n);
4021     }
4022     return do_dbranch(ctx, disp, 0, a->n);
4023 }
4024 
4025 static bool trans_blr(DisasContext *ctx, arg_blr *a)
4026 {
4027     if (a->x) {
4028         DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
4029         TCGv_i64 t0 = tcg_temp_new_i64();
4030         TCGv_i64 t1 = tcg_temp_new_i64();
4031 
4032         /* The computation here never changes privilege level.  */
4033         copy_iaoq_entry(ctx, t0, &next);
4034         tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
4035         tcg_gen_add_i64(t0, t0, t1);
4036 
4037         ctx->iaq_j = iaqe_next_absv(ctx, t0);
4038         return do_ibranch(ctx, a->l, false, a->n);
4039     } else {
4040         /* BLR R0,RX is a good way to load PC+8 into RX.  */
4041         return do_dbranch(ctx, 0, a->l, a->n);
4042     }
4043 }
4044 
4045 static bool trans_bv(DisasContext *ctx, arg_bv *a)
4046 {
4047     TCGv_i64 dest;
4048 
4049     if (a->x == 0) {
4050         dest = load_gpr(ctx, a->b);
4051     } else {
4052         dest = tcg_temp_new_i64();
4053         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
4054         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
4055     }
4056     dest = do_ibranch_priv(ctx, dest);
4057     ctx->iaq_j = iaqe_next_absv(ctx, dest);
4058 
4059     return do_ibranch(ctx, 0, false, a->n);
4060 }
4061 
4062 static bool trans_bve(DisasContext *ctx, arg_bve *a)
4063 {
4064     TCGv_i64 b = load_gpr(ctx, a->b);
4065 
4066 #ifndef CONFIG_USER_ONLY
4067     ctx->iaq_j.space = space_select(ctx, 0, b);
4068 #endif
4069     ctx->iaq_j.base = do_ibranch_priv(ctx, b);
4070     ctx->iaq_j.disp = 0;
4071 
4072     return do_ibranch(ctx, a->l, false, a->n);
4073 }
4074 
4075 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
4076 {
4077     /* All branch target stack instructions are implemented as nops. */
4078     return ctx->is_pa20;
4079 }
4080 
4081 /*
4082  * Float class 0
4083  */
4084 
4085 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4086 {
4087     tcg_gen_mov_i32(dst, src);
4088 }
4089 
4090 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4091 {
4092     uint64_t ret;
4093 
4094     if (ctx->is_pa20) {
4095         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4096     } else {
4097         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4098     }
4099 
4100     nullify_over(ctx);
4101     save_frd(0, tcg_constant_i64(ret));
4102     return nullify_end(ctx);
4103 }
4104 
4105 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
4106 {
4107     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
4108 }
4109 
4110 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4111 {
4112     tcg_gen_mov_i64(dst, src);
4113 }
4114 
4115 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
4116 {
4117     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
4118 }
4119 
4120 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4121 {
4122     tcg_gen_andi_i32(dst, src, INT32_MAX);
4123 }
4124 
4125 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
4126 {
4127     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
4128 }
4129 
4130 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4131 {
4132     tcg_gen_andi_i64(dst, src, INT64_MAX);
4133 }
4134 
4135 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
4136 {
4137     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
4138 }
4139 
4140 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
4141 {
4142     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
4143 }
4144 
4145 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
4146 {
4147     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
4148 }
4149 
4150 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
4151 {
4152     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
4153 }
4154 
4155 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
4156 {
4157     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
4158 }
4159 
4160 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4161 {
4162     tcg_gen_xori_i32(dst, src, INT32_MIN);
4163 }
4164 
4165 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
4166 {
4167     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
4168 }
4169 
4170 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4171 {
4172     tcg_gen_xori_i64(dst, src, INT64_MIN);
4173 }
4174 
4175 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
4176 {
4177     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
4178 }
4179 
4180 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4181 {
4182     tcg_gen_ori_i32(dst, src, INT32_MIN);
4183 }
4184 
4185 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4186 {
4187     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4188 }
4189 
4190 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4191 {
4192     tcg_gen_ori_i64(dst, src, INT64_MIN);
4193 }
4194 
4195 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4196 {
4197     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4198 }
4199 
4200 /*
4201  * Float class 1
4202  */
4203 
4204 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4205 {
4206     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4207 }
4208 
4209 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4210 {
4211     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4212 }
4213 
4214 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4215 {
4216     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4217 }
4218 
4219 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4220 {
4221     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4222 }
4223 
4224 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4225 {
4226     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4227 }
4228 
4229 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4230 {
4231     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4232 }
4233 
4234 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4235 {
4236     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4237 }
4238 
4239 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4240 {
4241     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4242 }
4243 
4244 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4245 {
4246     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4247 }
4248 
4249 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4250 {
4251     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4252 }
4253 
4254 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4255 {
4256     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4257 }
4258 
4259 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4260 {
4261     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4262 }
4263 
4264 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4265 {
4266     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4267 }
4268 
4269 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4270 {
4271     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4272 }
4273 
4274 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4275 {
4276     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4277 }
4278 
4279 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4280 {
4281     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4282 }
4283 
4284 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4285 {
4286     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4287 }
4288 
4289 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4290 {
4291     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4292 }
4293 
4294 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4295 {
4296     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4297 }
4298 
4299 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4300 {
4301     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4302 }
4303 
4304 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4305 {
4306     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4307 }
4308 
4309 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4310 {
4311     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4312 }
4313 
4314 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4315 {
4316     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4317 }
4318 
4319 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4320 {
4321     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4322 }
4323 
4324 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4325 {
4326     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4327 }
4328 
4329 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4330 {
4331     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4332 }
4333 
4334 /*
4335  * Float class 2
4336  */
4337 
4338 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4339 {
4340     TCGv_i32 ta, tb, tc, ty;
4341 
4342     nullify_over(ctx);
4343 
4344     ta = load_frw0_i32(a->r1);
4345     tb = load_frw0_i32(a->r2);
4346     ty = tcg_constant_i32(a->y);
4347     tc = tcg_constant_i32(a->c);
4348 
4349     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4350 
4351     return nullify_end(ctx);
4352 }
4353 
4354 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4355 {
4356     TCGv_i64 ta, tb;
4357     TCGv_i32 tc, ty;
4358 
4359     nullify_over(ctx);
4360 
4361     ta = load_frd0(a->r1);
4362     tb = load_frd0(a->r2);
4363     ty = tcg_constant_i32(a->y);
4364     tc = tcg_constant_i32(a->c);
4365 
4366     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4367 
4368     return nullify_end(ctx);
4369 }
4370 
4371 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4372 {
4373     TCGCond tc = TCG_COND_TSTNE;
4374     uint32_t mask;
4375     TCGv_i64 t;
4376 
4377     nullify_over(ctx);
4378 
4379     t = tcg_temp_new_i64();
4380     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4381 
4382     if (a->y == 1) {
4383         switch (a->c) {
4384         case 0: /* simple */
4385             mask = R_FPSR_C_MASK;
4386             break;
4387         case 2: /* rej */
4388             tc = TCG_COND_TSTEQ;
4389             /* fallthru */
4390         case 1: /* acc */
4391             mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
4392             break;
4393         case 6: /* rej8 */
4394             tc = TCG_COND_TSTEQ;
4395             /* fallthru */
4396         case 5: /* acc8 */
4397             mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
4398             break;
4399         case 9: /* acc6 */
4400             mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
4401             break;
4402         case 13: /* acc4 */
4403             mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
4404             break;
4405         case 17: /* acc2 */
4406             mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
4407             break;
4408         default:
4409             gen_illegal(ctx);
4410             return true;
4411         }
4412     } else {
4413         unsigned cbit = (a->y ^ 1) - 1;
4414         mask = R_FPSR_CA0_MASK >> cbit;
4415     }
4416 
4417     ctx->null_cond = cond_make_ti(tc, t, mask);
4418     return nullify_end(ctx);
4419 }
4420 
4421 /*
4422  * Float class 3
4423  */
4424 
4425 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4426 {
4427     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4428 }
4429 
4430 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4431 {
4432     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4433 }
4434 
4435 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4436 {
4437     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4438 }
4439 
4440 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4441 {
4442     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4443 }
4444 
4445 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4446 {
4447     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4448 }
4449 
4450 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4451 {
4452     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4453 }
4454 
4455 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4456 {
4457     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4458 }
4459 
4460 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4461 {
4462     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4463 }
4464 
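     /*
      * XMPYU: unsigned 32 x 32 -> 64 fixed-point multiply performed in
      * the FP register file; the 64-bit product replaces FR t.
      */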
4465 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4466 {
4467     TCGv_i64 x, y;
4468 
4469     nullify_over(ctx);
4470 
4471     x = load_frw0_i64(a->r1);
4472     y = load_frw0_i64(a->r2);
4473     tcg_gen_mul_i64(x, x, y);
4474     save_frd(a->t, x);
4475 
4476     return nullify_end(ctx);
4477 }
4478 
4479 /* Convert the fmpyadd single-precision register encodings to standard.  */
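     /* E.g. encodings 0..15 map to 16..31 and 16..31 map to 48..63,
        i.e. the two word halves of FR16..FR31.  */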
4480 static inline int fmpyadd_s_reg(unsigned r)
4481 {
4482     return (r & 16) * 2 + 16 + (r & 15);
4483 }
4484 
4485 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4486 {
4487     int tm = fmpyadd_s_reg(a->tm);
4488     int ra = fmpyadd_s_reg(a->ra);
4489     int ta = fmpyadd_s_reg(a->ta);
4490     int rm2 = fmpyadd_s_reg(a->rm2);
4491     int rm1 = fmpyadd_s_reg(a->rm1);
4492 
4493     nullify_over(ctx);
4494 
4495     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4496     do_fop_weww(ctx, ta, ta, ra,
4497                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4498 
4499     return nullify_end(ctx);
4500 }
4501 
4502 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4503 {
4504     return do_fmpyadd_s(ctx, a, false);
4505 }
4506 
4507 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4508 {
4509     return do_fmpyadd_s(ctx, a, true);
4510 }
4511 
4512 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4513 {
4514     nullify_over(ctx);
4515 
4516     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4517     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4518                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4519 
4520     return nullify_end(ctx);
4521 }
4522 
4523 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4524 {
4525     return do_fmpyadd_d(ctx, a, false);
4526 }
4527 
4528 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4529 {
4530     return do_fmpyadd_d(ctx, a, true);
4531 }
4532 
4533 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4534 {
4535     TCGv_i32 x, y, z;
4536 
4537     nullify_over(ctx);
4538     x = load_frw0_i32(a->rm1);
4539     y = load_frw0_i32(a->rm2);
4540     z = load_frw0_i32(a->ra3);
4541 
4542     if (a->neg) {
4543         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4544     } else {
4545         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4546     }
4547 
4548     save_frw_i32(a->t, x);
4549     return nullify_end(ctx);
4550 }
4551 
4552 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4553 {
4554     TCGv_i64 x, y, z;
4555 
4556     nullify_over(ctx);
4557     x = load_frd0(a->rm1);
4558     y = load_frd0(a->rm2);
4559     z = load_frd0(a->ra3);
4560 
4561     if (a->neg) {
4562         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4563     } else {
4564         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4565     }
4566 
4567     save_frd(a->t, x);
4568     return nullify_end(ctx);
4569 }
4570 
4571 /* Emulate PDC BTLB, called by SeaBIOS-hppa */
4572 static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
4573 {
4574     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4575 #ifndef CONFIG_USER_ONLY
4576     nullify_over(ctx);
4577     gen_helper_diag_btlb(tcg_env);
4578     return nullify_end(ctx);
4579 #endif
4580 }
4581 
4582 /* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
4583 static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
4584 {
4585     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4586 #ifndef CONFIG_USER_ONLY
4587     nullify_over(ctx);
4588     gen_helper_diag_console_output(tcg_env);
4589     return nullify_end(ctx);
4590 #endif
4591 }
4592 
4593 static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4594 {
4595     return !ctx->is_pa20 && do_getshadowregs(ctx);
4596 }
4597 
4598 static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
4599 {
4600     return !ctx->is_pa20 && do_putshadowregs(ctx);
4601 }
4602 
4603 static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
4604 {
4605     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4606     nullify_over(ctx);
4607     TCGv_i64 dest = dest_gpr(ctx, a->rt);
4608     tcg_gen_ld_i64(dest, tcg_env,
4609                    offsetof(CPUHPPAState, dr[a->dr]));
4610     save_gpr(ctx, a->rt, dest);
4611     return nullify_end(ctx);
4612 }
4613 
4614 static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
4615 {
4616     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4617     nullify_over(ctx);
4618     tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
4619                    offsetof(CPUHPPAState, dr[a->dr]));
4620 #ifndef CONFIG_USER_ONLY
4621     if (ctx->is_pa20 && (a->dr == 2)) {
4622         /* Update gva_offset_mask from the new value of %dr2 */
4623         gen_helper_update_gva_offset_mask(tcg_env);
4624         /* Exit to capture the new value for the next TB. */
4625         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4626     }
4627 #endif
4628     return nullify_end(ctx);
4629 }
4630 
4631 static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
4632 {
4633     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4634     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4635     return true;
4636 }
4637 
4638 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4639 {
4640     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4641     uint64_t cs_base;
4642     int bound;
4643 
4644     ctx->cs = cs;
4645     ctx->tb_flags = ctx->base.tb->flags;
4646     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4647     ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
4648     ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;
4649 
4650 #ifdef CONFIG_USER_ONLY
4651     ctx->privilege = PRIV_USER;
4652     ctx->mmu_idx = MMU_USER_IDX;
4653     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4654 #else
4655     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4656     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4657                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4658                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
4659 #endif
4660 
4661     cs_base = ctx->base.tb->cs_base;
4662     ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;
4663 
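         /*
          * cs_base describes IAQ_Back relative to IAQ_Front: flags mark
          * a back queue in a different space or page, tracked through
          * the iasq_b/iaoq_b globals; otherwise cs_base carries the page
          * offset of IAQ_Back, a constant displacement from the front.
          */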
4664     if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
4665         ctx->iaq_b.space = cpu_iasq_b;
4666         ctx->iaq_b.base = cpu_iaoq_b;
4667     } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
4668         ctx->iaq_b.base = cpu_iaoq_b;
4669     } else {
4670         uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
4671         uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
4672         ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
4673     }
4674 
4675     ctx->zero = tcg_constant_i64(0);
4676 
4677     /* Bound the number of instructions by those left on the page.  */
4678     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4679     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4680 }
4681 
4682 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4683 {
4684     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4685 
4686     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4687     ctx->null_cond = cond_make_f();
4688     ctx->psw_n_nonzero = false;
4689     if (ctx->tb_flags & PSW_N) {
4690         ctx->null_cond.c = TCG_COND_ALWAYS;
4691         ctx->psw_n_nonzero = true;
4692     }
4693     ctx->null_lab = NULL;
4694 }
4695 
4696 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4697 {
4698     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4699     uint64_t iaoq_f, iaoq_b;
4700     int64_t diff;
4701 
4702     tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));
4703 
4704     iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
4705     if (iaqe_variable(&ctx->iaq_b)) {
4706         diff = INT32_MIN;
4707     } else {
4708         iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
4709         diff = iaoq_b - iaoq_f;
4710         /* Direct branches can only produce a 24-bit displacement. */
4711         tcg_debug_assert(diff == (int32_t)diff);
4712         tcg_debug_assert(diff != INT32_MIN);
4713     }
4714 
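         /*
          * Record the page offset of IAQ_Front and the front-to-back
          * displacement, from which hppa_restore_state_to_opc rebuilds
          * the queue on exception; INT32_MIN flags a variable IAQ_Back.
          */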
4715     tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
4716     ctx->insn_start_updated = false;
4717 }
4718 
4719 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4720 {
4721     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4722     CPUHPPAState *env = cpu_env(cs);
4723     DisasJumpType ret;
4724 
4725     /* Execute one insn.  */
4726 #ifdef CONFIG_USER_ONLY
4727     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4728         do_page_zero(ctx);
4729         ret = ctx->base.is_jmp;
4730         assert(ret != DISAS_NEXT);
4731     } else
4732 #endif
4733     {
4734         /* Always fetch the insn, even if nullified, so that we check
4735            the page permissions for execute.  */
4736         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4737 
4738         /*
4739          * Set up the IA queue for the next insn.
4740          * This will be overwritten by a branch.
4741          */
4742         ctx->iaq_n = NULL;
4743         memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
4744         ctx->psw_b_next = false;
4745 
4746         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4747             ctx->null_cond.c = TCG_COND_NEVER;
4748             ret = DISAS_NEXT;
4749         } else {
4750             ctx->insn = insn;
4751             if (!decode(ctx, insn)) {
4752                 gen_illegal(ctx);
4753             }
4754             ret = ctx->base.is_jmp;
4755             assert(ctx->null_lab == NULL);
4756         }
4757 
4758         if (ret != DISAS_NORETURN) {
4759             set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
4760         }
4761     }
4762 
4763     /* If the TranslationBlock must end, do so. */
4764     ctx->base.pc_next += 4;
4765     if (ret != DISAS_NEXT) {
4766         return;
4767     }
4768     /* Note this also detects a privilege level change. */
4769     if (iaqe_variable(&ctx->iaq_b)
4770         || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
4771         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
4772         return;
4773     }
4774 
4775     /*
4776      * Advance the insn queue.
4777      * The only exit now is DISAS_TOO_MANY from the translator loop.
4778      */
4779     ctx->iaq_f.disp = ctx->iaq_b.disp;
4780     if (!ctx->iaq_n) {
4781         ctx->iaq_b.disp += 4;
4782         return;
4783     }
4784     /*
4785      * If IAQ_Next is variable in any way, we need to copy into the
4786      * IAQ_Back globals, in case the next insn raises an exception.
4787      */
4788     if (ctx->iaq_n->base) {
4789         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
4790         ctx->iaq_b.base = cpu_iaoq_b;
4791         ctx->iaq_b.disp = 0;
4792     } else {
4793         ctx->iaq_b.disp = ctx->iaq_n->disp;
4794     }
4795     if (ctx->iaq_n->space) {
4796         tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
4797         ctx->iaq_b.space = cpu_iasq_b;
4798     }
4799 }
4800 
4801 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4802 {
4803     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4804     DisasJumpType is_jmp = ctx->base.is_jmp;
4805     /* Assume the insn queue has not been advanced. */
4806     DisasIAQE *f = &ctx->iaq_b;
4807     DisasIAQE *b = ctx->iaq_n;
4808 
4809     switch (is_jmp) {
4810     case DISAS_NORETURN:
4811         break;
4812     case DISAS_TOO_MANY:
4813         /* The insn queue has already been advanced. */
4814         f = &ctx->iaq_f;
4815         b = &ctx->iaq_b;
4816         /* FALLTHRU */
4817     case DISAS_IAQ_N_STALE:
4818         if (use_goto_tb(ctx, f, b)
4819             && (ctx->null_cond.c == TCG_COND_NEVER
4820                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4821             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4822             gen_goto_tb(ctx, 0, f, b);
4823             break;
4824         }
4825         /* FALLTHRU */
4826     case DISAS_IAQ_N_STALE_EXIT:
4827         install_iaq_entries(ctx, f, b);
4828         nullify_save(ctx);
4829         if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
4830             tcg_gen_exit_tb(NULL, 0);
4831             break;
4832         }
4833         /* FALLTHRU */
4834     case DISAS_IAQ_N_UPDATED:
4835         tcg_gen_lookup_and_goto_ptr();
4836         break;
4837     case DISAS_EXIT:
4838         tcg_gen_exit_tb(NULL, 0);
4839         break;
4840     default:
4841         g_assert_not_reached();
4842     }
4843 
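         /*
          * Emit the out-of-line exception paths collected while
          * translating: restore PSW[N] and IIR as recorded, reinstall
          * the insn queue of the faulting insn, and raise the exception.
          */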
4844     for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
4845         gen_set_label(e->lab);
4846         if (e->set_n >= 0) {
4847             tcg_gen_movi_i64(cpu_psw_n, e->set_n);
4848         }
4849         if (e->set_iir) {
4850             tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
4851                            offsetof(CPUHPPAState, cr[CR_IIR]));
4852         }
4853         install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
4854         gen_excp_1(e->excp);
4855     }
4856 }
4857 
4858 #ifdef CONFIG_USER_ONLY
4859 static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
4860                               CPUState *cs, FILE *logfile)
4861 {
4862     target_ulong pc = dcbase->pc_first;
4863 
4864     switch (pc) {
4865     case 0x00:
4866         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4867         return true;
4868     case 0xb0:
4869         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4870         return true;
4871     case 0xe0:
4872         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4873         return true;
4874     case 0x100:
4875         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4876         return true;
4877     }
4878     return false;
4879 }
4880 #endif
4881 
4882 static const TranslatorOps hppa_tr_ops = {
4883     .init_disas_context = hppa_tr_init_disas_context,
4884     .tb_start           = hppa_tr_tb_start,
4885     .insn_start         = hppa_tr_insn_start,
4886     .translate_insn     = hppa_tr_translate_insn,
4887     .tb_stop            = hppa_tr_tb_stop,
4888 #ifdef CONFIG_USER_ONLY
4889     .disas_log          = hppa_tr_disas_log,
4890 #endif
4891 };
4892 
4893 void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
4894                          int *max_insns, vaddr pc, void *host_pc)
4895 {
4896     DisasContext ctx = { };
4897     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4898 }
4899