xref: /openbmc/qemu/target/hppa/translate.c (revision 5872966d)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "exec/page-protection.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
37 /* Choose to use explicit sizes within this file. */
38 #undef tcg_temp_new
39 
/*
 * A lazily-evaluated condition: holds when "a0 <c> a1" is true.
 * TCG_COND_NEVER / TCG_COND_ALWAYS are represented without operands
 * (a0/a1 NULL); see cond_make_f / cond_make_t.
 */
typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;
44 
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /*
     * Instruction address offset queue: front (current insn), back (next
     * insn), and the entry following the back.  The value -1 means "not a
     * compile-time constant", in which case iaoq_n_var supplies iaoq_n
     * (see copy_iaoq_entry and gen_goto_tb).
     */
    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    /* Lazy nullification condition for the next insn. */
    DisasCond null_cond;
    /* Label skipping a nullified insn; see nullify_over / nullify_end. */
    TCGLabel *null_lab;

    /* TCG constant zero; stands in for r0 reads (see load_gpr). */
    TCGv_i64 zero;

    uint32_t insn;          /* raw instruction word (stored to CR_IIR on trap) */
    uint32_t tb_flags;      /* cached TB flags, e.g. PSW_W, TB_FLAG_SR_SAME */
    int mmu_idx;
    int privilege;          /* current privilege; 0 is most privileged */
    bool psw_n_nonzero;     /* true if cpu_psw_n may currently be nonzero */
    bool is_pa20;           /* pa2.0 (64-bit capable) cpu */
    bool insn_start_updated;    /* insn_start param written; see set_insn_breg */

#ifdef CONFIG_USER_ONLY
    MemOp unalign;          /* alignment requirement for memops; see UNALIGN() */
#endif
} DisasContext;
71 
72 #ifdef CONFIG_USER_ONLY
73 #define UNALIGN(C)       (C)->unalign
74 #define MMU_DISABLED(C)  false
75 #else
76 #define UNALIGN(C)       MO_ALIGN
77 #define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
78 #endif
79 
80 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
81 static int expand_sm_imm(DisasContext *ctx, int val)
82 {
83     /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
84     if (ctx->is_pa20) {
85         if (val & PSW_SM_W) {
86             val |= PSW_W;
87         }
88         val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
89     } else {
90         val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
91     }
92     return val;
93 }
94 
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    /* The field is stored complemented by the decoder pattern. */
    return ~val;
}
100 
101 /* Convert the M:A bits within a memory insn to the tri-state value
102    we use for the final M.  */
103 static int ma_to_m(DisasContext *ctx, int val)
104 {
105     return val & 2 ? (val & 1 ? -1 : 1) : 0;
106 }
107 
108 /* Convert the sign of the displacement to a pre or post-modify.  */
109 static int pos_to_m(DisasContext *ctx, int val)
110 {
111     return val ? 1 : -1;
112 }
113 
114 static int neg_to_m(DisasContext *ctx, int val)
115 {
116     return val ? -1 : 1;
117 }
118 
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    /* The low two bits of the offset are implied zero. */
    return val << 2;
}
124 
/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    /* The immediate occupies the high part; low 11 bits implied zero. */
    return val << 11;
}
130 
131 static int assemble_6(DisasContext *ctx, int val)
132 {
133     /*
134      * Officially, 32 * x + 32 - y.
135      * Here, x is already in bit 5, and y is [4:0].
136      * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
137      * with the overflow from bit 4 summing with x.
138      */
139     return (val ^ 31) + 1;
140 }
141 
142 /* Expander for assemble_16a(s,cat(im10a,0),i). */
143 static int expand_11a(DisasContext *ctx, int val)
144 {
145     /*
146      * @val is bit 0 and bits [4:15].
147      * Swizzle thing around depending on PSW.W.
148      */
149     int im10a = extract32(val, 1, 10);
150     int s = extract32(val, 11, 2);
151     int i = (-(val & 1) << 13) | (im10a << 3);
152 
153     if (ctx->tb_flags & PSW_W) {
154         i ^= s << 13;
155     }
156     return i;
157 }
158 
159 /* Expander for assemble_16a(s,im11a,i). */
160 static int expand_12a(DisasContext *ctx, int val)
161 {
162     /*
163      * @val is bit 0 and bits [3:15].
164      * Swizzle thing around depending on PSW.W.
165      */
166     int im11a = extract32(val, 1, 11);
167     int s = extract32(val, 12, 2);
168     int i = (-(val & 1) << 13) | (im11a << 2);
169 
170     if (ctx->tb_flags & PSW_W) {
171         i ^= s << 13;
172     }
173     return i;
174 }
175 
176 /* Expander for assemble_16(s,im14). */
177 static int expand_16(DisasContext *ctx, int val)
178 {
179     /*
180      * @val is bits [0:15], containing both im14 and s.
181      * Swizzle thing around depending on PSW.W.
182      */
183     int s = extract32(val, 14, 2);
184     int i = (-(val & 1) << 13) | extract32(val, 1, 13);
185 
186     if (ctx->tb_flags & PSW_W) {
187         i ^= s << 13;
188     }
189     return i;
190 }
191 
192 /* The sp field is only present with !PSW_W. */
193 static int sp0_if_wide(DisasContext *ctx, int sp)
194 {
195     return ctx->tb_flags & PSW_W ? 0 : sp;
196 }
197 
198 /* Translate CMPI doubleword conditions to standard. */
199 static int cmpbid_c(DisasContext *ctx, int val)
200 {
201     return val ? val : 4; /* 0 == "*<<" */
202 }
203 
204 /*
205  * In many places pa1.x did not decode the bit that later became
206  * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
207  */
208 static int pa20_d(DisasContext *ctx, int val)
209 {
210     return ctx->is_pa20 & val;
211 }
212 
213 /* Include the auto-generated decoder.  */
214 #include "decode-insns.c.inc"
215 
216 /* We are not using a goto_tb (for whatever reason), but have updated
217    the iaq (for whatever reason), so don't do it again on exit.  */
218 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
219 
220 /* We are exiting the TB, but have neither emitted a goto_tb, nor
221    updated the iaq for the next instruction to be executed.  */
222 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
223 
224 /* Similarly, but we want to return to the main loop immediately
225    to recognize unmasked interrupts.  */
226 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
227 #define DISAS_EXIT                  DISAS_TARGET_3
228 
229 /* global register indexes */
230 static TCGv_i64 cpu_gr[32];
231 static TCGv_i64 cpu_sr[4];
232 static TCGv_i64 cpu_srH;
233 static TCGv_i64 cpu_iaoq_f;
234 static TCGv_i64 cpu_iaoq_b;
235 static TCGv_i64 cpu_iasq_f;
236 static TCGv_i64 cpu_iasq_b;
237 static TCGv_i64 cpu_sar;
238 static TCGv_i64 cpu_psw_n;
239 static TCGv_i64 cpu_psw_v;
240 static TCGv_i64 cpu_psw_cb;
241 static TCGv_i64 cpu_psw_cb_msb;
242 
/* Allocate the TCG globals that mirror fields of CPUHPPAState. */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* r0 always reads as zero; load_gpr substitutes ctx->zero for it. */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH aliases sr[4]; see load_spr and TB_FLAG_SR_SAME. */
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
301 
/*
 * Record BREG in parameter 2 of the current insn_start op.
 * May be done at most once per instruction.
 * NOTE(review): the consumer is presumably the state-restore path
 * (restore_state_to_opc) -- confirm outside this chunk.
 */
static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}
308 
309 static DisasCond cond_make_f(void)
310 {
311     return (DisasCond){
312         .c = TCG_COND_NEVER,
313         .a0 = NULL,
314         .a1 = NULL,
315     };
316 }
317 
318 static DisasCond cond_make_t(void)
319 {
320     return (DisasCond){
321         .c = TCG_COND_ALWAYS,
322         .a0 = NULL,
323         .a1 = NULL,
324     };
325 }
326 
327 static DisasCond cond_make_n(void)
328 {
329     return (DisasCond){
330         .c = TCG_COND_NE,
331         .a0 = cpu_psw_n,
332         .a1 = tcg_constant_i64(0)
333     };
334 }
335 
336 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
337 {
338     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
339     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
340 }
341 
/* As cond_make_tmp, comparing A0 against constant zero. */
static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}
346 
347 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
348 {
349     TCGv_i64 tmp = tcg_temp_new_i64();
350     tcg_gen_mov_i64(tmp, a0);
351     return cond_make_0_tmp(c, tmp);
352 }
353 
354 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
355 {
356     TCGv_i64 t0 = tcg_temp_new_i64();
357     TCGv_i64 t1 = tcg_temp_new_i64();
358 
359     tcg_gen_mov_i64(t0, a0);
360     tcg_gen_mov_i64(t1, a1);
361     return cond_make_tmp(c, t0, t1);
362 }
363 
364 static void cond_free(DisasCond *cond)
365 {
366     switch (cond->c) {
367     default:
368         cond->a0 = NULL;
369         cond->a1 = NULL;
370         /* fallthru */
371     case TCG_COND_ALWAYS:
372         cond->c = TCG_COND_NEVER;
373         break;
374     case TCG_COND_NEVER:
375         break;
376     }
377 }
378 
379 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
380 {
381     if (reg == 0) {
382         return ctx->zero;
383     } else {
384         return cpu_gr[reg];
385     }
386 }
387 
388 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
389 {
390     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
391         return tcg_temp_new_i64();
392     } else {
393         return cpu_gr[reg];
394     }
395 }
396 
397 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
398 {
399     if (ctx->null_cond.c != TCG_COND_NEVER) {
400         tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
401                             ctx->null_cond.a1, dest, t);
402     } else {
403         tcg_gen_mov_i64(dest, t);
404     }
405 }
406 
407 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
408 {
409     if (reg != 0) {
410         save_or_nullify(ctx, cpu_gr[reg], t);
411     }
412 }
413 
414 #if HOST_BIG_ENDIAN
415 # define HI_OFS  0
416 # define LO_OFS  4
417 #else
418 # define HI_OFS  4
419 # define LO_OFS  0
420 #endif
421 
/* Load the single-precision register RT.  fr[] entries are 64 bits;
   rt < 32 selects the most-significant word, rt >= 32 the least. */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
430 
431 static TCGv_i32 load_frw0_i32(unsigned rt)
432 {
433     if (rt == 0) {
434         TCGv_i32 ret = tcg_temp_new_i32();
435         tcg_gen_movi_i32(ret, 0);
436         return ret;
437     } else {
438         return load_frw_i32(rt);
439     }
440 }
441 
442 static TCGv_i64 load_frw0_i64(unsigned rt)
443 {
444     TCGv_i64 ret = tcg_temp_new_i64();
445     if (rt == 0) {
446         tcg_gen_movi_i64(ret, 0);
447     } else {
448         tcg_gen_ld32u_i64(ret, tcg_env,
449                           offsetof(CPUHPPAState, fr[rt & 31])
450                           + (rt & 32 ? LO_OFS : HI_OFS));
451     }
452     return ret;
453 }
454 
/* Store VAL to single-precision register RT; same half selection
   as load_frw_i32. */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
461 
462 #undef HI_OFS
463 #undef LO_OFS
464 
465 static TCGv_i64 load_frd(unsigned rt)
466 {
467     TCGv_i64 ret = tcg_temp_new_i64();
468     tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
469     return ret;
470 }
471 
472 static TCGv_i64 load_frd0(unsigned rt)
473 {
474     if (rt == 0) {
475         TCGv_i64 ret = tcg_temp_new_i64();
476         tcg_gen_movi_i64(ret, 0);
477         return ret;
478     } else {
479         return load_frd(rt);
480     }
481 }
482 
/* Store VAL to double-precision register RT. */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
487 
/* Copy space register REG into DEST.  User-only has no spaces (always 0).
   SR0-3 are TCG globals; SR4-7 are loaded from env unless the TB was
   compiled with all of them known equal to srH (TB_FLAG_SR_SAME). */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
502 
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        /* Branch over the implementation when the condition holds;
           nullify_end emits the matching label.  */
        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
531 
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Next insn is unconditional: just make sure PSW[N] is clear. */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    /* If the condition came from cond_make_n, PSW[N] already holds it. */
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
548 
549 /* Set a PSW[N] to X.  The intention is that this is used immediately
550    before a goto_tb/exit_tb, so that there is no fallthru path to other
551    code within the TB.  Therefore we do not update psw_n_nonzero.  */
552 static void nullify_set(DisasContext *ctx, bool x)
553 {
554     if (ctx->psw_n_nonzero || x) {
555         tcg_gen_movi_i64(cpu_psw_n, x);
556     }
557 }
558 
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        /* The no-return path (e.g. an exception) was on the executed side
           of the branch; on the nullified side translation continues. */
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
597 
/* Set DEST to an IAOQ value: the constant IVAL when known (!= -1),
   otherwise the runtime value VVAL.  Either way the result is masked
   to the offset width implied by PSW.W (gva_offset_mask). */
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx->tb_flags);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}
619 
620 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
621 {
622     return ctx->iaoq_f + disp + 8;
623 }
624 
/* Emit a call to the exception helper for EXCEPTION. */
static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}
629 
/* Raise EXCEPTION: commit the known IAOQ values and the nullification
   state back to the globals first, then end the TB. */
static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
638 
/* Raise EXC with the raw instruction word recorded in CR[IIR],
   honoring nullification.  Returns true for tail-calling. */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}
647 
/* Raise an illegal-instruction trap for the current insn. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
652 
653 #ifdef CONFIG_USER_ONLY
654 #define CHECK_MOST_PRIVILEGED(EXCP) \
655     return gen_excp_iir(ctx, EXCP)
656 #else
657 #define CHECK_MOST_PRIVILEGED(EXCP) \
658     do {                                     \
659         if (ctx->privilege != 0) {           \
660             return gen_excp_iir(ctx, EXCP);  \
661         }                                    \
662     } while (0)
663 #endif
664 
/* True if a direct goto_tb to DEST is permitted; the page and
   single-step checks are delegated to the common translator helper. */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
669 
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    /* Same page iff the two queue entries agree in all page bits. */
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
679 
/* Jump to the instruction queue (F, B), as slot WHICH of this TB. */
static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        /* Both queue entries are constants: chain directly to the next TB. */
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        /* Otherwise compute the queue values and jump indirectly. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}
694 
695 static bool cond_need_sv(int c)
696 {
697     return c == 2 || c == 3 || c == 6;
698 }
699 
700 static bool cond_need_cb(int c)
701 {
702     return c == 4 || c == 5;
703 }
704 
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 *
 * CF[0] negates the sense of the test selected by CF[3:1].  D selects
 * doubleword (64-bit) evaluation; when false, values are narrowed to
 * 32 bits before testing.  RES is the operation result, UV the
 * unsigned-overflow indicator, SV the signed-overflow indicator;
 * UV/SV may be NULL when the chosen condition does not need them
 * (see cond_need_cb / cond_need_sv).
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (!d) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (!d) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (!d) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_0(TCG_COND_EQ, uv);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        /* tmp = UV ? res : 0; then tmp == 0 gives !UV | Z. */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        if (!d) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (!d) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF selects the negated sense of the condition. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
791 
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    /* Conditions 1-5 become direct comparisons of IN1 against IN2. */
    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        /* The remaining conditions are computed from res/sv as usual. */
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    /* The low bit of CF selects the negated sense of the condition. */
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    /* Without D, narrow the operands to 32 bits before comparing. */
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}
846 
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    /* Note: the full 4-bit CF, including the negation bit, is switched
       on here, so both polarities of each condition appear. */
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    /* Without D, narrow the result to 32 bits before comparing. */
    if (!d) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}
920 
921 /* Similar, but for shift/extract/deposit conditions.  */
922 
923 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
924                              TCGv_i64 res)
925 {
926     unsigned c, f;
927 
928     /* Convert the compressed condition codes to standard.
929        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
930        4-7 are the reverse of 0-3.  */
931     c = orig & 3;
932     if (c == 3) {
933         c = 7;
934     }
935     f = (orig & 4) / 4;
936 
937     return do_log_cond(ctx, c * 2 + f, d, res);
938 }
939 
/* Similar, but for unit zero conditions.  */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    /* With D set, replicate masks into both 32-bit halves of the word. */
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        /* Word test only exists in doubleword mode; otherwise ones
           stays 0 and we fall into the undefined path below. */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;    /* bit 0 of every byte */
        sgns = ones << 7;               /* sign bit of every byte */
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;    /* bit 0 of every halfword */
        sgns = ones << 15;              /* sign bit of every halfword */
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);
    tcg_gen_andi_i64(tmp, tmp, sgns);

    return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp);
}
979 
980 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
981                           TCGv_i64 cb, TCGv_i64 cb_msb)
982 {
983     if (!d) {
984         TCGv_i64 t = tcg_temp_new_i64();
985         tcg_gen_extract_i64(t, cb, 32, 1);
986         return t;
987     }
988     return cb_msb;
989 }
990 
/* As get_carry, using the live PSW carry/borrow globals. */
static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
995 
/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Standard formula: overflow iff both inputs have the same sign
       and the result's sign differs: (res ^ in1) & ~(in1 ^ in2). */
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    /* IN1 is ORIG_IN1 pre-shifted by SHIFT (see caller); the shift
       itself may also have overflowed. */
    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}
1036 
1037 /* Compute unsigned overflow for addition.  */
1038 static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
1039                           TCGv_i64 in1, int shift, bool d)
1040 {
1041     if (shift == 0) {
1042         return get_carry(ctx, d, cb, cb_msb);
1043     } else {
1044         TCGv_i64 tmp = tcg_temp_new_i64();
1045         tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
1046         tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
1047         return tmp;
1048     }
1049 }
1050 
1051 /* Compute signed overflow for subtraction.  */
1052 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
1053                           TCGv_i64 in1, TCGv_i64 in2)
1054 {
1055     TCGv_i64 sv = tcg_temp_new_i64();
1056     TCGv_i64 tmp = tcg_temp_new_i64();
1057 
1058     tcg_gen_xor_i64(sv, res, in1);
1059     tcg_gen_xor_i64(tmp, in1, in2);
1060     tcg_gen_and_i64(sv, sv, tmp);
1061 
1062     return sv;
1063 }
1064 
/*
 * Emit an addition and derive its condition, covering the ADD family.
 *   shift  - pre-shift ORIG_IN1 left by this amount (shift-and-add forms)
 *   is_l   - "logical" form: carry bits are not written back
 *   is_tsv - trap on signed overflow
 *   is_tc  - trap on condition
 *   is_c   - also add in the PSW carry bit
 *   cf, d  - condition field and operand width from the instruction
 * The computed condition becomes the new nullification state.
 */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    /* Compute the full carry vector when it must be written back
       or when the condition needs the carry-out. */
    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        /* Per-bit carry-out vector: IN1 ^ IN2 ^ DEST. */
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            if (!d) {
                /* Only the low word is significant; sign-extend for test. */
                tcg_gen_ext32s_i64(sv, sv);
            }
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute unsigned overflow if required.  */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1139 
1140 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1141                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1142 {
1143     TCGv_i64 tcg_r1, tcg_r2;
1144 
1145     if (a->cf) {
1146         nullify_over(ctx);
1147     }
1148     tcg_r1 = load_gpr(ctx, a->r1);
1149     tcg_r2 = load_gpr(ctx, a->r2);
1150     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1151            is_tsv, is_tc, is_c, a->cf, a->d);
1152     return nullify_end(ctx);
1153 }
1154 
1155 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1156                        bool is_tsv, bool is_tc)
1157 {
1158     TCGv_i64 tcg_im, tcg_r2;
1159 
1160     if (a->cf) {
1161         nullify_over(ctx);
1162     }
1163     tcg_im = tcg_constant_i64(a->i);
1164     tcg_r2 = load_gpr(ctx, a->r);
1165     /* All ADDI conditions are 32-bit. */
1166     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1167     return nullify_end(ctx);
1168 }
1169 
/*
 * Emit a subtraction and derive its condition, covering the SUB family.
 *   is_tsv - trap on signed overflow
 *   is_b   - "borrow" form: fold in the PSW carry/borrow bit
 *   is_tc  - trap on condition
 *   cf, d  - condition field and operand width from the instruction
 * The computed condition becomes the new nullification state.
 */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        /* Per-bit carry vector: ~IN2 ^ IN1 ^ DEST. */
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        /* eqv(IN1, IN2) == IN1 ^ ~IN2; combined with DEST as above. */
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            if (!d) {
                /* Only the low word is significant; sign-extend for test. */
                tcg_gen_ext32s_i64(sv, sv);
            }
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1236 
1237 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1238                        bool is_tsv, bool is_b, bool is_tc)
1239 {
1240     TCGv_i64 tcg_r1, tcg_r2;
1241 
1242     if (a->cf) {
1243         nullify_over(ctx);
1244     }
1245     tcg_r1 = load_gpr(ctx, a->r1);
1246     tcg_r2 = load_gpr(ctx, a->r2);
1247     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1248     return nullify_end(ctx);
1249 }
1250 
1251 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1252 {
1253     TCGv_i64 tcg_im, tcg_r2;
1254 
1255     if (a->cf) {
1256         nullify_over(ctx);
1257     }
1258     tcg_im = tcg_constant_i64(a->i);
1259     tcg_r2 = load_gpr(ctx, a->r);
1260     /* All SUBI conditions are 32-bit. */
1261     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1262     return nullify_end(ctx);
1263 }
1264 
1265 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1266                       TCGv_i64 in2, unsigned cf, bool d)
1267 {
1268     TCGv_i64 dest, sv;
1269     DisasCond cond;
1270 
1271     dest = tcg_temp_new_i64();
1272     tcg_gen_sub_i64(dest, in1, in2);
1273 
1274     /* Compute signed overflow if required.  */
1275     sv = NULL;
1276     if (cond_need_sv(cf >> 1)) {
1277         sv = do_sub_sv(ctx, dest, in1, in2);
1278     }
1279 
1280     /* Form the condition for the compare.  */
1281     cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1282 
1283     /* Clear.  */
1284     tcg_gen_movi_i64(dest, 0);
1285     save_gpr(ctx, rt, dest);
1286 
1287     /* Install the new nullification.  */
1288     cond_free(&ctx->null_cond);
1289     ctx->null_cond = cond;
1290 }
1291 
1292 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1293                    TCGv_i64 in2, unsigned cf, bool d,
1294                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1295 {
1296     TCGv_i64 dest = dest_gpr(ctx, rt);
1297 
1298     /* Perform the operation, and writeback.  */
1299     fn(dest, in1, in2);
1300     save_gpr(ctx, rt, dest);
1301 
1302     /* Install the new nullification.  */
1303     cond_free(&ctx->null_cond);
1304     if (cf) {
1305         ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1306     }
1307 }
1308 
1309 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1310                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1311 {
1312     TCGv_i64 tcg_r1, tcg_r2;
1313 
1314     if (a->cf) {
1315         nullify_over(ctx);
1316     }
1317     tcg_r1 = load_gpr(ctx, a->r1);
1318     tcg_r2 = load_gpr(ctx, a->r2);
1319     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1320     return nullify_end(ctx);
1321 }
1322 
/*
 * Emit an add or subtract for the "unit" instructions, whose conditions
 * test carry-outs of independent sub-units (nibbles, bytes, halfwords,
 * or words) selected by the condition field CF.
 *   is_tc  - trap on condition
 *   is_add - add IN2 rather than subtract it
 */
static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        /* For 32-bit operations, only the low word's carries exist. */
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            /* Shift the 65-bit carry vector down one, pulling in the
               bit 64 carry-out from CB_MSB. */
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            /* Align the carry-out bits with the masks in TEST_CB. */
            tcg_gen_shri_i64(cb, cb, 1);
        }

        tcg_gen_andi_i64(cb, cb, test_cb);
        cond = cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, cb);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1404 
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            /* ~SP encoding: recover the raw 3-bit specifier. */
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* With TB_FLAG_SR_SAME set, the cached cpu_srH is used directly. */
        return cpu_srH;
    }

    /* Otherwise index env->sr[] dynamically by the top of the address. */
    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    /* Load from sr[4] .. sr[7]. */
    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif
1443 
/*
 * Form a guest virtual address from base register RB, an optional index
 * register RX shifted by SCALE, and displacement DISP (RX and DISP are
 * mutually exclusive).
 *   sp      - space specifier, passed through to space_select
 *   modify  - < 0 pre-modify, > 0 post-modify, 0 no base update
 *   is_phys - physical/disabled-MMU access: do not attach a space id
 * Returns the masked address in *PGVA and the raw offset (the value the
 * caller writes back to RB when modifying) in *POFS.
 */
static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    /* Pre-modify and plain accesses use the updated offset; post-modify
       accesses use the unmodified base for the access itself. */
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     gva_offset_mask(ctx->tb_flags));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
1476 
1477 /* Emit a memory load.  The modify parameter should be
1478  * < 0 for pre-modify,
1479  * > 0 for post-modify,
1480  * = 0 for no base register update.
1481  */
1482 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1483                        unsigned rx, int scale, int64_t disp,
1484                        unsigned sp, int modify, MemOp mop)
1485 {
1486     TCGv_i64 ofs;
1487     TCGv_i64 addr;
1488 
1489     /* Caller uses nullify_over/nullify_end.  */
1490     assert(ctx->null_cond.c == TCG_COND_NEVER);
1491 
1492     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1493              MMU_DISABLED(ctx));
1494     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1495     if (modify) {
1496         save_gpr(ctx, rb, ofs);
1497     }
1498 }
1499 
1500 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1501                        unsigned rx, int scale, int64_t disp,
1502                        unsigned sp, int modify, MemOp mop)
1503 {
1504     TCGv_i64 ofs;
1505     TCGv_i64 addr;
1506 
1507     /* Caller uses nullify_over/nullify_end.  */
1508     assert(ctx->null_cond.c == TCG_COND_NEVER);
1509 
1510     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1511              MMU_DISABLED(ctx));
1512     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1513     if (modify) {
1514         save_gpr(ctx, rb, ofs);
1515     }
1516 }
1517 
1518 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1519                         unsigned rx, int scale, int64_t disp,
1520                         unsigned sp, int modify, MemOp mop)
1521 {
1522     TCGv_i64 ofs;
1523     TCGv_i64 addr;
1524 
1525     /* Caller uses nullify_over/nullify_end.  */
1526     assert(ctx->null_cond.c == TCG_COND_NEVER);
1527 
1528     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1529              MMU_DISABLED(ctx));
1530     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1531     if (modify) {
1532         save_gpr(ctx, rb, ofs);
1533     }
1534 }
1535 
1536 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1537                         unsigned rx, int scale, int64_t disp,
1538                         unsigned sp, int modify, MemOp mop)
1539 {
1540     TCGv_i64 ofs;
1541     TCGv_i64 addr;
1542 
1543     /* Caller uses nullify_over/nullify_end.  */
1544     assert(ctx->null_cond.c == TCG_COND_NEVER);
1545 
1546     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1547              MMU_DISABLED(ctx));
1548     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1549     if (modify) {
1550         save_gpr(ctx, rb, ofs);
1551     }
1552 }
1553 
1554 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1555                     unsigned rx, int scale, int64_t disp,
1556                     unsigned sp, int modify, MemOp mop)
1557 {
1558     TCGv_i64 dest;
1559 
1560     nullify_over(ctx);
1561 
1562     if (modify == 0) {
1563         /* No base register update.  */
1564         dest = dest_gpr(ctx, rt);
1565     } else {
1566         /* Make sure if RT == RB, we see the result of the load.  */
1567         dest = tcg_temp_new_i64();
1568     }
1569     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1570     save_gpr(ctx, rt, dest);
1571 
1572     return nullify_end(ctx);
1573 }
1574 
1575 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1576                       unsigned rx, int scale, int64_t disp,
1577                       unsigned sp, int modify)
1578 {
1579     TCGv_i32 tmp;
1580 
1581     nullify_over(ctx);
1582 
1583     tmp = tcg_temp_new_i32();
1584     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1585     save_frw_i32(rt, tmp);
1586 
1587     if (rt == 0) {
1588         gen_helper_loaded_fr0(tcg_env);
1589     }
1590 
1591     return nullify_end(ctx);
1592 }
1593 
1594 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1595 {
1596     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1597                      a->disp, a->sp, a->m);
1598 }
1599 
1600 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1601                       unsigned rx, int scale, int64_t disp,
1602                       unsigned sp, int modify)
1603 {
1604     TCGv_i64 tmp;
1605 
1606     nullify_over(ctx);
1607 
1608     tmp = tcg_temp_new_i64();
1609     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1610     save_frd(rt, tmp);
1611 
1612     if (rt == 0) {
1613         gen_helper_loaded_fr0(tcg_env);
1614     }
1615 
1616     return nullify_end(ctx);
1617 }
1618 
1619 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1620 {
1621     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1622                      a->disp, a->sp, a->m);
1623 }
1624 
1625 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1626                      int64_t disp, unsigned sp,
1627                      int modify, MemOp mop)
1628 {
1629     nullify_over(ctx);
1630     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1631     return nullify_end(ctx);
1632 }
1633 
1634 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1635                        unsigned rx, int scale, int64_t disp,
1636                        unsigned sp, int modify)
1637 {
1638     TCGv_i32 tmp;
1639 
1640     nullify_over(ctx);
1641 
1642     tmp = load_frw_i32(rt);
1643     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1644 
1645     return nullify_end(ctx);
1646 }
1647 
1648 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1649 {
1650     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1651                       a->disp, a->sp, a->m);
1652 }
1653 
1654 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1655                        unsigned rx, int scale, int64_t disp,
1656                        unsigned sp, int modify)
1657 {
1658     TCGv_i64 tmp;
1659 
1660     nullify_over(ctx);
1661 
1662     tmp = load_frd(rt);
1663     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1664 
1665     return nullify_end(ctx);
1666 }
1667 
1668 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1669 {
1670     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1671                       a->disp, a->sp, a->m);
1672 }
1673 
1674 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1675                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1676 {
1677     TCGv_i32 tmp;
1678 
1679     nullify_over(ctx);
1680     tmp = load_frw0_i32(ra);
1681 
1682     func(tmp, tcg_env, tmp);
1683 
1684     save_frw_i32(rt, tmp);
1685     return nullify_end(ctx);
1686 }
1687 
1688 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1689                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1690 {
1691     TCGv_i32 dst;
1692     TCGv_i64 src;
1693 
1694     nullify_over(ctx);
1695     src = load_frd(ra);
1696     dst = tcg_temp_new_i32();
1697 
1698     func(dst, tcg_env, src);
1699 
1700     save_frw_i32(rt, dst);
1701     return nullify_end(ctx);
1702 }
1703 
1704 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1705                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1706 {
1707     TCGv_i64 tmp;
1708 
1709     nullify_over(ctx);
1710     tmp = load_frd0(ra);
1711 
1712     func(tmp, tcg_env, tmp);
1713 
1714     save_frd(rt, tmp);
1715     return nullify_end(ctx);
1716 }
1717 
1718 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1719                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1720 {
1721     TCGv_i32 src;
1722     TCGv_i64 dst;
1723 
1724     nullify_over(ctx);
1725     src = load_frw0_i32(ra);
1726     dst = tcg_temp_new_i64();
1727 
1728     func(dst, tcg_env, src);
1729 
1730     save_frd(rt, dst);
1731     return nullify_end(ctx);
1732 }
1733 
1734 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1735                         unsigned ra, unsigned rb,
1736                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1737 {
1738     TCGv_i32 a, b;
1739 
1740     nullify_over(ctx);
1741     a = load_frw0_i32(ra);
1742     b = load_frw0_i32(rb);
1743 
1744     func(a, tcg_env, a, b);
1745 
1746     save_frw_i32(rt, a);
1747     return nullify_end(ctx);
1748 }
1749 
1750 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1751                         unsigned ra, unsigned rb,
1752                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1753 {
1754     TCGv_i64 a, b;
1755 
1756     nullify_over(ctx);
1757     a = load_frd0(ra);
1758     b = load_frd0(rb);
1759 
1760     func(a, tcg_env, a, b);
1761 
1762     save_frd(rt, a);
1763     return nullify_end(ctx);
1764 }
1765 
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  LINK, if non-zero, receives
   the following queue entry as the return address.  IS_N requests
   nullification of the insn in the branch delay slot.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: simply retarget the insn queue. */
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* Skip the nullified delay slot insn entirely. */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        /* The branch itself was nullified: continue sequentially. */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1802 
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  DISP is the
   signed displacement; IS_N requests nullification of the delay slot
   insn in the direction of the branch (taken-forward / untaken-backward,
   per the nullify_set calls below).  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1868 
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  LINK, if non-zero, receives the
   following queue entry; IS_N requests nullification of the delay slot
   insn.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Branch is not itself conditional/nullified. */
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the delay slot: queue DEST/DEST+4 directly. */
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
        next = tcg_temp_new_i64();
        tcg_gen_addi_i64(next, dest, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);

        nullify_over(ctx);
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        /* Branch conditionally nullified: select between the fall-through
           address and DEST with movcond on the current null condition. */
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new_i64();
        next = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
1953 
1954 /* Implement
1955  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1956  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1957  *    else
1958  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1959  * which keeps the privilege level from being increased.
1960  */
1961 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1962 {
1963     TCGv_i64 dest;
1964     switch (ctx->privilege) {
1965     case 0:
1966         /* Privilege 0 is maximum and is allowed to decrease.  */
1967         return offset;
1968     case 3:
1969         /* Privilege 3 is minimum and is never allowed to increase.  */
1970         dest = tcg_temp_new_i64();
1971         tcg_gen_ori_i64(dest, offset, 3);
1972         break;
1973     default:
1974         dest = tcg_temp_new_i64();
1975         tcg_gen_andi_i64(dest, offset, -4);
1976         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1977         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1978         break;
1979     }
1980     return dest;
1981 }
1982 
1983 #ifdef CONFIG_USER_ONLY
1984 /* On Linux, page zero is normally marked execute only + gateway.
1985    Therefore normal read or write is supposed to fail, but specific
1986    offsets have kernel code mapped to raise permissions to implement
1987    system calls.  Handling this via an explicit check here, rather
1988    in than the "be disp(sr2,r0)" instruction that probably sent us
1989    here, is the easiest way to handle the branch delay slot on the
1990    aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_i64 tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the gateway-page entry point; the low two bits hold
       the privilege field, hence the & -4.  */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        /* Store the new thread pointer in cr27, then return to the
           address in gr31 with lowest privilege forced (| 3).  */
        tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new_i64();
        tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
2051 #endif
2052 
/* NOP: emits nothing; only resolves any pending nullification state.  */
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}
2058 
/* BREAK: raise the break-instruction exception.  */
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}
2063 
/* SYNC: emit a full memory barrier.  */
static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}
2072 
2073 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2074 {
2075     unsigned rt = a->t;
2076     TCGv_i64 tmp = dest_gpr(ctx, rt);
2077     tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL);
2078     save_gpr(ctx, rt, tmp);
2079 
2080     cond_free(&ctx->null_cond);
2081     return true;
2082 }
2083 
2084 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2085 {
2086     unsigned rt = a->t;
2087     unsigned rs = a->sp;
2088     TCGv_i64 t0 = tcg_temp_new_i64();
2089 
2090     load_spr(ctx, t0, rs);
2091     tcg_gen_shri_i64(t0, t0, 32);
2092 
2093     save_gpr(ctx, rt, t0);
2094 
2095     cond_free(&ctx->null_cond);
2096     return true;
2097 }
2098 
/* MFCTL: move from a control register to a general register.  */
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        /* cr26/cr27 are readable at any privilege level.  */
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic path: load the control register straight from env.  */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
2143 
/* MTSP: move a general register into a space register.  */
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    /* SR5-SR7 are privileged.  */
    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    /* Space values live in the high 32 bits of the 64-bit slot.  */
    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        /* SR4+ are stored only in env; writing may break the
           "all of SR4-SR7 identical" fast-path assumption.  */
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}
2167 
2168 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2169 {
2170     unsigned ctl = a->t;
2171     TCGv_i64 reg;
2172     TCGv_i64 tmp;
2173 
2174     if (ctl == CR_SAR) {
2175         reg = load_gpr(ctx, a->r);
2176         tmp = tcg_temp_new_i64();
2177         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2178         save_or_nullify(ctx, cpu_sar, tmp);
2179 
2180         cond_free(&ctx->null_cond);
2181         return true;
2182     }
2183 
2184     /* All other control registers are privileged or read-only.  */
2185     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2186 
2187 #ifndef CONFIG_USER_ONLY
2188     nullify_over(ctx);
2189 
2190     if (ctx->is_pa20) {
2191         reg = load_gpr(ctx, a->r);
2192     } else {
2193         reg = tcg_temp_new_i64();
2194         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2195     }
2196 
2197     switch (ctl) {
2198     case CR_IT:
2199         if (translator_io_start(&ctx->base)) {
2200             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2201         }
2202         gen_helper_write_interval_timer(tcg_env, reg);
2203         break;
2204     case CR_EIRR:
2205         /* Helper modifies interrupt lines and is therefore IO. */
2206         translator_io_start(&ctx->base);
2207         gen_helper_write_eirr(tcg_env, reg);
2208         /* Exit to re-evaluate interrupts in the main loop. */
2209         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2210         break;
2211 
2212     case CR_IIASQ:
2213     case CR_IIAOQ:
2214         /* FIXME: Respect PSW_Q bit */
2215         /* The write advances the queue and stores to the back element.  */
2216         tmp = tcg_temp_new_i64();
2217         tcg_gen_ld_i64(tmp, tcg_env,
2218                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2219         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2220         tcg_gen_st_i64(reg, tcg_env,
2221                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2222         break;
2223 
2224     case CR_PID1:
2225     case CR_PID2:
2226     case CR_PID3:
2227     case CR_PID4:
2228         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2229 #ifndef CONFIG_USER_ONLY
2230         gen_helper_change_prot_id(tcg_env);
2231 #endif
2232         break;
2233 
2234     case CR_EIEM:
2235         /* Exit to re-evaluate interrupts in the main loop. */
2236         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2237         /* FALLTHRU */
2238     default:
2239         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2240         break;
2241     }
2242     return nullify_end(ctx);
2243 #endif
2244 }
2245 
/* MTSARCM: write the complement of GR[r] into SAR, masked to the
   shift-amount width (6 bits on pa2.0, 5 on pa1.x).  */
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}
2257 
/* LDSID: load the space identifier for an address into GR[t].  */
static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_i64(dest, 0);
#else
    /* Select the space register via sp/base, then extract the space
       value from the high 32 bits of the slot.  */
    tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(dest, dest, 32);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
2274 
/* RSM: reset (clear) system-mask bits; returns the old PSW in GR[t].  */
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
#ifdef CONFIG_USER_ONLY
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#else
    TCGv_i64 tmp;

    /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
    if (a->i) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    }

    nullify_over(ctx);

    /* Compute PSW & ~i and swap it in, saving the old value.  */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_i64(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2300 
/* SSM: set system-mask bits; returns the old PSW in GR[t].  */
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    /* Compute PSW | i and swap it in, saving the old value.  */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2320 
/* MTSM: replace the system mask with GR[r]; the old mask (returned by
   the helper) is discarded, as MTSM has no target register.  */
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2337 
/* Common body for RFI and RFI,R (return from interruption, optionally
   restoring the shadow registers).  */
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}
2356 
/* RFI: return from interruption.  */
static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

/* RFI,R: return from interruption, restoring shadow registers.  */
static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
2366 
/* Halt the emulated machine (QEMU-specific privileged operation).  */
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2377 
/* Reset the emulated machine (QEMU-specific privileged operation).  */
static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2388 
/* Restore gr1/8/9/16/17/24/25 from the interruption shadow registers.  */
static bool do_getshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}
2402 
/* Save gr1/8/9/16/17/24/25 into the interruption shadow registers
   (mirror of do_getshadowregs).  */
static bool do_putshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}
2416 
/* GETSHADOWREGS instruction wrapper.  */
static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    return do_getshadowregs(ctx);
}
2421 
2422 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2423 {
2424     if (a->m) {
2425         TCGv_i64 dest = dest_gpr(ctx, a->b);
2426         TCGv_i64 src1 = load_gpr(ctx, a->b);
2427         TCGv_i64 src2 = load_gpr(ctx, a->x);
2428 
2429         /* The only thing we need to do is the base register modification.  */
2430         tcg_gen_add_i64(dest, src1, src2);
2431         save_gpr(ctx, a->b, dest);
2432     }
2433     cond_free(&ctx->null_cond);
2434     return true;
2435 }
2436 
/* FIC: flush instruction cache, modelled as an address no-op.  */
static bool trans_fic(DisasContext *ctx, arg_ldst *a)
{
    /* End TB for flush instruction cache, so we pick up new insns. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    return trans_nop_addrx(ctx, a);
}
2443 
/* PROBE: test read or write access to an address at a given privilege
   level; the result is written to GR[t] by the helper.  */
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_i64 dest, ofs;
    TCGv_i32 level, want;
    TCGv_i64 addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    /* The privilege level is either an immediate or the low two
       bits of GR[ri].  */
    if (a->imm) {
        level = tcg_constant_i32(a->ri & 3);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, tcg_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}
2469 
/* Insert TLB address/protection (pa1.x form only).  */
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    /* This encoding is pa1.x only.  */
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    /* a->addr selects the address (ITLBA) vs protection (ITLBP) insert.  */
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2497 
/* Common body for PDTLB/PITLB (purge TLB entry), optionally the
   pa2.0 local-only variant.  */
static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /*
     * Page align now, rather than later, so that we can add in the
     * page_size field from pa2.0 from the low 4 bits of GR[b].
     */
    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
    if (ctx->is_pa20) {
        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }

    if (local) {
        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    /* Base-register modification writes back the pre-alignment offset.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2535 
/* PxTLB: purge TLB entry (broadcast form).  */
static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
{
    return do_pxtlb(ctx, a, false);
}

/* PxTLB,L: local purge; pa2.0 only.  */
static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
{
    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
}
2545 
/* PxTLBE: purge the entire TLB (the address operand only contributes
   the base-register modification side effect).  */
static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    trans_nop_addrx(ctx, a);
    gen_helper_ptlbe(tcg_env);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2562 
2563 /*
2564  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2565  * See
2566  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2567  *     page 13-9 (195/206)
2568  */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    /* This fast-insert form is pa1.x (pcxl/pcxl2) only.  */
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *    return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    /* The insert address comes from the interruption registers:
       ISR/IOR for data, IIASQ/IIAOQ for instruction inserts.  */
    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    /* Combine space (high half) and offset (low half).  */
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2614 
/* pa2.0 combined TLB insert: IDTLBT (data) / IITLBT (instruction).  */
static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2640 
/* LPA: translate a virtual address and store the physical address
   into GR[t].  */
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}
2664 
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, ctx->zero);

    cond_free(&ctx->null_cond);
    return true;
}
2678 
/*
 * ADD/SUB register-form wrappers.  The boolean flag order is inferred
 * from the trans_* names: do_add_reg(ctx, a, l, tsv, tc, c) and
 * do_sub_reg(ctx, a, tsv, b, tc) -- confirm against their definitions.
 */
static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
2733 
/* ANDCM: rt = r1 & ~r2.  */
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}

/* AND: rt = r1 & r2.  */
static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}
2743 
/* OR, including the architectural NOP/COPY idioms and QEMU's PAUSE
   extension.  */
static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    /* The special cases only apply when no condition is requested.  */
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}
2795 
/* XOR: rt = r1 ^ r2.  */
static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}
2800 
/* CMPCLR: compare r1 with r2, clear GR[t].  */
static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
    return nullify_end(ctx);
}
2813 
/* UXOR: exclusive-or with unit (per-byte/halfword) conditions.  */
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2, dest;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
    save_gpr(ctx, a->t, dest);

    /* Replace the nullification condition with the unit-zero test
       on the result, if a condition was requested.  */
    cond_free(&ctx->null_cond);
    if (a->cf) {
        ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
    }

    return nullify_end(ctx);
}
2836 
/* Common body for UADDCM / UADDCM,TC: rt = r1 + ~r2 with unit carries.  */
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    /* Without a condition, the result can be computed directly.  */
    if (a->cf == 0) {
        tcg_r2 = load_gpr(ctx, a->r2);
        tmp = dest_gpr(ctx, a->t);

        if (a->r1 == 0) {
            /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
            tcg_gen_not_i64(tmp, tcg_r2);
        } else {
            /*
             * Recall that r1 - r2 == r1 + ~r2 + 1.
             * Thus r1 + ~r2 == r1 - r2 - 1,
             * which does not require an extra temporary.
             */
            tcg_r1 = load_gpr(ctx, a->r1);
            tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
            tcg_gen_subi_i64(tmp, tmp, 1);
        }
        save_gpr(ctx, a->t, tmp);
        cond_free(&ctx->null_cond);
        return true;
    }

    /* With a condition, go through the full unit add/sub path.  */
    nullify_over(ctx);
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
    return nullify_end(ctx);
}
2871 
/* UADDCM: unit add complement.  */
static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}

/* UADDCM,TC: unit add complement, trap on condition.  */
static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, true);
}
2881 
/* Common body for DCOR / IDCOR: decimal correction using the
   per-nibble carry bits in PSW[CB].  */
static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    /* Reassemble the 16 nibble-carry bits from cb/cb_msb.  */
    tmp = tcg_temp_new_i64();
    tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
    if (!is_i) {
        tcg_gen_not_i64(tmp, tmp);
    }
    /* Keep one bit per nibble, then scale to a 6-per-nibble correction.  */
    tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
                   a->cf, a->d, false, is_i);
    return nullify_end(ctx);
}
2899 
/* DCOR: decimal correct.  */
static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}

/* IDCOR: intermediate decimal correct.  */
static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}
2909 
/* DS: divide step.  Performs one conditional-add/subtract step of the
   non-restoring division algorithm, updating PSW[CB] and PSW[V].  */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    /* Two-stage add so the carry-out lands in cpu_psw_cb_msb.  */
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
    tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
                     addc, ctx->zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /*
     * Write back PSW[V] for the division step.
     * Shift cb{8} from where it lives in bit 32 to bit 31,
     * so that it overlaps r2{32} in bit 31.
     */
    tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL, uv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
        } else if (cond_need_cb(a->cf >> 1)) {
            uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
    }

    return nullify_end(ctx);
}
2970 
/* ADDI: add immediate; no overflow trap, no trap on condition. */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}
2975 
/* ADDI,TSV: add immediate, trapping on signed overflow. */
static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}
2980 
/* ADDI,TC: add immediate, trapping when the condition is satisfied. */
static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}
2985 
/* ADDI,TC,TSV: add immediate, trapping on condition or signed overflow. */
static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}
2990 
/* SUBI: subtract register from immediate; no overflow trap. */
static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}
2995 
/* SUBI,TSV: subtract register from immediate, trapping on signed overflow. */
static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
3000 
3001 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3002 {
3003     TCGv_i64 tcg_im, tcg_r2;
3004 
3005     if (a->cf) {
3006         nullify_over(ctx);
3007     }
3008 
3009     tcg_im = tcg_constant_i64(a->i);
3010     tcg_r2 = load_gpr(ctx, a->r);
3011     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3012 
3013     return nullify_end(ctx);
3014 }
3015 
3016 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3017                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3018 {
3019     TCGv_i64 r1, r2, dest;
3020 
3021     if (!ctx->is_pa20) {
3022         return false;
3023     }
3024 
3025     nullify_over(ctx);
3026 
3027     r1 = load_gpr(ctx, a->r1);
3028     r2 = load_gpr(ctx, a->r2);
3029     dest = dest_gpr(ctx, a->t);
3030 
3031     fn(dest, r1, r2);
3032     save_gpr(ctx, a->t, dest);
3033 
3034     return nullify_end(ctx);
3035 }
3036 
3037 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3038                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3039 {
3040     TCGv_i64 r, dest;
3041 
3042     if (!ctx->is_pa20) {
3043         return false;
3044     }
3045 
3046     nullify_over(ctx);
3047 
3048     r = load_gpr(ctx, a->r);
3049     dest = dest_gpr(ctx, a->t);
3050 
3051     fn(dest, r, a->i);
3052     save_gpr(ctx, a->t, dest);
3053 
3054     return nullify_end(ctx);
3055 }
3056 
3057 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3058                                 void (*fn)(TCGv_i64, TCGv_i64,
3059                                            TCGv_i64, TCGv_i32))
3060 {
3061     TCGv_i64 r1, r2, dest;
3062 
3063     if (!ctx->is_pa20) {
3064         return false;
3065     }
3066 
3067     nullify_over(ctx);
3068 
3069     r1 = load_gpr(ctx, a->r1);
3070     r2 = load_gpr(ctx, a->r2);
3071     dest = dest_gpr(ctx, a->t);
3072 
3073     fn(dest, r1, r2, tcg_constant_i32(a->sh));
3074     save_gpr(ctx, a->t, dest);
3075 
3076     return nullify_end(ctx);
3077 }
3078 
/* HADD: parallel add of four halfwords, modulo arithmetic. */
static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}
3083 
/* HADD,SS: parallel halfword add with signed saturation (helper). */
static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}
3088 
/* HADD,US: parallel halfword add with unsigned saturation (helper). */
static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}
3093 
/* HAVG: parallel halfword average (helper). */
static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}
3098 
/* HSHL: parallel halfword shift left by immediate. */
static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}
3103 
/* HSHR: parallel halfword arithmetic (signed) shift right by immediate. */
static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}
3108 
/* HSHR,U: parallel halfword logical (unsigned) shift right by immediate. */
static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}
3113 
/* HSHLADD: parallel halfword shift left and add (helper). */
static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}
3118 
/* HSHRADD: parallel halfword shift right and add (helper). */
static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}
3123 
/* HSUB: parallel subtract of four halfwords, modulo arithmetic. */
static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}
3128 
/* HSUB,SS: parallel halfword subtract with signed saturation (helper). */
static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}
3133 
/* HSUB,US: parallel halfword subtract with unsigned saturation (helper). */
static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}
3138 
/*
 * MIXH,L: interleave the left (even-numbered, big-endian) halfwords:
 * dst = { r1.h0, r2.h0, r1.h2, r2.h2 }.
 * Compute r2's contribution into a temp first, since dst may alias
 * either input register.
 */
static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* tmp = r2's even halfwords, shifted down into the odd slots. */
    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}
3149 
/* MIXH,L: see gen_mixh_l. */
static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}
3154 
3155 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3156 {
3157     uint64_t mask = 0x0000ffff0000ffffull;
3158     TCGv_i64 tmp = tcg_temp_new_i64();
3159 
3160     tcg_gen_andi_i64(tmp, r1, mask);
3161     tcg_gen_andi_i64(dst, r2, mask);
3162     tcg_gen_shli_i64(tmp, tmp, 16);
3163     tcg_gen_or_i64(dst, dst, tmp);
3164 }
3165 
/* MIXH,R: see gen_mixh_r. */
static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}
3170 
/* MIXW,L: dst = { r1.w0, r2.w0 } — high words of both inputs. */
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Move r2's high word into the low half of the result. */
    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}
3178 
/* MIXW,L: see gen_mixw_l. */
static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}
3183 
/* MIXW,R: dst = { r1.w1, r2.w1 } — low words of both inputs. */
static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}
3188 
/* MIXW,R: see gen_mixw_r. */
static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}
3193 
/*
 * PERMH: permute the four halfwords of r1 as selected by c0..c3.
 * Halfword indices use big-endian numbering (0 = most significant),
 * hence the (3 - cN) * 16 conversion to a little-endian bit offset.
 */
static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    /* PERMH is a pa2.0 multimedia instruction. */
    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    /* Extract the four selected halfwords into the low 16 bits each. */
    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    /* Pairwise merge: t0 = t0:t1, t2 = t2:t3, then t0 = t0:t2. */
    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}
3222 
/* LDB/LDH/LDW/LDD: displacement or indexed load. */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
       /*
        * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
        * Any base modification still occurs.
        */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        /* Doubleword loads do not exist before pa2.0. */
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}
3239 
3240 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3241 {
3242     assert(a->x == 0 && a->scale == 0);
3243     if (!ctx->is_pa20 && a->size > MO_32) {
3244         return gen_illegal(ctx);
3245     }
3246     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3247 }
3248 
/*
 * LDCW/LDCD: load and clear (semaphore primitive), implemented as an
 * atomic exchange of zero with the memory word/doubleword.
 */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    /* LDCD does not exist before pa2.0. */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    /* Return the old memory value and leave zero behind. */
    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3291 
/*
 * STBY: store bytes.  The partial-word store is done entirely in the
 * stby_{b,e} helpers; a->a selects the "ends" (stby,e) vs "begin"
 * (stby,b) variant, and _parallel versions are used when the TB runs
 * under CF_PARALLEL.  Base modification aligns the base down to a word.
 */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the word-aligned address. */
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3322 
/*
 * STDBY: pa2.0 doubleword variant of STBY; see trans_stby.  Base
 * modification aligns the base down to a doubleword.
 */
static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the doubleword-aligned address. */
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3356 
/*
 * Absolute-address load (privileged): temporarily switch the mmu index
 * to the absolute (physical) index, then reuse the normal load path.
 */
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    /* PSW_W selects the wide absolute index. */
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3367 
/*
 * Absolute-address store (privileged): temporarily switch the mmu index
 * to the absolute (physical) index, then reuse the normal store path.
 */
static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    /* PSW_W selects the wide absolute index. */
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3378 
3379 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3380 {
3381     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3382 
3383     tcg_gen_movi_i64(tcg_rt, a->i);
3384     save_gpr(ctx, a->t, tcg_rt);
3385     cond_free(&ctx->null_cond);
3386     return true;
3387 }
3388 
3389 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3390 {
3391     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3392     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3393 
3394     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3395     save_gpr(ctx, 1, tcg_r1);
3396     cond_free(&ctx->null_cond);
3397     return true;
3398 }
3399 
3400 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3401 {
3402     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3403 
3404     /* Special case rb == 0, for the LDI pseudo-op.
3405        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3406     if (a->b == 0) {
3407         tcg_gen_movi_i64(tcg_rt, a->i);
3408     } else {
3409         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3410     }
3411     save_gpr(ctx, a->t, tcg_rt);
3412     cond_free(&ctx->null_cond);
3413     return true;
3414 }
3415 
/*
 * Shared body for CMPB/CMPIB: compute in1 - r, evaluate the compare
 * condition c/f (d selects doubleword semantics), and emit the
 * conditional branch with displacement disp and nullification n.
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    /* Signed overflow is only computed for conditions that need it. */
    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3435 
/* CMPB: compare r1 with r2 and branch; the ,d completer requires pa2.0. */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}
3445 
/* CMPIB: compare immediate with r and branch; ,d requires pa2.0. */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}
3455 
/*
 * Shared body for ADDB/ADDIB: compute in1 + r, write the sum back to r,
 * and branch on condition c/f with nullification n.
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        /* Compute the sum with its carry-out; derive the per-bit
           carries as in1 ^ in2 ^ sum.  */
        tcg_gen_movi_i64(cb_msb, 0);
        tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3499 
/* ADDB: add r1 to r2 and branch on the condition of the sum. */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
3505 
/* ADDIB: add immediate to r and branch on the condition of the sum. */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}
3511 
/*
 * BB,sar: branch on the bit of r selected by CR11 (SAR).  The bit is
 * shifted into the sign bit so it can be tested with a signed compare
 * against zero: GE (sign clear) vs LT (sign set), selected by a->c.
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (a->d) {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    } else {
        /* Force shift into [32,63] so the selected bit of the low
           word lands in bit 63. */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    }

    cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3532 
/*
 * BB,imm: branch on a fixed bit of r.  As in trans_bb_sar, the tested
 * bit is shifted into the sign bit; in 32-bit mode the shift count is
 * biased by 32 so the low word's bit reaches bit 63.
 */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;
    int p;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    p = a->p | (a->d ? 0 : 32);
    tcg_gen_shli_i64(tmp, tcg_r, p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3549 
/* MOVB: copy r1 to r2, then branch on the condition of the moved value. */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* %r0 always reads as zero. */
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3568 
/* MOVIB: move immediate to r, then branch on the condition of the value. */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3583 
/*
 * SHRPW/SHRPD (variable): shift the r1:r2 pair right by CR11 (SAR) and
 * keep the low word/doubleword.  Special cases: r1 == 0 reduces to a
 * plain right shift of r2, and r1 == r2 reduces to a rotate.
 */
static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* 32-bit: zero-extend and shift by sar mod 32. */
            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            /* 32-bit rotate done in i32 to wrap correctly. */
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            /* t = src1 << (64 - sar), done as << (63 ^ sar) << 1 to
               avoid an invalid shift count of 64 when sar == 0. */
            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            /* 32-bit funnel shift: concatenate and shift the pair. */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3650 
/*
 * SHRPW/SHRPD (fixed): shift the r1:r2 pair right by the immediate
 * amount sa = width - 1 - cpos and keep the low word/doubleword.
 */
static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    width = a->d ? 64 : 32;
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* Only r2 contributes: a plain extract. */
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        /* Full-width funnel shift of the pair. */
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        assert(!a->d);
        if (a->r1 == a->r2) {
            /* Same register: a 32-bit rotate. */
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            /* Concatenate the two low words and extract the window. */
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3693 
/*
 * EXTRW/EXTRD (variable): extract a len-bit field at the position given
 * by CR11 (SAR), with a->se selecting sign vs zero extension.
 */
static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        if (!a->d) {
            /* Sign-extend the low word before the arithmetic shift. */
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3738 
/*
 * EXTRW/EXTRD (fixed): extract a len-bit field at big-endian position
 * a->pos, converted to the little-endian offset cpos = width - 1 - pos.
 */
static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    cpos = width - 1 - a->pos;
    /* Clamp the field so it does not run past the register width. */
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3774 
/*
 * DEPWI/DEPDI (fixed): deposit the immediate a->i into rt at cpos.
 * mask0 is the field over a zero background, mask1 over all-ones;
 * a->nz selects merging into the old rt vs zeroing the background.
 */
static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the field so it does not run past the register width. */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        /* Merge: keep rt outside the field, force the field's bits. */
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3814 
/*
 * DEPW/DEPD (fixed): deposit the low len bits of rr into rt at cpos.
 * a->nz selects depositing over the old rt vs a zero background.
 */
static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    /* The ,d completer requires pa2.0. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the field so it does not run past the register width. */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
3850 
/*
 * Shared body for DEPW/DEPD and DEPWI/DEPDI with a variable position
 * taken from CR11 (SAR): deposit the low LEN bits of VAL into rt at
 * the SAR-selected position.  NZ selects merging with the old rt vs
 * a zero background.
 */
static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    /* mask = LEN low bits set; tmp = the field bits of VAL. */
    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        /* Merge the shifted field into the old rt. */
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    }
    return nullify_end(ctx);
}
3887 
/* DEPW/DEPD (variable): register deposit at the SAR position. */
static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}
3899 
/* DEPWI/DEPDI (variable): immediate deposit at the SAR position. */
static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}
3911 
/*
 * BE/BLE: branch external — branch to an offset within the space
 * selected by sr(a->sp), optionally linking (a->l) into %r31.
 */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_i64 tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    /* Compute the target offset and apply privilege adjustment. */
    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        /* Link: save the back of the queue to %r31 and sr0. */
        copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
    if (a->n && use_nullify_skip(ctx)) {
        /* Nullified delay slot: jump straight to the target queue. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
        nullify_set(ctx, 0);
    } else {
        /* Advance the queue: front <- back, back <- branch target. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
3967 
/* B,L: IA-relative branch with optional link. */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
3972 
/*
 * B,GATE: gateway branch — may raise the privilege level when the
 * target page is a gateway page.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    uint64_t dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            /* Promote: the new privilege is encoded in the low bits. */
            dest = deposit64(dest, 0, 2, type - 4);
        }
    } else {
        /* Code translation disabled: force privilege 0. */
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        /* Record the current privilege in the link register's low bits. */
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
4026 
4027 static bool trans_blr(DisasContext *ctx, arg_blr *a)
4028 {
4029     if (a->x) {
4030         TCGv_i64 tmp = tcg_temp_new_i64();
4031         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
4032         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
4033         /* The computation here never changes privilege level.  */
4034         return do_ibranch(ctx, tmp, a->l, a->n);
4035     } else {
4036         /* BLR R0,RX is a good way to load PC+8 into RX.  */
4037         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
4038     }
4039 }
4040 
4041 static bool trans_bv(DisasContext *ctx, arg_bv *a)
4042 {
4043     TCGv_i64 dest;
4044 
4045     if (a->x == 0) {
4046         dest = load_gpr(ctx, a->b);
4047     } else {
4048         dest = tcg_temp_new_i64();
4049         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
4050         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
4051     }
4052     dest = do_ibranch_priv(ctx, dest);
4053     return do_ibranch(ctx, dest, 0, a->n);
4054 }
4055 
/*
 * Branch vectored external: an indirect, possibly inter-space branch.
 * In the system build this must update both the front and back entries of
 * the IA space/offset queues, so it is open-coded rather than going
 * through do_ibranch.
 */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 dest;

#ifdef CONFIG_USER_ONLY
    /* No space registers to manage; reduces to a plain indirect branch. */
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    /* Advance the queue: old back entry becomes the new front.  */
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        /* Back offset unknown at translate time; its space may differ too. */
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    /* New back entry: the branch target and its (runtime-selected) space. */
    copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        /* Link: return address is the insn following the delay slot.  */
        copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
4082 
/* Branch target stack ops (PUSHBTS et al) are PA2.0-only; returning false
   on PA1.x lets the caller treat the encoding as illegal.  */
static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions implement as nop. */
    return ctx->is_pa20;
}
4088 
4089 /*
4090  * Float class 0
4091  */
4092 
/* FCPY (single): a plain register move; no helper call needed.  */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
4097 
4098 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4099 {
4100     uint64_t ret;
4101 
4102     if (ctx->is_pa20) {
4103         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4104     } else {
4105         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4106     }
4107 
4108     nullify_over(ctx);
4109     save_frd(0, tcg_constant_i64(ret));
4110     return nullify_end(ctx);
4111 }
4112 
/*
 * The remaining class 0 operations.  The gen_* callbacks below operate
 * directly on the register image (sign-bit manipulation needs no softfloat
 * helper); the trans_* functions merely dispatch through do_fop_wew
 * (single) or do_fop_ded (double), which handle register load/store and
 * nullification.
 */
static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

/* FCPY (double): a plain register move.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* FABS: clear the IEEE sign bit.  */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

/* FSQRT and FRND must honor rounding mode and exceptions: use helpers.  */
static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

/* FNEG: flip the IEEE sign bit.  */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* FNEGABS: force the sign bit on (negative absolute value).  */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
4207 
4208 /*
4209  * Float class 1
4210  */
4211 
/*
 * Conversion naming used below: f = single float, d = double float,
 * w = signed word, q = signed doubleword, and a "u" prefix on w/q makes
 * the integer unsigned.  A "_t_" infix selects the explicit
 * round-toward-zero (truncating) variant.  Every trans_* dispatches to
 * the matching softfloat helper through a do_fop_* wrapper chosen by the
 * source/destination widths (w = word regs, d/e = double regs).
 */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
4341 
4342 /*
4343  * Float class 2
4344  */
4345 
4346 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4347 {
4348     TCGv_i32 ta, tb, tc, ty;
4349 
4350     nullify_over(ctx);
4351 
4352     ta = load_frw0_i32(a->r1);
4353     tb = load_frw0_i32(a->r2);
4354     ty = tcg_constant_i32(a->y);
4355     tc = tcg_constant_i32(a->c);
4356 
4357     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4358 
4359     return nullify_end(ctx);
4360 }
4361 
4362 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4363 {
4364     TCGv_i64 ta, tb;
4365     TCGv_i32 tc, ty;
4366 
4367     nullify_over(ctx);
4368 
4369     ta = load_frd0(a->r1);
4370     tb = load_frd0(a->r2);
4371     ty = tcg_constant_i32(a->y);
4372     tc = tcg_constant_i32(a->c);
4373 
4374     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4375 
4376     return nullify_end(ctx);
4377 }
4378 
/*
 * FTEST: nullify the next instruction based on bits of the shadowed
 * fp status register (fr0_shadow), as recorded by a previous FCMP.
 * The result is expressed as ctx->null_cond rather than a branch.
 */
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        /* The masks below select the C bit plus groups of CQ queue bits
           in fr0_shadow; "acc" nullifies when none are set, "rej" when
           all are set -- TODO confirm exact bit meanings against the
           PA-RISC fp status register layout.  */
        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_i64(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            /* Nullify iff every masked bit is set: OR in the mask bits
               and compare the whole word against the mask.  */
            TCGv_i64 c = tcg_constant_i64(mask);
            tcg_gen_or_i64(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            /* Nullify iff no masked bit is set.  */
            tcg_gen_andi_i64(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        /* Test a single C-bit selected by the y field.  */
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_i64(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
4440 
4441 /*
4442  * Float class 2
4443  */
4444 
/*
 * Class 3 arithmetic (add/sub/mpy/div) in single and double precision:
 * each simply dispatches to the softfloat helper through do_fop_weww
 * (word regs) or do_fop_dedd (double regs), which handle register
 * load/store and nullification.
 */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
4484 
4485 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4486 {
4487     TCGv_i64 x, y;
4488 
4489     nullify_over(ctx);
4490 
4491     x = load_frw0_i64(a->r1);
4492     y = load_frw0_i64(a->r2);
4493     tcg_gen_mul_i64(x, x, y);
4494     save_frd(a->t, x);
4495 
4496     return nullify_end(ctx);
4497 }
4498 
/*
 * Convert the fmpyadd single-precision register encodings to standard.
 * Bit 4 of the 5-bit field selects the upper half of the register file
 * (it moves up to bit 5); the result always has bit 4 set, and the low
 * four bits carry over unchanged.  All three fields are disjoint, so
 * OR and ADD are equivalent here.
 */
static inline int fmpyadd_s_reg(unsigned r)
{
    return ((r & 16) << 1) | 16 | (r & 15);
}
4504 
4505 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4506 {
4507     int tm = fmpyadd_s_reg(a->tm);
4508     int ra = fmpyadd_s_reg(a->ra);
4509     int ta = fmpyadd_s_reg(a->ta);
4510     int rm2 = fmpyadd_s_reg(a->rm2);
4511     int rm1 = fmpyadd_s_reg(a->rm1);
4512 
4513     nullify_over(ctx);
4514 
4515     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4516     do_fop_weww(ctx, ta, ta, ra,
4517                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4518 
4519     return nullify_end(ctx);
4520 }
4521 
/* Single-precision multiply-and-add / multiply-and-subtract.  */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
4531 
/* FMPYADD/FMPYSUB (double): as do_fmpyadd_s, but the register fields
   need no re-encoding for double precision.  */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
4542 
/* Double-precision multiply-and-add / multiply-and-subtract.  */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
4552 
4553 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4554 {
4555     TCGv_i32 x, y, z;
4556 
4557     nullify_over(ctx);
4558     x = load_frw0_i32(a->rm1);
4559     y = load_frw0_i32(a->rm2);
4560     z = load_frw0_i32(a->ra3);
4561 
4562     if (a->neg) {
4563         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4564     } else {
4565         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4566     }
4567 
4568     save_frw_i32(a->t, x);
4569     return nullify_end(ctx);
4570 }
4571 
4572 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4573 {
4574     TCGv_i64 x, y, z;
4575 
4576     nullify_over(ctx);
4577     x = load_frd0(a->rm1);
4578     y = load_frd0(a->rm2);
4579     z = load_frd0(a->ra3);
4580 
4581     if (a->neg) {
4582         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4583     } else {
4584         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4585     }
4586 
4587     save_frd(a->t, x);
4588     return nullify_end(ctx);
4589 }
4590 
/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    /* Requires highest privilege; in the user-only build the macro
       supplies the function result (no code follows the #ifndef).  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}
4601 
/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    /* Requires highest privilege; in the user-only build the macro
       supplies the function result (no code follows the #ifndef).  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}
4612 
/* DIAG shadow-register access: the encodings differ between PA1.x and
   PA2.0, so each wrapper accepts only its own architecture level and
   reports the other encoding as unrecognized (false).  */
static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
{
    return ctx->is_pa20 && do_putshadowregs(ctx);
}
4632 
/* Catch-all for unimplemented DIAG opcodes: log once and treat as nop
   (still requires the highest privilege level).  */
static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}
4639 
/*
 * Per-TB translator setup: recover privilege level, MMU index and the
 * two instruction-address queue offsets from the TB's flags, pc and
 * cs_base, then bound the TB to the current page.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    /* User emulation runs at a fixed privilege; the IAOQ values carry
       the privilege in their low bits, as elsewhere in this file.  */
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    /* MMU index depends on PSW[D] (data translation) and, failing that,
       on PSW[W] for the absolute address width.  */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    /* diff == 0 encodes "back offset unknown at translate time".  */
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
4678 
4679 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4680 {
4681     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4682 
4683     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4684     ctx->null_cond = cond_make_f();
4685     ctx->psw_n_nonzero = false;
4686     if (ctx->tb_flags & PSW_N) {
4687         ctx->null_cond.c = TCG_COND_ALWAYS;
4688         ctx->psw_n_nonzero = true;
4689     }
4690     ctx->null_lab = NULL;
4691 }
4692 
/* Per-insn start hook: record both IAOQ entries for exception restore.  */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
    ctx->insn_start_updated = false;
}
4700 
/*
 * Translate one instruction: fetch, decode (or handle the user-only
 * page-zero entry points), then advance the two-entry instruction
 * address queue, emitting queue updates to the CPU state as required.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back offset is only known at runtime (cpu_iaoq_b).  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new_i64();
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Unconditionally nullified: skip decode entirely.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            /* Each trans_* must have resolved its own null label.  */
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Non-sequential but statically known: chain directly.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    /* Shift the queue: back becomes front, next becomes back.  */
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            /* Front offset now unknown: flush the queue to CPU state
               and end the TB.  */
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var)
;
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            /* Only the back entry is dynamic: sync just that one.  */
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
4789 
/*
 * Per-TB end hook: flush any stale IAOQ/nullification state to the CPU
 * and emit the TB exit appropriate to how translation ended.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* The insn already emitted its own exit.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* CPU state is stale: write back both queue entries and PSW[N].  */
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        /* Full exit to the main loop (e.g. after PSW changes).  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
4818 
/*
 * Disassembly logging hook.  The user-only page-zero entry points have
 * no real code to disassemble, so print a symbolic name instead.
 */
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}
4844 
/* Translator callback table handed to the generic translator loop.  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4853 
/* Entry point from the core: translate one TB via the generic loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}
4860