/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;
    TCGOp *insn_start;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}
/* The space register field is encoded inverted, so that an encoded 0
   means sr0 explicitly, not a space inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}
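
/* E.g. an encoded 0 becomes ~0 = -1; space_select() below undoes the
   inversion (sp = ~sp = 0) and loads sr0 directly.  */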

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
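
/* E.g. M:A = 00 or 01 -> 0 (no update), M:A = 10 -> 1 (post-modify),
   M:A = 11 -> -1 (pre-modify), matching the do_load/do_store contract
   below.  */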

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
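
/* For example, x = 1, y = 3 encodes as val = 0b100011, and
   (val ^ 31) + 1 = 0b111100 + 1 = 61 = 32 * 1 + 32 - 3.  */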

/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
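
/* E.g. val = 1 (just the sign bit) yields i = -8192 with PSW.W clear;
   with PSW.W set, the former space bits s are xor-folded into bits
   [14:13], reusing them as extra displacement bits in wide mode.  */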

/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}
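
/* I.e. the encoding 0 stands for "*<<" (unsigned less-than), which is
   condition 4 in do_sub_cond below; nonzero encodings pass through.  */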


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(ctx->insn_start != NULL);
    tcg_set_insn_start_param(ctx->insn_start, 2, breg);
    ctx->insn_start = NULL;
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

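/*
 * From the do_cond cases below: c == 2 (< / >=), c == 3 (<= / >) and
 * c == 6 (SV / NSV) consume the signed-overflow input, while c == 4
 * (NUV / UV) and c == 5 (ZNV / VNZ) consume the carry input.
 */
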
/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
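
/* E.g. orig = 3 maps to c = 7, f = 0, giving do_log_cond(14), i.e. OD,
   while orig = 7 maps to c = 7, f = 1, giving do_log_cond(15), i.e. EV.  */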

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
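        /* The result is the per-bit carry-out: cb = (in1 & in2)
           | ((in1 | in2) & ~res), i.e. a majority function with res
           standing in for in1 ^ in2 ^ carry-in.  */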
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
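
/* I.e. sv = (res ^ in1) & ~(in1 ^ in2): bit 63 is set exactly when in1
   and in2 have the same sign but res differs, the overflow condition.  */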

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}
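
/* I.e. sv = (res ^ in1) & (in1 ^ in2): bit 63 is set exactly when in1
   and in2 differ in sign and res differs in sign from in1.  */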

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
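        /* I.e. {1:in1} - {0:in2} = 2^64 + in1 - in2, so cb_msb receives
           the carry-out of in1 + ~in2 + 1 (1 = no borrow).  */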
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
1837            and we're allowed to skip execution of N (no single-step or
1838            tracepoint in effect).  Since the goto_ptr that we must use
1839            for the indirect branch consumes no special resources, we
1840            can (conditionally) skip B and continue execution.  */
1841         /* The use_nullify_skip test implies we have a known control path.  */
1842         tcg_debug_assert(ctx->iaoq_b != -1);
1843         tcg_debug_assert(ctx->iaoq_n != -1);
1844 
1845         /* We do have to handle the non-local temporary, DEST, before
1846            branching.  Since IAOQ_F is not really live at this point, we
1847            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1848         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1849         next = tcg_temp_new_i64();
1850         tcg_gen_addi_i64(next, dest, 4);
1851         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1852 
1853         nullify_over(ctx);
1854         if (link != 0) {
1855             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1856         }
1857         tcg_gen_lookup_and_goto_ptr();
1858         return nullify_end(ctx);
1859     } else {
1860         c = ctx->null_cond.c;
1861         a0 = ctx->null_cond.a0;
1862         a1 = ctx->null_cond.a1;
1863 
1864         tmp = tcg_temp_new_i64();
1865         next = tcg_temp_new_i64();
1866 
1867         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1868         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1869         ctx->iaoq_n = -1;
1870         ctx->iaoq_n_var = next;
1871 
1872         if (link != 0) {
1873             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1874         }
1875 
1876         if (is_n) {
1877             /* The branch nullifies the next insn, which means the state of N
1878                after the branch is the inverse of the state of N that applied
1879                to the branch.  */
1880             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1881             cond_free(&ctx->null_cond);
1882             ctx->null_cond = cond_make_n();
1883             ctx->psw_n_nonzero = true;
1884         } else {
1885             cond_free(&ctx->null_cond);
1886         }
1887     }
1888     return true;
1889 }
1890 
1891 /* Implement
1892  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1893  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1894  *    else
1895  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1896  * which keeps the privilege level from being increased.
1897  */
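/* Worked example (values illustrative only): at privilege 1, a target
 * offset of 0x1000 (priv field 0) is rewritten to 0x1001 and the GTU
 * movcond keeps 0x1001, clamping the branch to privilege 1; a target
 * of 0x1003 (priv field 3) compares higher and is used unchanged,
 * since moving to a less privileged level is always permitted.
 */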
1898 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1899 {
1900     TCGv_i64 dest;
1901     switch (ctx->privilege) {
1902     case 0:
1903         /* Privilege 0 is maximum and is allowed to decrease.  */
1904         return offset;
1905     case 3:
1906         /* Privilege 3 is minimum and is never allowed to increase.  */
1907         dest = tcg_temp_new_i64();
1908         tcg_gen_ori_i64(dest, offset, 3);
1909         break;
1910     default:
1911         dest = tcg_temp_new_i64();
1912         tcg_gen_andi_i64(dest, offset, -4);
1913         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1914         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1915         break;
1916     }
1917     return dest;
1918 }
1919 
1920 #ifdef CONFIG_USER_ONLY
1921 /* On Linux, page zero is normally marked execute only + gateway.
1922    Therefore normal read or write is supposed to fail, but specific
1923    offsets have kernel code mapped to raise permissions to implement
1924    system calls.  Handling this via an explicit check here, rather
1925    than in the "be disp(sr2,r0)" instruction that probably sent us
1926    here, is the easiest way to handle the branch delay slot on the
1927    aforementioned BE.  */
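/* For reference (typical Linux/hppa user code; illustrative):
 *     ble  0x100(%sr2, %r0)      ; branch to the syscall gateway
 *     ldi  __NR_write, %r20      ; syscall number in the delay slot
 * which is why offset 0x100 below raises EXCP_SYSCALL.
 */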
1928 static void do_page_zero(DisasContext *ctx)
1929 {
1930     TCGv_i64 tmp;
1931 
1932     /* If by some means we get here with PSW[N]=1, that implies that
1933        the B,GATE instruction would be skipped, and we'd fault on the
1934        next insn within the privileged page.  */
1935     switch (ctx->null_cond.c) {
1936     case TCG_COND_NEVER:
1937         break;
1938     case TCG_COND_ALWAYS:
1939         tcg_gen_movi_i64(cpu_psw_n, 0);
1940         goto do_sigill;
1941     default:
1942         /* Since this is always the first (and only) insn within the
1943            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1944         g_assert_not_reached();
1945     }
1946 
1947     /* Check that we didn't arrive here via some means that allowed
1948        non-sequential instruction execution.  Normally the PSW[B] bit
1949        detects this by disallowing the B,GATE instruction to execute
1950        under such conditions.  */
1951     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1952         goto do_sigill;
1953     }
1954 
1955     switch (ctx->iaoq_f & -4) {
1956     case 0x00: /* Null pointer call */
1957         gen_excp_1(EXCP_IMP);
1958         ctx->base.is_jmp = DISAS_NORETURN;
1959         break;
1960 
1961     case 0xb0: /* LWS */
1962         gen_excp_1(EXCP_SYSCALL_LWS);
1963         ctx->base.is_jmp = DISAS_NORETURN;
1964         break;
1965 
1966     case 0xe0: /* SET_THREAD_POINTER */
1967         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1968         tmp = tcg_temp_new_i64();
1969         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1970         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1971         tcg_gen_addi_i64(tmp, tmp, 4);
1972         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1973         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1974         break;
1975 
1976     case 0x100: /* SYSCALL */
1977         gen_excp_1(EXCP_SYSCALL);
1978         ctx->base.is_jmp = DISAS_NORETURN;
1979         break;
1980 
1981     default:
1982     do_sigill:
1983         gen_excp_1(EXCP_ILL);
1984         ctx->base.is_jmp = DISAS_NORETURN;
1985         break;
1986     }
1987 }
1988 #endif
1989 
1990 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1991 {
1992     cond_free(&ctx->null_cond);
1993     return true;
1994 }
1995 
1996 static bool trans_break(DisasContext *ctx, arg_break *a)
1997 {
1998     return gen_excp_iir(ctx, EXCP_BREAK);
1999 }
2000 
2001 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2002 {
2003     /* No point in nullifying the memory barrier.  */
2004     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2005 
2006     cond_free(&ctx->null_cond);
2007     return true;
2008 }
2009 
2010 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2011 {
2012     unsigned rt = a->t;
2013     TCGv_i64 tmp = dest_gpr(ctx, rt);
2014     tcg_gen_movi_i64(tmp, ctx->iaoq_f & ~3ULL);
2015     save_gpr(ctx, rt, tmp);
2016 
2017     cond_free(&ctx->null_cond);
2018     return true;
2019 }
2020 
2021 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2022 {
2023     unsigned rt = a->t;
2024     unsigned rs = a->sp;
2025     TCGv_i64 t0 = tcg_temp_new_i64();
2026 
2027     load_spr(ctx, t0, rs);
2028     tcg_gen_shri_i64(t0, t0, 32);
2029 
2030     save_gpr(ctx, rt, t0);
2031 
2032     cond_free(&ctx->null_cond);
2033     return true;
2034 }
2035 
2036 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2037 {
2038     unsigned rt = a->t;
2039     unsigned ctl = a->r;
2040     TCGv_i64 tmp;
2041 
2042     switch (ctl) {
2043     case CR_SAR:
2044         if (a->e == 0) {
2045             /* MFSAR without ,W masks low 5 bits.  */
2046             tmp = dest_gpr(ctx, rt);
2047             tcg_gen_andi_i64(tmp, cpu_sar, 31);
2048             save_gpr(ctx, rt, tmp);
2049             goto done;
2050         }
2051         save_gpr(ctx, rt, cpu_sar);
2052         goto done;
2053     case CR_IT: /* Interval Timer */
2054         /* FIXME: Respect PSW_S bit.  */
2055         nullify_over(ctx);
2056         tmp = dest_gpr(ctx, rt);
2057         if (translator_io_start(&ctx->base)) {
2058             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2059         }
2060         gen_helper_read_interval_timer(tmp);
2063         save_gpr(ctx, rt, tmp);
2064         return nullify_end(ctx);
2065     case 26:
2066     case 27:
2067         break;
2068     default:
2069         /* All other control registers are privileged.  */
2070         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2071         break;
2072     }
2073 
2074     tmp = tcg_temp_new_i64();
2075     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2076     save_gpr(ctx, rt, tmp);
2077 
2078  done:
2079     cond_free(&ctx->null_cond);
2080     return true;
2081 }
2082 
2083 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2084 {
2085     unsigned rr = a->r;
2086     unsigned rs = a->sp;
2087     TCGv_i64 tmp;
2088 
2089     if (rs >= 5) {
2090         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2091     }
2092     nullify_over(ctx);
2093 
2094     tmp = tcg_temp_new_i64();
2095     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2096 
2097     if (rs >= 4) {
2098         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2099         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2100     } else {
2101         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2102     }
2103 
2104     return nullify_end(ctx);
2105 }
2106 
2107 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2108 {
2109     unsigned ctl = a->t;
2110     TCGv_i64 reg;
2111     TCGv_i64 tmp;
2112 
2113     if (ctl == CR_SAR) {
2114         reg = load_gpr(ctx, a->r);
2115         tmp = tcg_temp_new_i64();
2116         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2117         save_or_nullify(ctx, cpu_sar, tmp);
2118 
2119         cond_free(&ctx->null_cond);
2120         return true;
2121     }
2122 
2123     /* All other control registers are privileged or read-only.  */
2124     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2125 
2126 #ifndef CONFIG_USER_ONLY
2127     nullify_over(ctx);
2128 
2129     if (ctx->is_pa20) {
2130         reg = load_gpr(ctx, a->r);
2131     } else {
2132         reg = tcg_temp_new_i64();
2133         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2134     }
2135 
2136     switch (ctl) {
2137     case CR_IT:
2138         gen_helper_write_interval_timer(tcg_env, reg);
2139         break;
2140     case CR_EIRR:
2141         gen_helper_write_eirr(tcg_env, reg);
2142         break;
2143     case CR_EIEM:
2144         gen_helper_write_eiem(tcg_env, reg);
2145         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2146         break;
2147 
2148     case CR_IIASQ:
2149     case CR_IIAOQ:
2150         /* FIXME: Respect PSW_Q bit */
2151         /* The write advances the queue and stores to the back element.  */
2152         tmp = tcg_temp_new_i64();
2153         tcg_gen_ld_i64(tmp, tcg_env,
2154                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2155         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2156         tcg_gen_st_i64(reg, tcg_env,
2157                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2158         break;
2159 
2160     case CR_PID1:
2161     case CR_PID2:
2162     case CR_PID3:
2163     case CR_PID4:
2164         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2166         gen_helper_change_prot_id(tcg_env);
2168         break;
2169 
2170     default:
2171         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2172         break;
2173     }
2174     return nullify_end(ctx);
2175 #endif
2176 }
2177 
2178 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2179 {
2180     TCGv_i64 tmp = tcg_temp_new_i64();
2181 
2182     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2183     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2184     save_or_nullify(ctx, cpu_sar, tmp);
2185 
2186     cond_free(&ctx->null_cond);
2187     return true;
2188 }
2189 
2190 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2191 {
2192     TCGv_i64 dest = dest_gpr(ctx, a->t);
2193 
2194 #ifdef CONFIG_USER_ONLY
2195     /* We don't implement space registers in user mode. */
2196     tcg_gen_movi_i64(dest, 0);
2197 #else
2198     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2199     tcg_gen_shri_i64(dest, dest, 32);
2200 #endif
2201     save_gpr(ctx, a->t, dest);
2202 
2203     cond_free(&ctx->null_cond);
2204     return true;
2205 }
2206 
2207 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2208 {
2209 #ifdef CONFIG_USER_ONLY
2210     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2211 #else
2212     TCGv_i64 tmp;
2213 
2214     /* HP-UX 11i and HP ODE use rsm with a zero mask for read access to the PSW.  */
2215     if (a->i) {
2216         CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2217     }
2218 
2219     nullify_over(ctx);
2220 
2221     tmp = tcg_temp_new_i64();
2222     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2223     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2224     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2225     save_gpr(ctx, a->t, tmp);
2226 
2227     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2228     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2229     return nullify_end(ctx);
2230 #endif
2231 }
2232 
2233 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2234 {
2235     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237     TCGv_i64 tmp;
2238 
2239     nullify_over(ctx);
2240 
2241     tmp = tcg_temp_new_i64();
2242     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2243     tcg_gen_ori_i64(tmp, tmp, a->i);
2244     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2245     save_gpr(ctx, a->t, tmp);
2246 
2247     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2248     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2249     return nullify_end(ctx);
2250 #endif
2251 }
2252 
2253 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2254 {
2255     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2256 #ifndef CONFIG_USER_ONLY
2257     TCGv_i64 tmp, reg;
2258     nullify_over(ctx);
2259 
2260     reg = load_gpr(ctx, a->r);
2261     tmp = tcg_temp_new_i64();
2262     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2263 
2264     /* Exit the TB to recognize new interrupts.  */
2265     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2266     return nullify_end(ctx);
2267 #endif
2268 }
2269 
2270 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2271 {
2272     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2273 #ifndef CONFIG_USER_ONLY
2274     nullify_over(ctx);
2275 
2276     if (rfi_r) {
2277         gen_helper_rfi_r(tcg_env);
2278     } else {
2279         gen_helper_rfi(tcg_env);
2280     }
2281     /* Exit the TB to recognize new interrupts.  */
2282     tcg_gen_exit_tb(NULL, 0);
2283     ctx->base.is_jmp = DISAS_NORETURN;
2284 
2285     return nullify_end(ctx);
2286 #endif
2287 }
2288 
2289 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2290 {
2291     return do_rfi(ctx, false);
2292 }
2293 
2294 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2295 {
2296     return do_rfi(ctx, true);
2297 }
2298 
2299 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2300 {
2301     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2302 #ifndef CONFIG_USER_ONLY
2303     nullify_over(ctx);
2304     gen_helper_halt(tcg_env);
2305     ctx->base.is_jmp = DISAS_NORETURN;
2306     return nullify_end(ctx);
2307 #endif
2308 }
2309 
2310 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2311 {
2312     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2313 #ifndef CONFIG_USER_ONLY
2314     nullify_over(ctx);
2315     gen_helper_reset(tcg_env);
2316     ctx->base.is_jmp = DISAS_NORETURN;
2317     return nullify_end(ctx);
2318 #endif
2319 }
2320 
2321 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2322 {
2323     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2324 #ifndef CONFIG_USER_ONLY
2325     nullify_over(ctx);
2326     gen_helper_getshadowregs(tcg_env);
2327     return nullify_end(ctx);
2328 #endif
2329 }
2330 
2331 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2332 {
2333     if (a->m) {
2334         TCGv_i64 dest = dest_gpr(ctx, a->b);
2335         TCGv_i64 src1 = load_gpr(ctx, a->b);
2336         TCGv_i64 src2 = load_gpr(ctx, a->x);
2337 
2338         /* The only thing we need to do is the base register modification.  */
2339         tcg_gen_add_i64(dest, src1, src2);
2340         save_gpr(ctx, a->b, dest);
2341     }
2342     cond_free(&ctx->null_cond);
2343     return true;
2344 }
2345 
2346 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2347 {
2348     /* End TB for flush instruction cache, so we pick up new insns. */
2349     ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2350     return trans_nop_addrx(ctx, a);
2351 }
2352 
2353 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2354 {
2355     TCGv_i64 dest, ofs;
2356     TCGv_i32 level, want;
2357     TCGv_i64 addr;
2358 
2359     nullify_over(ctx);
2360 
2361     dest = dest_gpr(ctx, a->t);
2362     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2363 
2364     if (a->imm) {
2365         level = tcg_constant_i32(a->ri & 3);
2366     } else {
2367         level = tcg_temp_new_i32();
2368         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2369         tcg_gen_andi_i32(level, level, 3);
2370     }
2371     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2372 
2373     gen_helper_probe(dest, tcg_env, addr, level, want);
2374 
2375     save_gpr(ctx, a->t, dest);
2376     return nullify_end(ctx);
2377 }
2378 
2379 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2380 {
2381     if (ctx->is_pa20) {
2382         return false;
2383     }
2384     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2385 #ifndef CONFIG_USER_ONLY
2386     TCGv_i64 addr;
2387     TCGv_i64 ofs, reg;
2388 
2389     nullify_over(ctx);
2390 
2391     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2392     reg = load_gpr(ctx, a->r);
2393     if (a->addr) {
2394         gen_helper_itlba_pa11(tcg_env, addr, reg);
2395     } else {
2396         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2397     }
2398 
2399     /* Exit TB for TLB change if mmu is enabled.  */
2400     if (ctx->tb_flags & PSW_C) {
2401         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2402     }
2403     return nullify_end(ctx);
2404 #endif
2405 }
2406 
2407 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2408 {
2409     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2410 #ifndef CONFIG_USER_ONLY
2411     TCGv_i64 addr;
2412     TCGv_i64 ofs;
2413 
2414     nullify_over(ctx);
2415 
2416     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2417 
2418     /*
2419      * Page align now, rather than later, so that we can add in the
2420      * pa2.0 page_size field taken from the low 4 bits of GR[b].
2421      */
2422     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2423     if (ctx->is_pa20) {
2424         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2425     }
2426 
2427     if (local) {
2428         gen_helper_ptlb_l(tcg_env, addr);
2429     } else {
2430         gen_helper_ptlb(tcg_env, addr);
2431     }
2432 
2433     if (a->m) {
2434         save_gpr(ctx, a->b, ofs);
2435     }
2436 
2437     /* Exit TB for TLB change if mmu is enabled.  */
2438     if (ctx->tb_flags & PSW_C) {
2439         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2440     }
2441     return nullify_end(ctx);
2442 #endif
2443 }
2444 
2445 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2446 {
2447     return do_pxtlb(ctx, a, false);
2448 }
2449 
2450 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2451 {
2452     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2453 }
2454 
2455 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2456 {
2457     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2458 #ifndef CONFIG_USER_ONLY
2459     nullify_over(ctx);
2460 
2461     trans_nop_addrx(ctx, a);
2462     gen_helper_ptlbe(tcg_env);
2463 
2464     /* Exit TB for TLB change if mmu is enabled.  */
2465     if (ctx->tb_flags & PSW_C) {
2466         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2467     }
2468     return nullify_end(ctx);
2469 #endif
2470 }
2471 
2472 /*
2473  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2474  * See
2475  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2476  *     page 13-9 (195/206)
2477  */
2478 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2479 {
2480     if (ctx->is_pa20) {
2481         return false;
2482     }
2483     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2484 #ifndef CONFIG_USER_ONLY
2485     TCGv_i64 addr, atl, stl;
2486     TCGv_i64 reg;
2487 
2488     nullify_over(ctx);
2489 
2490     /*
2491      * FIXME:
2492      *  if (not (pcxl or pcxl2))
2493      *    return gen_illegal(ctx);
2494      */
2495 
2496     atl = tcg_temp_new_i64();
2497     stl = tcg_temp_new_i64();
2498     addr = tcg_temp_new_i64();
2499 
2500     tcg_gen_ld32u_i64(stl, tcg_env,
2501                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2502                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2503     tcg_gen_ld32u_i64(atl, tcg_env,
2504                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2505                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2506     tcg_gen_shli_i64(stl, stl, 32);
2507     tcg_gen_or_i64(addr, atl, stl);
2508 
2509     reg = load_gpr(ctx, a->r);
2510     if (a->addr) {
2511         gen_helper_itlba_pa11(tcg_env, addr, reg);
2512     } else {
2513         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2514     }
2515 
2516     /* Exit TB for TLB change if mmu is enabled.  */
2517     if (ctx->tb_flags & PSW_C) {
2518         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2519     }
2520     return nullify_end(ctx);
2521 #endif
2522 }
2523 
2524 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2525 {
2526     if (!ctx->is_pa20) {
2527         return false;
2528     }
2529     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2530 #ifndef CONFIG_USER_ONLY
2531     nullify_over(ctx);
2532     {
2533         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2534         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2535 
2536         if (a->data) {
2537             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2538         } else {
2539             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2540         }
2541     }
2542     /* Exit TB for TLB change if mmu is enabled.  */
2543     if (ctx->tb_flags & PSW_C) {
2544         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2545     }
2546     return nullify_end(ctx);
2547 #endif
2548 }
2549 
2550 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2551 {
2552     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2553 #ifndef CONFIG_USER_ONLY
2554     TCGv_i64 vaddr;
2555     TCGv_i64 ofs, paddr;
2556 
2557     nullify_over(ctx);
2558 
2559     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2560 
2561     paddr = tcg_temp_new_i64();
2562     gen_helper_lpa(paddr, tcg_env, vaddr);
2563 
2564     /* Note that physical address result overrides base modification.  */
2565     if (a->m) {
2566         save_gpr(ctx, a->b, ofs);
2567     }
2568     save_gpr(ctx, a->t, paddr);
2569 
2570     return nullify_end(ctx);
2571 #endif
2572 }
2573 
2574 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2575 {
2576     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2577 
2578     /* The Coherence Index is an implementation-defined function of the
2579        physical address.  Two addresses with the same CI have a coherent
2580        view of the cache.  Our implementation is to return 0 for all,
2581        since the entire address space is coherent.  */
2582     save_gpr(ctx, a->t, ctx->zero);
2583 
2584     cond_free(&ctx->null_cond);
2585     return true;
2586 }
2587 
2588 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2589 {
2590     return do_add_reg(ctx, a, false, false, false, false);
2591 }
2592 
2593 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2594 {
2595     return do_add_reg(ctx, a, true, false, false, false);
2596 }
2597 
2598 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2599 {
2600     return do_add_reg(ctx, a, false, true, false, false);
2601 }
2602 
2603 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2604 {
2605     return do_add_reg(ctx, a, false, false, false, true);
2606 }
2607 
2608 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2609 {
2610     return do_add_reg(ctx, a, false, true, false, true);
2611 }
2612 
2613 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2614 {
2615     return do_sub_reg(ctx, a, false, false, false);
2616 }
2617 
2618 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2619 {
2620     return do_sub_reg(ctx, a, true, false, false);
2621 }
2622 
2623 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2624 {
2625     return do_sub_reg(ctx, a, false, false, true);
2626 }
2627 
2628 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2629 {
2630     return do_sub_reg(ctx, a, true, false, true);
2631 }
2632 
2633 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2634 {
2635     return do_sub_reg(ctx, a, false, true, false);
2636 }
2637 
2638 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2639 {
2640     return do_sub_reg(ctx, a, true, true, false);
2641 }
2642 
2643 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2644 {
2645     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2646 }
2647 
2648 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2649 {
2650     return do_log_reg(ctx, a, tcg_gen_and_i64);
2651 }
2652 
2653 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2654 {
2655     if (a->cf == 0) {
2656         unsigned r2 = a->r2;
2657         unsigned r1 = a->r1;
2658         unsigned rt = a->t;
2659 
2660         if (rt == 0) { /* NOP */
2661             cond_free(&ctx->null_cond);
2662             return true;
2663         }
2664         if (r2 == 0) { /* COPY */
2665             if (r1 == 0) {
2666                 TCGv_i64 dest = dest_gpr(ctx, rt);
2667                 tcg_gen_movi_i64(dest, 0);
2668                 save_gpr(ctx, rt, dest);
2669             } else {
2670                 save_gpr(ctx, rt, cpu_gr[r1]);
2671             }
2672             cond_free(&ctx->null_cond);
2673             return true;
2674         }
2675 #ifndef CONFIG_USER_ONLY
2676         /* These are QEMU extensions and are nops in the real architecture:
2677          *
2678          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2679          * or %r31,%r31,%r31 -- death loop; offline cpu
2680          *                      currently implemented as idle.
2681          */
2682         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2683             /* No need to check for supervisor, as userland can only pause
2684                until the next timer interrupt.  */
2685             nullify_over(ctx);
2686 
2687             /* Advance the instruction queue.  */
2688             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2689             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2690             nullify_set(ctx, 0);
2691 
2692             /* Tell the qemu main loop to halt until this cpu has work.  */
2693             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2694                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2695             gen_excp_1(EXCP_HALTED);
2696             ctx->base.is_jmp = DISAS_NORETURN;
2697 
2698             return nullify_end(ctx);
2699         }
2700 #endif
2701     }
2702     return do_log_reg(ctx, a, tcg_gen_or_i64);
2703 }
2704 
2705 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2706 {
2707     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2708 }
2709 
2710 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2711 {
2712     TCGv_i64 tcg_r1, tcg_r2;
2713 
2714     if (a->cf) {
2715         nullify_over(ctx);
2716     }
2717     tcg_r1 = load_gpr(ctx, a->r1);
2718     tcg_r2 = load_gpr(ctx, a->r2);
2719     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2720     return nullify_end(ctx);
2721 }
2722 
2723 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2724 {
2725     TCGv_i64 tcg_r1, tcg_r2;
2726 
2727     if (a->cf) {
2728         nullify_over(ctx);
2729     }
2730     tcg_r1 = load_gpr(ctx, a->r1);
2731     tcg_r2 = load_gpr(ctx, a->r2);
2732     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2733     return nullify_end(ctx);
2734 }
2735 
2736 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2737 {
2738     TCGv_i64 tcg_r1, tcg_r2, tmp;
2739 
2740     if (a->cf) {
2741         nullify_over(ctx);
2742     }
2743     tcg_r1 = load_gpr(ctx, a->r1);
2744     tcg_r2 = load_gpr(ctx, a->r2);
2745     tmp = tcg_temp_new_i64();
2746     tcg_gen_not_i64(tmp, tcg_r2);
2747     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2748     return nullify_end(ctx);
2749 }
2750 
2751 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2752 {
2753     return do_uaddcm(ctx, a, false);
2754 }
2755 
2756 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2757 {
2758     return do_uaddcm(ctx, a, true);
2759 }
2760 
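/* DCOR/IDCOR: decimal correct.  As coded below, DCOR subtracts 6 from
 * every 4-bit digit whose saved carry in PSW[CB] is clear, and IDCOR
 * adds 6 to every digit whose carry is set -- the usual fix-up for
 * packed-BCD arithmetic performed with binary adds.
 */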
2761 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2762 {
2763     TCGv_i64 tmp;
2764 
2765     nullify_over(ctx);
2766 
2767     tmp = tcg_temp_new_i64();
2768     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2769     if (!is_i) {
2770         tcg_gen_not_i64(tmp, tmp);
2771     }
2772     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2773     tcg_gen_muli_i64(tmp, tmp, 6);
2774     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2775             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2776     return nullify_end(ctx);
2777 }
2778 
2779 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2780 {
2781     return do_dcor(ctx, a, false);
2782 }
2783 
2784 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2785 {
2786     return do_dcor(ctx, a, true);
2787 }
2788 
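/* DS: divide step.  One bit of a non-restoring binary division:
 * depending on PSW[V], the shifted partial remainder (R1 << 1 | CB)
 * either adds or subtracts the divisor R2.  Division millicode issues
 * this insn once per quotient bit (a sketch of its intended use, not
 * something this translator depends on).
 */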
2789 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2790 {
2791     TCGv_i64 dest, add1, add2, addc, in1, in2;
2792     TCGv_i64 cout;
2793 
2794     nullify_over(ctx);
2795 
2796     in1 = load_gpr(ctx, a->r1);
2797     in2 = load_gpr(ctx, a->r2);
2798 
2799     add1 = tcg_temp_new_i64();
2800     add2 = tcg_temp_new_i64();
2801     addc = tcg_temp_new_i64();
2802     dest = tcg_temp_new_i64();
2803 
2804     /* Form R1 << 1 | PSW[CB]{8}.  */
2805     tcg_gen_add_i64(add1, in1, in1);
2806     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2807 
2808     /*
2809      * Add or subtract R2, depending on PSW[V].  Proper computation of
2810      * carry requires that we subtract via + ~R2 + 1, as described in
2811      * the manual.  By extracting and masking V, we can produce the
2812      * proper inputs to the addition without movcond.
2813      */
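    /* Concretely: sextract(V, 31, 1) is 0 or -1, so ADD2 is either
       R2 ^ 0 = R2 with ADDC = 0, or R2 ^ -1 = ~R2 with ADDC = 1,
       i.e. ADD1 + R2 versus ADD1 + ~R2 + 1 = ADD1 - R2.  */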
2814     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2815     tcg_gen_xor_i64(add2, in2, addc);
2816     tcg_gen_andi_i64(addc, addc, 1);
2817 
2818     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2819     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2820                      addc, ctx->zero);
2821 
2822     /* Write back the result register.  */
2823     save_gpr(ctx, a->t, dest);
2824 
2825     /* Write back PSW[CB].  */
2826     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2827     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2828 
2829     /* Write back PSW[V] for the division step.  */
2830     cout = get_psw_carry(ctx, false);
2831     tcg_gen_neg_i64(cpu_psw_v, cout);
2832     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2833 
2834     /* Install the new nullification.  */
2835     if (a->cf) {
2836         TCGv_i64 sv = NULL;
2837         if (cond_need_sv(a->cf >> 1)) {
2838             /* ??? The lshift is supposed to contribute to overflow.  */
2839             sv = do_add_sv(ctx, dest, add1, add2);
2840         }
2841         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2842     }
2843 
2844     return nullify_end(ctx);
2845 }
2846 
2847 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2848 {
2849     return do_add_imm(ctx, a, false, false);
2850 }
2851 
2852 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2853 {
2854     return do_add_imm(ctx, a, true, false);
2855 }
2856 
2857 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2858 {
2859     return do_add_imm(ctx, a, false, true);
2860 }
2861 
2862 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2863 {
2864     return do_add_imm(ctx, a, true, true);
2865 }
2866 
2867 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2868 {
2869     return do_sub_imm(ctx, a, false);
2870 }
2871 
2872 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2873 {
2874     return do_sub_imm(ctx, a, true);
2875 }
2876 
2877 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2878 {
2879     TCGv_i64 tcg_im, tcg_r2;
2880 
2881     if (a->cf) {
2882         nullify_over(ctx);
2883     }
2884 
2885     tcg_im = tcg_constant_i64(a->i);
2886     tcg_r2 = load_gpr(ctx, a->r);
2887     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2888 
2889     return nullify_end(ctx);
2890 }
2891 
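/* Common wrapper for the PA 2.0 halfword/word-parallel ("multimedia")
 * ops: they are absent before pa2.0, hence the is_pa20 check, and are
 * nullified like any other ALU operation.
 */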
2892 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2893                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2894 {
2895     TCGv_i64 r1, r2, dest;
2896 
2897     if (!ctx->is_pa20) {
2898         return false;
2899     }
2900 
2901     nullify_over(ctx);
2902 
2903     r1 = load_gpr(ctx, a->r1);
2904     r2 = load_gpr(ctx, a->r2);
2905     dest = dest_gpr(ctx, a->t);
2906 
2907     fn(dest, r1, r2);
2908     save_gpr(ctx, a->t, dest);
2909 
2910     return nullify_end(ctx);
2911 }
2912 
2913 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2914                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2915 {
2916     TCGv_i64 r, dest;
2917 
2918     if (!ctx->is_pa20) {
2919         return false;
2920     }
2921 
2922     nullify_over(ctx);
2923 
2924     r = load_gpr(ctx, a->r);
2925     dest = dest_gpr(ctx, a->t);
2926 
2927     fn(dest, r, a->i);
2928     save_gpr(ctx, a->t, dest);
2929 
2930     return nullify_end(ctx);
2931 }
2932 
2933 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2934                                 void (*fn)(TCGv_i64, TCGv_i64,
2935                                            TCGv_i64, TCGv_i32))
2936 {
2937     TCGv_i64 r1, r2, dest;
2938 
2939     if (!ctx->is_pa20) {
2940         return false;
2941     }
2942 
2943     nullify_over(ctx);
2944 
2945     r1 = load_gpr(ctx, a->r1);
2946     r2 = load_gpr(ctx, a->r2);
2947     dest = dest_gpr(ctx, a->t);
2948 
2949     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2950     save_gpr(ctx, a->t, dest);
2951 
2952     return nullify_end(ctx);
2953 }
2954 
2955 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2956 {
2957     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2958 }
2959 
2960 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2961 {
2962     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2963 }
2964 
2965 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2966 {
2967     return do_multimedia(ctx, a, gen_helper_hadd_us);
2968 }
2969 
2970 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2971 {
2972     return do_multimedia(ctx, a, gen_helper_havg);
2973 }
2974 
2975 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2976 {
2977     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2978 }
2979 
2980 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2981 {
2982     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2983 }
2984 
2985 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2986 {
2987     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2988 }
2989 
2990 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2991 {
2992     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2993 }
2994 
2995 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2996 {
2997     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2998 }
2999 
3000 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3001 {
3002     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3003 }
3004 
3005 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3006 {
3007     return do_multimedia(ctx, a, gen_helper_hsub_ss);
3008 }
3009 
3010 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3011 {
3012     return do_multimedia(ctx, a, gen_helper_hsub_us);
3013 }
3014 
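/* MIXH,L / MIXH,R select alternating halfwords (lane 0 = leftmost):
 *     r1 = {a0,a1,a2,a3}, r2 = {b0,b1,b2,b3}
 *     MIXH,L -> {a0,b0,a2,b2}     MIXH,R -> {a1,b1,a3,b3}
 */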
3015 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3016 {
3017     uint64_t mask = 0xffff0000ffff0000ull;
3018     TCGv_i64 tmp = tcg_temp_new_i64();
3019 
3020     tcg_gen_andi_i64(tmp, r2, mask);
3021     tcg_gen_andi_i64(dst, r1, mask);
3022     tcg_gen_shri_i64(tmp, tmp, 16);
3023     tcg_gen_or_i64(dst, dst, tmp);
3024 }
3025 
3026 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3027 {
3028     return do_multimedia(ctx, a, gen_mixh_l);
3029 }
3030 
3031 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3032 {
3033     uint64_t mask = 0x0000ffff0000ffffull;
3034     TCGv_i64 tmp = tcg_temp_new_i64();
3035 
3036     tcg_gen_andi_i64(tmp, r1, mask);
3037     tcg_gen_andi_i64(dst, r2, mask);
3038     tcg_gen_shli_i64(tmp, tmp, 16);
3039     tcg_gen_or_i64(dst, dst, tmp);
3040 }
3041 
3042 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3043 {
3044     return do_multimedia(ctx, a, gen_mixh_r);
3045 }
3046 
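/* MIXW,L / MIXW,R make the same selection on 32-bit words:
 *     MIXW,L -> {r1.w0, r2.w0}    MIXW,R -> {r1.w1, r2.w1}
 * (w0 = most significant word)
 */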
3047 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3048 {
3049     TCGv_i64 tmp = tcg_temp_new_i64();
3050 
3051     tcg_gen_shri_i64(tmp, r2, 32);
3052     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3053 }
3054 
3055 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3056 {
3057     return do_multimedia(ctx, a, gen_mixw_l);
3058 }
3059 
3060 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3061 {
3062     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3063 }
3064 
3065 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3066 {
3067     return do_multimedia(ctx, a, gen_mixw_r);
3068 }
3069 
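/* PERMH: each result halfword i (0 = leftmost) is r1's halfword c<i>;
 * e.g. c0=c1=c2=c3=0 broadcasts the leftmost halfword to all lanes.
 */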
3070 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3071 {
3072     TCGv_i64 r, t0, t1, t2, t3;
3073 
3074     if (!ctx->is_pa20) {
3075         return false;
3076     }
3077 
3078     nullify_over(ctx);
3079 
3080     r = load_gpr(ctx, a->r1);
3081     t0 = tcg_temp_new_i64();
3082     t1 = tcg_temp_new_i64();
3083     t2 = tcg_temp_new_i64();
3084     t3 = tcg_temp_new_i64();
3085 
3086     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3087     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3088     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3089     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3090 
3091     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3092     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3093     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3094 
3095     save_gpr(ctx, a->t, t0);
3096     return nullify_end(ctx);
3097 }
3098 
3099 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3100 {
3101     if (ctx->is_pa20) {
3102        /*
3103         * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3104         * Any base modification still occurs.
3105         */
3106         if (a->t == 0) {
3107             return trans_nop_addrx(ctx, a);
3108         }
3109     } else if (a->size > MO_32) {
3110         return gen_illegal(ctx);
3111     }
3112     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3113                    a->disp, a->sp, a->m, a->size | MO_TE);
3114 }
3115 
3116 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3117 {
3118     assert(a->x == 0 && a->scale == 0);
3119     if (!ctx->is_pa20 && a->size > MO_32) {
3120         return gen_illegal(ctx);
3121     }
3122     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3123 }
3124 
3125 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3126 {
3127     MemOp mop = MO_TE | MO_ALIGN | a->size;
3128     TCGv_i64 dest, ofs;
3129     TCGv_i64 addr;
3130 
3131     if (!ctx->is_pa20 && a->size > MO_32) {
3132         return gen_illegal(ctx);
3133     }
3134 
3135     nullify_over(ctx);
3136 
3137     if (a->m) {
3138         /* Base register modification.  Make sure if RT == RB,
3139            we see the result of the load.  */
3140         dest = tcg_temp_new_i64();
3141     } else {
3142         dest = dest_gpr(ctx, a->t);
3143     }
3144 
3145     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3146              a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3147 
3148     /*
3149      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3150      * However actual hardware succeeds with aligned mod 4.
3151      * Detect this case and log a GUEST_ERROR.
3152      *
3153      * TODO: HPPA64 relaxes the over-alignment requirement
3154      * with the ,co completer.
3155      */
3156     gen_helper_ldc_check(addr);
3157 
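    /* LDCW atomically loads the old value and clears the word to zero;
       PA-RISC spinlocks are built on it (nonzero means the lock was
       free).  Hence the atomic exchange with zero below.  */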
3158     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3159 
3160     if (a->m) {
3161         save_gpr(ctx, a->b, ofs);
3162     }
3163     save_gpr(ctx, a->t, dest);
3164 
3165     return nullify_end(ctx);
3166 }
3167 
3168 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3169 {
3170     TCGv_i64 ofs, val;
3171     TCGv_i64 addr;
3172 
3173     nullify_over(ctx);
3174 
3175     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3176              MMU_DISABLED(ctx));
3177     val = load_gpr(ctx, a->r);
3178     if (a->a) {
3179         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3180             gen_helper_stby_e_parallel(tcg_env, addr, val);
3181         } else {
3182             gen_helper_stby_e(tcg_env, addr, val);
3183         }
3184     } else {
3185         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3186             gen_helper_stby_b_parallel(tcg_env, addr, val);
3187         } else {
3188             gen_helper_stby_b(tcg_env, addr, val);
3189         }
3190     }
3191     if (a->m) {
3192         tcg_gen_andi_i64(ofs, ofs, ~3);
3193         save_gpr(ctx, a->b, ofs);
3194     }
3195 
3196     return nullify_end(ctx);
3197 }
3198 
3199 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3200 {
3201     TCGv_i64 ofs, val;
3202     TCGv_i64 addr;
3203 
3204     if (!ctx->is_pa20) {
3205         return false;
3206     }
3207     nullify_over(ctx);
3208 
3209     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3210              MMU_DISABLED(ctx));
3211     val = load_gpr(ctx, a->r);
3212     if (a->a) {
3213         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3214             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3215         } else {
3216             gen_helper_stdby_e(tcg_env, addr, val);
3217         }
3218     } else {
3219         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3220             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3221         } else {
3222             gen_helper_stdby_b(tcg_env, addr, val);
3223         }
3224     }
3225     if (a->m) {
3226         tcg_gen_andi_i64(ofs, ofs, ~7);
3227         save_gpr(ctx, a->b, ofs);
3228     }
3229 
3230     return nullify_end(ctx);
3231 }
3232 
3233 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3234 {
3235     int hold_mmu_idx = ctx->mmu_idx;
3236 
3237     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3238     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3239     trans_ld(ctx, a);
3240     ctx->mmu_idx = hold_mmu_idx;
3241     return true;
3242 }
3243 
3244 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3245 {
3246     int hold_mmu_idx = ctx->mmu_idx;
3247 
3248     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3249     ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3250     trans_st(ctx, a);
3251     ctx->mmu_idx = hold_mmu_idx;
3252     return true;
3253 }
3254 
3255 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3256 {
3257     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3258 
3259     tcg_gen_movi_i64(tcg_rt, a->i);
3260     save_gpr(ctx, a->t, tcg_rt);
3261     cond_free(&ctx->null_cond);
3262     return true;
3263 }
3264 
3265 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3266 {
3267     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3268     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3269 
3270     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3271     save_gpr(ctx, 1, tcg_r1);
3272     cond_free(&ctx->null_cond);
3273     return true;
3274 }
3275 
3276 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3277 {
3278     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3279 
3280     /* Special case rb == 0, for the LDI pseudo-op.
3281        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3282     if (a->b == 0) {
3283         tcg_gen_movi_i64(tcg_rt, a->i);
3284     } else {
3285         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3286     }
3287     save_gpr(ctx, a->t, tcg_rt);
3288     cond_free(&ctx->null_cond);
3289     return true;
3290 }
3291 
3292 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3293                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3294 {
3295     TCGv_i64 dest, in2, sv;
3296     DisasCond cond;
3297 
3298     in2 = load_gpr(ctx, r);
3299     dest = tcg_temp_new_i64();
3300 
3301     tcg_gen_sub_i64(dest, in1, in2);
3302 
3303     sv = NULL;
3304     if (cond_need_sv(c)) {
3305         sv = do_sub_sv(ctx, dest, in1, in2);
3306     }
3307 
3308     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3309     return do_cbranch(ctx, disp, n, &cond);
3310 }
3311 
3312 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3313 {
3314     if (!ctx->is_pa20 && a->d) {
3315         return false;
3316     }
3317     nullify_over(ctx);
3318     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3319                    a->c, a->f, a->d, a->n, a->disp);
3320 }
3321 
3322 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3323 {
3324     if (!ctx->is_pa20 && a->d) {
3325         return false;
3326     }
3327     nullify_over(ctx);
3328     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3329                    a->c, a->f, a->d, a->n, a->disp);
3330 }
3331 
3332 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3333                     unsigned c, unsigned f, unsigned n, int disp)
3334 {
3335     TCGv_i64 dest, in2, sv, cb_cond;
3336     DisasCond cond;
3337     bool d = false;
3338 
3339     /*
3340      * For hppa64, the ADDB conditions change with PSW.W,
3341      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3342      */
3343     if (ctx->tb_flags & PSW_W) {
3344         d = c >= 5;
3345         if (d) {
3346             c &= 3;
3347         }
3348     }
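    /* E.g. with PSW.W set, ZNV (c=5) is handled as double-word EQ
       (d=1, c=1), SV as LT, and OD as LE, per the remapping above.  */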
3349 
3350     in2 = load_gpr(ctx, r);
3351     dest = tcg_temp_new_i64();
3352     sv = NULL;
3353     cb_cond = NULL;
3354 
3355     if (cond_need_cb(c)) {
3356         TCGv_i64 cb = tcg_temp_new_i64();
3357         TCGv_i64 cb_msb = tcg_temp_new_i64();
3358 
3359         tcg_gen_movi_i64(cb_msb, 0);
3360         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3361         tcg_gen_xor_i64(cb, in1, in2);
3362         tcg_gen_xor_i64(cb, cb, dest);
3363         cb_cond = get_carry(ctx, d, cb, cb_msb);
3364     } else {
3365         tcg_gen_add_i64(dest, in1, in2);
3366     }
3367     if (cond_need_sv(c)) {
3368         sv = do_add_sv(ctx, dest, in1, in2);
3369     }
3370 
3371     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3372     save_gpr(ctx, r, dest);
3373     return do_cbranch(ctx, disp, n, &cond);
3374 }
3375 
3376 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3377 {
3378     nullify_over(ctx);
3379     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3380 }
3381 
3382 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3383 {
3384     nullify_over(ctx);
3385     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3386 }
3387 
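/* BB: branch on bit.  The selected bit (big-endian numbering, taken
 * from either SAR or an immediate) is shifted into bit 63 of a
 * temporary, so the signed GE/LT tests below observe exactly that bit.
 */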
3388 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3389 {
3390     TCGv_i64 tmp, tcg_r;
3391     DisasCond cond;
3392 
3393     nullify_over(ctx);
3394 
3395     tmp = tcg_temp_new_i64();
3396     tcg_r = load_gpr(ctx, a->r);
3397     if (cond_need_ext(ctx, a->d)) {
3398         /* Force shift into [32,63] */
3399         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3400         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3401     } else {
3402         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3403     }
3404 
3405     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3406     return do_cbranch(ctx, a->disp, a->n, &cond);
3407 }
3408 
3409 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3410 {
3411     TCGv_i64 tmp, tcg_r;
3412     DisasCond cond;
3413     int p;
3414 
3415     nullify_over(ctx);
3416 
3417     tmp = tcg_temp_new_i64();
3418     tcg_r = load_gpr(ctx, a->r);
3419     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3420     tcg_gen_shli_i64(tmp, tcg_r, p);
3421 
3422     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3423     return do_cbranch(ctx, a->disp, a->n, &cond);
3424 }
3425 
3426 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3427 {
3428     TCGv_i64 dest;
3429     DisasCond cond;
3430 
3431     nullify_over(ctx);
3432 
3433     dest = dest_gpr(ctx, a->r2);
3434     if (a->r1 == 0) {
3435         tcg_gen_movi_i64(dest, 0);
3436     } else {
3437         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3438     }
3439 
3440     /* All MOVB conditions are 32-bit. */
3441     cond = do_sed_cond(ctx, a->c, false, dest);
3442     return do_cbranch(ctx, a->disp, a->n, &cond);
3443 }
3444 
3445 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3446 {
3447     TCGv_i64 dest;
3448     DisasCond cond;
3449 
3450     nullify_over(ctx);
3451 
3452     dest = dest_gpr(ctx, a->r);
3453     tcg_gen_movi_i64(dest, a->i);
3454 
3455     /* All MOVBI conditions are 32-bit. */
3456     cond = do_sed_cond(ctx, a->c, false, dest);
3457     return do_cbranch(ctx, a->disp, a->n, &cond);
3458 }
3459 
3460 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3461 {
3462     TCGv_i64 dest, src2;
3463 
3464     if (!ctx->is_pa20 && a->d) {
3465         return false;
3466     }
3467     if (a->c) {
3468         nullify_over(ctx);
3469     }
3470 
3471     dest = dest_gpr(ctx, a->t);
3472     src2 = load_gpr(ctx, a->r2);
3473     if (a->r1 == 0) {
3474         if (a->d) {
3475             tcg_gen_shr_i64(dest, src2, cpu_sar);
3476         } else {
3477             TCGv_i64 tmp = tcg_temp_new_i64();
3478 
3479             tcg_gen_ext32u_i64(dest, src2);
3480             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3481             tcg_gen_shr_i64(dest, dest, tmp);
3482         }
3483     } else if (a->r1 == a->r2) {
3484         if (a->d) {
3485             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3486         } else {
3487             TCGv_i32 t32 = tcg_temp_new_i32();
3488             TCGv_i32 s32 = tcg_temp_new_i32();
3489 
3490             tcg_gen_extrl_i64_i32(t32, src2);
3491             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3492             tcg_gen_andi_i32(s32, s32, 31);
3493             tcg_gen_rotr_i32(t32, t32, s32);
3494             tcg_gen_extu_i32_i64(dest, t32);
3495         }
3496     } else {
3497         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3498 
3499         if (a->d) {
3500             TCGv_i64 t = tcg_temp_new_i64();
3501             TCGv_i64 n = tcg_temp_new_i64();
3502 
3503             tcg_gen_xori_i64(n, cpu_sar, 63);
3504             tcg_gen_shl_i64(t, src1, n);
3505             tcg_gen_shli_i64(t, t, 1);
3506             tcg_gen_shr_i64(dest, src2, cpu_sar);
3507             tcg_gen_or_i64(dest, dest, t);
3508         } else {
3509             TCGv_i64 t = tcg_temp_new_i64();
3510             TCGv_i64 s = tcg_temp_new_i64();
3511 
3512             tcg_gen_concat32_i64(t, src2, src1);
3513             tcg_gen_andi_i64(s, cpu_sar, 31);
3514             tcg_gen_shr_i64(dest, t, s);
3515         }
3516     }
3517     save_gpr(ctx, a->t, dest);
3518 
3519     /* Install the new nullification.  */
3520     cond_free(&ctx->null_cond);
3521     if (a->c) {
3522         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3523     }
3524     return nullify_end(ctx);
3525 }
3526 
3527 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3528 {
3529     unsigned width, sa;
3530     TCGv_i64 dest, t2;
3531 
3532     if (!ctx->is_pa20 && a->d) {
3533         return false;
3534     }
3535     if (a->c) {
3536         nullify_over(ctx);
3537     }
3538 
3539     width = a->d ? 64 : 32;
3540     sa = width - 1 - a->cpos;
3541 
3542     dest = dest_gpr(ctx, a->t);
3543     t2 = load_gpr(ctx, a->r2);
3544     if (a->r1 == 0) {
3545         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3546     } else if (width == TARGET_LONG_BITS) {
3547         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3548     } else {
3549         assert(!a->d);
3550         if (a->r1 == a->r2) {
3551             TCGv_i32 t32 = tcg_temp_new_i32();
3552             tcg_gen_extrl_i64_i32(t32, t2);
3553             tcg_gen_rotri_i32(t32, t32, sa);
3554             tcg_gen_extu_i32_i64(dest, t32);
3555         } else {
3556             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3557             tcg_gen_extract_i64(dest, dest, sa, 32);
3558         }
3559     }
3560     save_gpr(ctx, a->t, dest);
3561 
3562     /* Install the new nullification.  */
3563     cond_free(&ctx->null_cond);
3564     if (a->c) {
3565         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3566     }
3567     return nullify_end(ctx);
3568 }
3569 
3570 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3571 {
3572     unsigned widthm1 = a->d ? 63 : 31;
3573     TCGv_i64 dest, src, tmp;
3574 
3575     if (!ctx->is_pa20 && a->d) {
3576         return false;
3577     }
3578     if (a->c) {
3579         nullify_over(ctx);
3580     }
3581 
3582     dest = dest_gpr(ctx, a->t);
3583     src = load_gpr(ctx, a->r);
3584     tmp = tcg_temp_new_i64();
3585 
3586     /* Recall that SAR uses big-endian bit numbering.  */
3587     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3588     tcg_gen_xori_i64(tmp, tmp, widthm1);
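    /* TMP is now (width - 1) - SAR: with big-endian numbering, a field
       ending at bit p requires a right shift of (width - 1) - p, and
       XOR with the all-ones widthm1 computes exactly that subtraction.  */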
3589 
3590     if (a->se) {
3591         if (!a->d) {
3592             tcg_gen_ext32s_i64(dest, src);
3593             src = dest;
3594         }
3595         tcg_gen_sar_i64(dest, src, tmp);
3596         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3597     } else {
3598         if (!a->d) {
3599             tcg_gen_ext32u_i64(dest, src);
3600             src = dest;
3601         }
3602         tcg_gen_shr_i64(dest, src, tmp);
3603         tcg_gen_extract_i64(dest, dest, 0, a->len);
3604     }
3605     save_gpr(ctx, a->t, dest);
3606 
3607     /* Install the new nullification.  */
3608     cond_free(&ctx->null_cond);
3609     if (a->c) {
3610         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3611     }
3612     return nullify_end(ctx);
3613 }
3614 
3615 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3616 {
3617     unsigned len, cpos, width;
3618     TCGv_i64 dest, src;
3619 
3620     if (!ctx->is_pa20 && a->d) {
3621         return false;
3622     }
3623     if (a->c) {
3624         nullify_over(ctx);
3625     }
3626 
3627     len = a->len;
3628     width = a->d ? 64 : 32;
3629     cpos = width - 1 - a->pos;
3630     if (cpos + len > width) {
3631         len = width - cpos;
3632     }
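    /* Truncate the field at the most-significant bit of the
       register rather than extracting past it.  */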
3633 
3634     dest = dest_gpr(ctx, a->t);
3635     src = load_gpr(ctx, a->r);
3636     if (a->se) {
3637         tcg_gen_sextract_i64(dest, src, cpos, len);
3638     } else {
3639         tcg_gen_extract_i64(dest, src, cpos, len);
3640     }
3641     save_gpr(ctx, a->t, dest);
3642 
3643     /* Install the new nullification.  */
3644     cond_free(&ctx->null_cond);
3645     if (a->c) {
3646         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3647     }
3648     return nullify_end(ctx);
3649 }
3650 
3651 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3652 {
3653     unsigned len, width;
3654     uint64_t mask0, mask1;
3655     TCGv_i64 dest;
3656 
3657     if (!ctx->is_pa20 && a->d) {
3658         return false;
3659     }
3660     if (a->c) {
3661         nullify_over(ctx);
3662     }
3663 
3664     len = a->len;
3665     width = a->d ? 64 : 32;
3666     if (a->cpos + len > width) {
3667         len = width - a->cpos;
3668     }
3669 
3670     dest = dest_gpr(ctx, a->t);
3671     mask0 = deposit64(0, a->cpos, len, a->i);
3672     mask1 = deposit64(-1, a->cpos, len, a->i);
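    /*
     * Informal example: cpos = 8, len = 4, i = 5 gives
     * mask0 = 0x500 and mask1 = 0xfffffffffffff5ff, so
     * (src & mask1) | mask0 below deposits i into the field while
     * preserving everything outside it.
     */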
3673 
3674     if (a->nz) {
3675         TCGv_i64 src = load_gpr(ctx, a->t);
3676         tcg_gen_andi_i64(dest, src, mask1);
3677         tcg_gen_ori_i64(dest, dest, mask0);
3678     } else {
3679         tcg_gen_movi_i64(dest, mask0);
3680     }
3681     save_gpr(ctx, a->t, dest);
3682 
3683     /* Install the new nullification.  */
3684     cond_free(&ctx->null_cond);
3685     if (a->c) {
3686         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3687     }
3688     return nullify_end(ctx);
3689 }
3690 
3691 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3692 {
3693     unsigned rs = a->nz ? a->t : 0;
3694     unsigned len, width;
3695     TCGv_i64 dest, val;
3696 
3697     if (!ctx->is_pa20 && a->d) {
3698         return false;
3699     }
3700     if (a->c) {
3701         nullify_over(ctx);
3702     }
3703 
3704     len = a->len;
3705     width = a->d ? 64 : 32;
3706     if (a->cpos + len > width) {
3707         len = width - a->cpos;
3708     }
3709 
3710     dest = dest_gpr(ctx, a->t);
3711     val = load_gpr(ctx, a->r);
3712     if (rs == 0) {
3713         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3714     } else {
3715         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3716     }
3717     save_gpr(ctx, a->t, dest);
3718 
3719     /* Install the new nullification.  */
3720     cond_free(&ctx->null_cond);
3721     if (a->c) {
3722         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3723     }
3724     return nullify_end(ctx);
3725 }
3726 
3727 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3728                        bool d, bool nz, unsigned len, TCGv_i64 val)
3729 {
3730     unsigned rs = nz ? rt : 0;
3731     unsigned widthm1 = d ? 63 : 31;
3732     TCGv_i64 mask, tmp, shift, dest;
3733     uint64_t msb = 1ULL << (len - 1);
3734 
3735     dest = dest_gpr(ctx, rt);
3736     shift = tcg_temp_new_i64();
3737     tmp = tcg_temp_new_i64();
3738 
3739     /* Convert big-endian bit numbering in SAR to left-shift.  */
3740     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3741     tcg_gen_xori_i64(shift, shift, widthm1);
3742 
3743     mask = tcg_temp_new_i64();
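    /* msb + (msb - 1) == (1 << len) - 1, phrased to avoid the
       undefined 1 << 64 when len == 64.  */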
3744     tcg_gen_movi_i64(mask, msb + (msb - 1));
3745     tcg_gen_and_i64(tmp, val, mask);
3746     if (rs) {
3747         tcg_gen_shl_i64(mask, mask, shift);
3748         tcg_gen_shl_i64(tmp, tmp, shift);
3749         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3750         tcg_gen_or_i64(dest, dest, tmp);
3751     } else {
3752         tcg_gen_shl_i64(dest, tmp, shift);
3753     }
3754     save_gpr(ctx, rt, dest);
3755 
3756     /* Install the new nullification.  */
3757     cond_free(&ctx->null_cond);
3758     if (c) {
3759         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3760     }
3761     return nullify_end(ctx);
3762 }
3763 
3764 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3765 {
3766     if (!ctx->is_pa20 && a->d) {
3767         return false;
3768     }
3769     if (a->c) {
3770         nullify_over(ctx);
3771     }
3772     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3773                       load_gpr(ctx, a->r));
3774 }
3775 
3776 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3777 {
3778     if (!ctx->is_pa20 && a->d) {
3779         return false;
3780     }
3781     if (a->c) {
3782         nullify_over(ctx);
3783     }
3784     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3785                       tcg_constant_i64(a->i));
3786 }
3787 
3788 static bool trans_be(DisasContext *ctx, arg_be *a)
3789 {
3790     TCGv_i64 tmp;
3791 
3792 #ifdef CONFIG_USER_ONLY
3793     /* ??? It seems like there should be a good way of using
3794        "be disp(sr2, r0)", the canonical gateway entry mechanism
3795        to our advantage.  But that appears to be inconvenient to
3796        manage alongside branch delay slots.  Therefore we handle
3797        entry into the gateway page via absolute address.  */
3798     /* Since we don't implement spaces, just branch.  Do notice the special
3799        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3800        goto_tb to the TB containing the syscall.  */
3801     if (a->b == 0) {
3802         return do_dbranch(ctx, a->disp, a->l, a->n);
3803     }
3804 #else
3805     nullify_over(ctx);
3806 #endif
3807 
3808     tmp = tcg_temp_new_i64();
3809     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3810     tmp = do_ibranch_priv(ctx, tmp);
3811 
3812 #ifdef CONFIG_USER_ONLY
3813     return do_ibranch(ctx, tmp, a->l, a->n);
3814 #else
3815     TCGv_i64 new_spc = tcg_temp_new_i64();
3816 
3817     load_spr(ctx, new_spc, a->sp);
3818     if (a->l) {
3819         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3820         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3821     }
3822     if (a->n && use_nullify_skip(ctx)) {
3823         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3824         tcg_gen_addi_i64(tmp, tmp, 4);
3825         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3826         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3827         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3828     } else {
3829         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3830         if (ctx->iaoq_b == -1) {
3831             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3832         }
3833         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3834         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3835         nullify_set(ctx, a->n);
3836     }
3837     tcg_gen_lookup_and_goto_ptr();
3838     ctx->base.is_jmp = DISAS_NORETURN;
3839     return nullify_end(ctx);
3840 #endif
3841 }
3842 
3843 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3844 {
3845     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3846 }
3847 
3848 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3849 {
3850     uint64_t dest = iaoq_dest(ctx, a->disp);
3851 
3852     nullify_over(ctx);
3853 
3854     /* Make sure the caller hasn't done something weird with the queue.
3855      * ??? This is not quite the same as the PSW[B] bit, which would be
3856      * expensive to track.  Real hardware will trap for
3857      *    b  gateway
3858      *    b  gateway+4  (in delay slot of first branch)
3859      * However, checking for a non-sequential instruction queue *will*
3860      * diagnose the security hole
3861      *    b  gateway
3862      *    b  evil
3863      * in which instructions at evil would run with increased privs.
3864      */
3865     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3866         return gen_illegal(ctx);
3867     }
3868 
3869 #ifndef CONFIG_USER_ONLY
3870     if (ctx->tb_flags & PSW_C) {
3871         int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
3872         /* If we could not find a TLB entry, then we need to generate an
3873            ITLB miss exception so the kernel will provide it.
3874            The resulting TLB fill operation will invalidate this TB and
3875            we will re-translate, at which point we *will* be able to find
3876            the TLB entry and determine if this is in fact a gateway page.  */
3877         if (type < 0) {
3878             gen_excp(ctx, EXCP_ITLB_MISS);
3879             return true;
3880         }
3881         /* No change for non-gateway pages or for priv decrease.  */
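        /* Presumably access-rights types 4..7 mark gateway pages that
           promote to privilege type - 4; the new privilege rides in
           the low two bits of the offset.  */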
3882         if (type >= 4 && type - 4 < ctx->privilege) {
3883             dest = deposit64(dest, 0, 2, type - 4);
3884         }
3885     } else {
3886         dest &= -4;  /* priv = 0 */
3887     }
3888 #endif
3889 
3890     if (a->l) {
3891         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3892         if (ctx->privilege < 3) {
3893             tcg_gen_andi_i64(tmp, tmp, -4);
3894         }
3895         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3896         save_gpr(ctx, a->l, tmp);
3897     }
3898 
3899     return do_dbranch(ctx, dest, 0, a->n);
3900 }
3901 
3902 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3903 {
3904     if (a->x) {
3905         TCGv_i64 tmp = tcg_temp_new_i64();
3906         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3907         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
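        /* I.e. target = iaoq_f + 8 + GR[x] * 8: each branch-table
           entry is two insns wide (branch plus delay slot).  */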
3908         /* The computation here never changes privilege level.  */
3909         return do_ibranch(ctx, tmp, a->l, a->n);
3910     } else {
3911         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3912         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3913     }
3914 }
3915 
3916 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3917 {
3918     TCGv_i64 dest;
3919 
3920     if (a->x == 0) {
3921         dest = load_gpr(ctx, a->b);
3922     } else {
3923         dest = tcg_temp_new_i64();
3924         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3925         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3926     }
3927     dest = do_ibranch_priv(ctx, dest);
3928     return do_ibranch(ctx, dest, 0, a->n);
3929 }
3930 
3931 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3932 {
3933     TCGv_i64 dest;
3934 
3935 #ifdef CONFIG_USER_ONLY
3936     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3937     return do_ibranch(ctx, dest, a->l, a->n);
3938 #else
3939     nullify_over(ctx);
3940     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3941 
3942     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3943     if (ctx->iaoq_b == -1) {
3944         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3945     }
3946     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3947     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3948     if (a->l) {
3949         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3950     }
3951     nullify_set(ctx, a->n);
3952     tcg_gen_lookup_and_goto_ptr();
3953     ctx->base.is_jmp = DISAS_NORETURN;
3954     return nullify_end(ctx);
3955 #endif
3956 }
3957 
3958 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3959 {
3960     /* All branch-target-stack instructions are implemented as nops. */
3961     return ctx->is_pa20;
3962 }
3963 
3964 /*
3965  * Float class 0
3966  */
3967 
3968 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3969 {
3970     tcg_gen_mov_i32(dst, src);
3971 }
3972 
3973 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3974 {
3975     uint64_t ret;
3976 
3977     if (ctx->is_pa20) {
3978         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3979     } else {
3980         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3981     }
3982 
3983     nullify_over(ctx);
3984     save_frd(0, tcg_constant_i64(ret));
3985     return nullify_end(ctx);
3986 }
3987 
3988 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3989 {
3990     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3991 }
3992 
3993 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3994 {
3995     tcg_gen_mov_i64(dst, src);
3996 }
3997 
3998 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3999 {
4000     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
4001 }
4002 
4003 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4004 {
4005     tcg_gen_andi_i32(dst, src, INT32_MAX);
4006 }
4007 
4008 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
4009 {
4010     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
4011 }
4012 
4013 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4014 {
4015     tcg_gen_andi_i64(dst, src, INT64_MAX);
4016 }
4017 
4018 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
4019 {
4020     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
4021 }
4022 
4023 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
4024 {
4025     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
4026 }
4027 
4028 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
4029 {
4030     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
4031 }
4032 
4033 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
4034 {
4035     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
4036 }
4037 
4038 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
4039 {
4040     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
4041 }
4042 
4043 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4044 {
4045     tcg_gen_xori_i32(dst, src, INT32_MIN);
4046 }
4047 
4048 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
4049 {
4050     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
4051 }
4052 
4053 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4054 {
4055     tcg_gen_xori_i64(dst, src, INT64_MIN);
4056 }
4057 
4058 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
4059 {
4060     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
4061 }
4062 
4063 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4064 {
4065     tcg_gen_ori_i32(dst, src, INT32_MIN);
4066 }
4067 
4068 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4069 {
4070     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4071 }
4072 
4073 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4074 {
4075     tcg_gen_ori_i64(dst, src, INT64_MIN);
4076 }
4077 
4078 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4079 {
4080     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4081 }
4082 
4083 /*
4084  * Float class 1
4085  */
4086 
4087 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4088 {
4089     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4090 }
4091 
4092 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4093 {
4094     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4095 }
4096 
4097 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4098 {
4099     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4100 }
4101 
4102 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4103 {
4104     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4105 }
4106 
4107 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4108 {
4109     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4110 }
4111 
4112 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4113 {
4114     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4115 }
4116 
4117 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4118 {
4119     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4120 }
4121 
4122 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4123 {
4124     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4125 }
4126 
4127 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4128 {
4129     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4130 }
4131 
4132 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4133 {
4134     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4135 }
4136 
4137 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4138 {
4139     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4140 }
4141 
4142 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4143 {
4144     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4145 }
4146 
4147 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4148 {
4149     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4150 }
4151 
4152 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4153 {
4154     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4155 }
4156 
4157 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4158 {
4159     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4160 }
4161 
4162 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4163 {
4164     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4165 }
4166 
4167 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4168 {
4169     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4170 }
4171 
4172 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4173 {
4174     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4175 }
4176 
4177 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4178 {
4179     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4180 }
4181 
4182 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4183 {
4184     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4185 }
4186 
4187 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4188 {
4189     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4190 }
4191 
4192 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4193 {
4194     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4195 }
4196 
4197 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4198 {
4199     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4200 }
4201 
4202 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4203 {
4204     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4205 }
4206 
4207 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4208 {
4209     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4210 }
4211 
4212 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4213 {
4214     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4215 }
4216 
4217 /*
4218  * Float class 2
4219  */
4220 
4221 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4222 {
4223     TCGv_i32 ta, tb, tc, ty;
4224 
4225     nullify_over(ctx);
4226 
4227     ta = load_frw0_i32(a->r1);
4228     tb = load_frw0_i32(a->r2);
4229     ty = tcg_constant_i32(a->y);
4230     tc = tcg_constant_i32(a->c);
4231 
4232     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4233 
4234     return nullify_end(ctx);
4235 }
4236 
4237 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4238 {
4239     TCGv_i64 ta, tb;
4240     TCGv_i32 tc, ty;
4241 
4242     nullify_over(ctx);
4243 
4244     ta = load_frd0(a->r1);
4245     tb = load_frd0(a->r2);
4246     ty = tcg_constant_i32(a->y);
4247     tc = tcg_constant_i32(a->c);
4248 
4249     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4250 
4251     return nullify_end(ctx);
4252 }
4253 
4254 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4255 {
4256     TCGv_i64 t;
4257 
4258     nullify_over(ctx);
4259 
4260     t = tcg_temp_new_i64();
4261     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
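    /*
     * In fr0_shadow, bit 26 is the FPSR C bit and the compare queue
     * occupies the bits descending from bit 21, which is what the
     * acc/rej masks below cover.
     */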
4262 
4263     if (a->y == 1) {
4264         int mask;
4265         bool inv = false;
4266 
4267         switch (a->c) {
4268         case 0: /* simple */
4269             tcg_gen_andi_i64(t, t, 0x4000000);
4270             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4271             goto done;
4272         case 2: /* rej */
4273             inv = true;
4274             /* fallthru */
4275         case 1: /* acc */
4276             mask = 0x43ff800;
4277             break;
4278         case 6: /* rej8 */
4279             inv = true;
4280             /* fallthru */
4281         case 5: /* acc8 */
4282             mask = 0x43f8000;
4283             break;
4284         case 9: /* acc6 */
4285             mask = 0x43e0000;
4286             break;
4287         case 13: /* acc4 */
4288             mask = 0x4380000;
4289             break;
4290         case 17: /* acc2 */
4291             mask = 0x4200000;
4292             break;
4293         default:
4294             gen_illegal(ctx);
4295             return true;
4296         }
4297         if (inv) {
4298             TCGv_i64 c = tcg_constant_i64(mask);
4299             tcg_gen_or_i64(t, t, c);
4300             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4301         } else {
4302             tcg_gen_andi_i64(t, t, mask);
4303             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4304         }
4305     } else {
4306         unsigned cbit = (a->y ^ 1) - 1;
4307 
4308         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4309         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4310     }
4311 
4312  done:
4313     return nullify_end(ctx);
4314 }
4315 
4316 /*
4317  * Float class 3
4318  */
4319 
4320 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4321 {
4322     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4323 }
4324 
4325 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4326 {
4327     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4328 }
4329 
4330 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4331 {
4332     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4333 }
4334 
4335 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4336 {
4337     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4338 }
4339 
4340 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4341 {
4342     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4343 }
4344 
4345 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4346 {
4347     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4348 }
4349 
4350 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4351 {
4352     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4353 }
4354 
4355 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4356 {
4357     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4358 }
4359 
4360 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4361 {
4362     TCGv_i64 x, y;
4363 
4364     nullify_over(ctx);
4365 
4366     x = load_frw0_i64(a->r1);
4367     y = load_frw0_i64(a->r2);
4368     tcg_gen_mul_i64(x, x, y);
4369     save_frd(a->t, x);
4370 
4371     return nullify_end(ctx);
4372 }
4373 
4374 /* Convert the fmpyadd single-precision register encodings to standard.  */
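/* E.g. fmpyadd_s_reg(5) == 21 and fmpyadd_s_reg(21) == 53: the low
   four bits index within a bank and bit 4 selects the upper bank.  */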
4375 static inline int fmpyadd_s_reg(unsigned r)
4376 {
4377     return (r & 16) * 2 + 16 + (r & 15);
4378 }
4379 
4380 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4381 {
4382     int tm = fmpyadd_s_reg(a->tm);
4383     int ra = fmpyadd_s_reg(a->ra);
4384     int ta = fmpyadd_s_reg(a->ta);
4385     int rm2 = fmpyadd_s_reg(a->rm2);
4386     int rm1 = fmpyadd_s_reg(a->rm1);
4387 
4388     nullify_over(ctx);
4389 
4390     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4391     do_fop_weww(ctx, ta, ta, ra,
4392                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4393 
4394     return nullify_end(ctx);
4395 }
4396 
4397 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4398 {
4399     return do_fmpyadd_s(ctx, a, false);
4400 }
4401 
4402 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4403 {
4404     return do_fmpyadd_s(ctx, a, true);
4405 }
4406 
4407 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4408 {
4409     nullify_over(ctx);
4410 
4411     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4412     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4413                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4414 
4415     return nullify_end(ctx);
4416 }
4417 
4418 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4419 {
4420     return do_fmpyadd_d(ctx, a, false);
4421 }
4422 
4423 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4424 {
4425     return do_fmpyadd_d(ctx, a, true);
4426 }
4427 
4428 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4429 {
4430     TCGv_i32 x, y, z;
4431 
4432     nullify_over(ctx);
4433     x = load_frw0_i32(a->rm1);
4434     y = load_frw0_i32(a->rm2);
4435     z = load_frw0_i32(a->ra3);
4436 
4437     if (a->neg) {
4438         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4439     } else {
4440         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4441     }
4442 
4443     save_frw_i32(a->t, x);
4444     return nullify_end(ctx);
4445 }
4446 
4447 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4448 {
4449     TCGv_i64 x, y, z;
4450 
4451     nullify_over(ctx);
4452     x = load_frd0(a->rm1);
4453     y = load_frd0(a->rm2);
4454     z = load_frd0(a->ra3);
4455 
4456     if (a->neg) {
4457         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4458     } else {
4459         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4460     }
4461 
4462     save_frd(a->t, x);
4463     return nullify_end(ctx);
4464 }
4465 
4466 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4467 {
4468     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4469 #ifndef CONFIG_USER_ONLY
4470     if (a->i == 0x100) {
4471         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4472         nullify_over(ctx);
4473         gen_helper_diag_btlb(tcg_env);
4474         return nullify_end(ctx);
4475     }
4476     if (a->i == 0x101) {
4477         /* print char in %r26 to first serial console, used by SeaBIOS-hppa */
4478         nullify_over(ctx);
4479         gen_helper_diag_console_output(tcg_env);
4480         return nullify_end(ctx);
4481     }
4482 #endif
4483     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4484     return true;
4485 }
4486 
4487 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4488 {
4489     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4490     int bound;
4491 
4492     ctx->cs = cs;
4493     ctx->tb_flags = ctx->base.tb->flags;
4494     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4495 
4496 #ifdef CONFIG_USER_ONLY
4497     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4498     ctx->mmu_idx = MMU_USER_IDX;
4499     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4500     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4501     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4502 #else
4503     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4504     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4505                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4506                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
4507 
4508     /* Recover the IAOQ values from the GVA + PRIV.  */
4509     uint64_t cs_base = ctx->base.tb->cs_base;
4510     uint64_t iasq_f = cs_base & ~0xffffffffull;
4511     int32_t diff = cs_base;
4512 
4513     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4514     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4515 #endif
4516     ctx->iaoq_n = -1;
4517     ctx->iaoq_n_var = NULL;
4518 
4519     ctx->zero = tcg_constant_i64(0);
4520 
4521     /* Bound the number of instructions by those left on the page.  */
4522     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
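    /*
     * Example with 4 KiB pages: pc_first ending in 0xff0 gives
     * -(pc_first | TARGET_PAGE_MASK) == 0x10, so bound == 4 insns.
     */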
4523     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4524 }
4525 
4526 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4527 {
4528     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4529 
4530     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4531     ctx->null_cond = cond_make_f();
4532     ctx->psw_n_nonzero = false;
4533     if (ctx->tb_flags & PSW_N) {
4534         ctx->null_cond.c = TCG_COND_ALWAYS;
4535         ctx->psw_n_nonzero = true;
4536     }
4537     ctx->null_lab = NULL;
4538 }
4539 
4540 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4541 {
4542     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4543 
4544     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
4545     ctx->insn_start = tcg_last_op();
4546 }
4547 
4548 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4549 {
4550     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4551     CPUHPPAState *env = cpu_env(cs);
4552     DisasJumpType ret;
4553 
4554     /* Execute one insn.  */
4555 #ifdef CONFIG_USER_ONLY
4556     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4557         do_page_zero(ctx);
4558         ret = ctx->base.is_jmp;
4559         assert(ret != DISAS_NEXT);
4560     } else
4561 #endif
4562     {
4563         /* Always fetch the insn, even if nullified, so that we check
4564            the page permissions for execute.  */
4565         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4566 
4567         /* Set up the IA queue for the next insn.
4568            This will be overwritten by a branch.  */
4569         if (ctx->iaoq_b == -1) {
4570             ctx->iaoq_n = -1;
4571             ctx->iaoq_n_var = tcg_temp_new_i64();
4572             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4573         } else {
4574             ctx->iaoq_n = ctx->iaoq_b + 4;
4575             ctx->iaoq_n_var = NULL;
4576         }
4577 
4578         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4579             ctx->null_cond.c = TCG_COND_NEVER;
4580             ret = DISAS_NEXT;
4581         } else {
4582             ctx->insn = insn;
4583             if (!decode(ctx, insn)) {
4584                 gen_illegal(ctx);
4585             }
4586             ret = ctx->base.is_jmp;
4587             assert(ctx->null_lab == NULL);
4588         }
4589     }
4590 
4591     /* Advance the insn queue.  Note that this check also detects
4592        a privilege change within the instruction queue.  */
4593     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4594         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4595             && use_goto_tb(ctx, ctx->iaoq_b)
4596             && (ctx->null_cond.c == TCG_COND_NEVER
4597                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4598             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4599             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4600             ctx->base.is_jmp = ret = DISAS_NORETURN;
4601         } else {
4602             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4603         }
4604     }
4605     ctx->iaoq_f = ctx->iaoq_b;
4606     ctx->iaoq_b = ctx->iaoq_n;
4607     ctx->base.pc_next += 4;
4608 
4609     switch (ret) {
4610     case DISAS_NORETURN:
4611     case DISAS_IAQ_N_UPDATED:
4612         break;
4613 
4614     case DISAS_NEXT:
4615     case DISAS_IAQ_N_STALE:
4616     case DISAS_IAQ_N_STALE_EXIT:
4617         if (ctx->iaoq_f == -1) {
4618             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4619             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4620 #ifndef CONFIG_USER_ONLY
4621             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4622 #endif
4623             nullify_save(ctx);
4624             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4625                                 ? DISAS_EXIT
4626                                 : DISAS_IAQ_N_UPDATED);
4627         } else if (ctx->iaoq_b == -1) {
4628             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4629         }
4630         break;
4631 
4632     default:
4633         g_assert_not_reached();
4634     }
4635 }
4636 
4637 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4638 {
4639     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4640     DisasJumpType is_jmp = ctx->base.is_jmp;
4641 
4642     switch (is_jmp) {
4643     case DISAS_NORETURN:
4644         break;
4645     case DISAS_TOO_MANY:
4646     case DISAS_IAQ_N_STALE:
4647     case DISAS_IAQ_N_STALE_EXIT:
4648         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4649         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4650         nullify_save(ctx);
4651         /* FALLTHRU */
4652     case DISAS_IAQ_N_UPDATED:
4653         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4654             tcg_gen_lookup_and_goto_ptr();
4655             break;
4656         }
4657         /* FALLTHRU */
4658     case DISAS_EXIT:
4659         tcg_gen_exit_tb(NULL, 0);
4660         break;
4661     default:
4662         g_assert_not_reached();
4663     }
4664 }
4665 
4666 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4667                               CPUState *cs, FILE *logfile)
4668 {
4669     target_ulong pc = dcbase->pc_first;
4670 
4671 #ifdef CONFIG_USER_ONLY
4672     switch (pc) {
4673     case 0x00:
4674         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4675         return;
4676     case 0xb0:
4677         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4678         return;
4679     case 0xe0:
4680         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4681         return;
4682     case 0x100:
4683         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4684         return;
4685     }
4686 #endif
4687 
4688     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4689     target_disas(logfile, cs, pc, dcbase->tb->size);
4690 }
4691 
4692 static const TranslatorOps hppa_tr_ops = {
4693     .init_disas_context = hppa_tr_init_disas_context,
4694     .tb_start           = hppa_tr_tb_start,
4695     .insn_start         = hppa_tr_insn_start,
4696     .translate_insn     = hppa_tr_translate_insn,
4697     .tb_stop            = hppa_tr_tb_stop,
4698     .disas_log          = hppa_tr_disas_log,
4699 };
4700 
4701 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4702                            vaddr pc, void *host_pc)
4703 {
4704     DisasContext ctx;
4705     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4706 }
4707