xref: /openbmc/qemu/target/hppa/translate.c (revision b5caa17c)
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is passed inverted, so that an explicit sr0
   can be distinguished from 0, which means "infer the space from the
   base register".  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
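/* The resulting mapping: m=0 -> 0 (no base update); m=1,a=0 -> 1
   (post-modify); m=1,a=1 -> -1 (pre-modify).  */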
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
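     * E.g. val = 0b100100 (x = 1, y = 4): officially 32 + 32 - 4 = 60,
     * and indeed (0b100100 ^ 31) + 1 = 59 + 1 = 60.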
     */
    return (val ^ 31) + 1;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

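/* Write back T to DEST; if the current insn may yet be nullified, use a
   conditional move so that DEST is unchanged on the nullified path.  */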
static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

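/* The 32-bit FP registers are the two halves of the 64-bit fr[] slots:
   bit 5 of the register number selects the half, with HI_OFS/LO_OFS
   compensating for host endianness.  */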
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
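
/*
 * Illustrative sketch only (trans_foo is not a real insn in this file).
 * A translator for an insn too complex for a conditional move brackets
 * its body with the pair above:
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);        branch over the body if nullified
 *         ... emit the operation ...
 *         return nullify_end(ctx);  close the label, fix up PSW[N]
 *     }
 */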

static uint64_t gva_offset_mask(DisasContext *ctx)
{
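    /* With PSW_W set (64-bit mode) the offset part of a GVA is 62 bits
       wide; otherwise it is 32 bits.  */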
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

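/* Chain directly to the next TB when both queue values are known and
   goto_tb is permitted; otherwise fall back to an indirect jump via
   lookup_and_goto_ptr.  */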
static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
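    /* E.g. orig = 5 ("<>"): c = 1, f = 1, giving logical condition 3.  */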
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
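        /* (res - 0x01..01) & ~res & 0x80..80 is nonzero iff some byte
           of res is zero: the subtraction borrows into bit 7 of a byte
           exactly when that byte is zero, and the andc discards bytes
           whose top bit was already set.  */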
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
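/* Overflow occurs iff the operands have the same sign and the result's
   sign differs: (res ^ in1) & ~(in1 ^ in2), with the flag in the sign
   bit.  */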
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
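/* Here overflow occurs iff the operands differ in sign and the result's
   sign differs from in1: (res ^ in1) & (in1 ^ in2).  */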
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
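         * Viewed as a double-word value, (1:IN1) - (0:IN2) leaves
         * IN1 - IN2 in the low word and 1 - borrow, i.e. the carry
         * out, in the high word.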
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
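    /* Pre-modify (modify < 0) translates with the updated offset;
       post-modify (modify > 0) with the original base.  */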
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
1781            branching.  Since IAOQ_F is not really live at this point, we
1782            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1783         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1784         next = tcg_temp_new_i64();
1785         tcg_gen_addi_i64(next, dest, 4);
1786         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1787 
1788         nullify_over(ctx);
1789         if (link != 0) {
1790             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1791         }
1792         tcg_gen_lookup_and_goto_ptr();
1793         return nullify_end(ctx);
1794     } else {
1795         c = ctx->null_cond.c;
1796         a0 = ctx->null_cond.a0;
1797         a1 = ctx->null_cond.a1;
1798 
1799         tmp = tcg_temp_new_i64();
1800         next = tcg_temp_new_i64();
1801 
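             /* NULL_COND tests whether this branch is itself nullified:
                when it holds, NEXT is the sequential successor in TMP;
                otherwise NEXT is DEST.  movcond avoids any control flow.  */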
1802         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1803         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1804         ctx->iaoq_n = -1;
1805         ctx->iaoq_n_var = next;
1806 
1807         if (link != 0) {
1808             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1809         }
1810 
1811         if (is_n) {
1812             /* The branch nullifies the next insn, which means the state of N
1813                after the branch is the inverse of the state of N that applied
1814                to the branch.  */
1815             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1816             cond_free(&ctx->null_cond);
1817             ctx->null_cond = cond_make_n();
1818             ctx->psw_n_nonzero = true;
1819         } else {
1820             cond_free(&ctx->null_cond);
1821         }
1822     }
1823     return true;
1824 }
1825 
1826 /* Implement
1827  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1828  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1829  *    else
1830  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1831  * which keeps the privilege level from being increased.
1832  */
1833 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1834 {
1835     TCGv_i64 dest;
1836     switch (ctx->privilege) {
1837     case 0:
1838         /* Privilege 0 is maximum and is allowed to decrease.  */
1839         return offset;
1840     case 3:
1841         /* Privilege 3 is minimum and is never allowed to increase.  */
1842         dest = tcg_temp_new_i64();
1843         tcg_gen_ori_i64(dest, offset, 3);
1844         break;
1845     default:
1846         dest = tcg_temp_new_i64();
1847         tcg_gen_andi_i64(dest, offset, -4);
1848         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1849         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1850         break;
1851     }
1852     return dest;
1853 }
1854 
1855 #ifdef CONFIG_USER_ONLY
1856 /* On Linux, page zero is normally marked execute only + gateway.
1857    Therefore normal read or write is supposed to fail, but specific
1858    offsets have kernel code mapped to raise permissions to implement
1859    system calls.  Handling this via an explicit check here, rather
1860    than in the "be disp(sr2,r0)" instruction that probably sent us
1861    here, is the easiest way to handle the branch delay slot on the
1862    aforementioned BE.  */
1863 static void do_page_zero(DisasContext *ctx)
1864 {
1865     TCGv_i64 tmp;
1866 
1867     /* If by some means we get here with PSW[N]=1, that implies that
1868        the B,GATE instruction would be skipped, and we'd fault on the
1869        next insn within the privileged page.  */
1870     switch (ctx->null_cond.c) {
1871     case TCG_COND_NEVER:
1872         break;
1873     case TCG_COND_ALWAYS:
1874         tcg_gen_movi_i64(cpu_psw_n, 0);
1875         goto do_sigill;
1876     default:
1877         /* Since this is always the first (and only) insn within the
1878            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1879         g_assert_not_reached();
1880     }
1881 
1882     /* Check that we didn't arrive here via some means that allowed
1883        non-sequential instruction execution.  Normally the PSW[B] bit
1884        detects this by preventing the B,GATE instruction from executing
1885        under such conditions.  */
1886     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1887         goto do_sigill;
1888     }
1889 
1890     switch (ctx->iaoq_f & -4) {
1891     case 0x00: /* Null pointer call */
1892         gen_excp_1(EXCP_IMP);
1893         ctx->base.is_jmp = DISAS_NORETURN;
1894         break;
1895 
1896     case 0xb0: /* LWS */
1897         gen_excp_1(EXCP_SYSCALL_LWS);
1898         ctx->base.is_jmp = DISAS_NORETURN;
1899         break;
1900 
1901     case 0xe0: /* SET_THREAD_POINTER */
1902         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1903         tmp = tcg_temp_new_i64();
1904         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1905         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1906         tcg_gen_addi_i64(tmp, tmp, 4);
1907         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1908         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1909         break;
1910 
1911     case 0x100: /* SYSCALL */
1912         gen_excp_1(EXCP_SYSCALL);
1913         ctx->base.is_jmp = DISAS_NORETURN;
1914         break;
1915 
1916     default:
1917     do_sigill:
1918         gen_excp_1(EXCP_ILL);
1919         ctx->base.is_jmp = DISAS_NORETURN;
1920         break;
1921     }
1922 }
1923 #endif
1924 
1925 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1926 {
1927     cond_free(&ctx->null_cond);
1928     return true;
1929 }
1930 
1931 static bool trans_break(DisasContext *ctx, arg_break *a)
1932 {
1933     return gen_excp_iir(ctx, EXCP_BREAK);
1934 }
1935 
1936 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1937 {
1938     /* No point in nullifying the memory barrier.  */
1939     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1940 
1941     cond_free(&ctx->null_cond);
1942     return true;
1943 }
1944 
1945 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1946 {
1947     unsigned rt = a->t;
1948     TCGv_i64 tmp = dest_gpr(ctx, rt);
1949     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1950     save_gpr(ctx, rt, tmp);
1951 
1952     cond_free(&ctx->null_cond);
1953     return true;
1954 }
1955 
1956 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1957 {
1958     unsigned rt = a->t;
1959     unsigned rs = a->sp;
1960     TCGv_i64 t0 = tcg_temp_new_i64();
1961 
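         /* The 32-bit space id lives in the upper half of the 64-bit
            slot (see trans_mtsp below); shift it down for the result.  */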
1962     load_spr(ctx, t0, rs);
1963     tcg_gen_shri_i64(t0, t0, 32);
1964 
1965     save_gpr(ctx, rt, t0);
1966 
1967     cond_free(&ctx->null_cond);
1968     return true;
1969 }
1970 
1971 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1972 {
1973     unsigned rt = a->t;
1974     unsigned ctl = a->r;
1975     TCGv_i64 tmp;
1976 
1977     switch (ctl) {
1978     case CR_SAR:
1979         if (a->e == 0) {
1980             /* MFSAR without ,W masks low 5 bits.  */
1981             tmp = dest_gpr(ctx, rt);
1982             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1983             save_gpr(ctx, rt, tmp);
1984             goto done;
1985         }
1986         save_gpr(ctx, rt, cpu_sar);
1987         goto done;
1988     case CR_IT: /* Interval Timer */
1989         /* FIXME: Respect PSW_S bit.  */
1990         nullify_over(ctx);
1991         tmp = dest_gpr(ctx, rt);
1992         /* If icount requires ending the TB after this I/O access,
1993            flag the instruction queue stale before reading the timer.  */
1994         if (translator_io_start(&ctx->base)) {
1995             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1996         }
1997         gen_helper_read_interval_timer(tmp);
1998         save_gpr(ctx, rt, tmp);
1999         return nullify_end(ctx);
2000     case 26:
2001     case 27:
2002         break;
2003     default:
2004         /* All other control registers are privileged.  */
2005         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2006         break;
2007     }
2008 
2009     tmp = tcg_temp_new_i64();
2010     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2011     save_gpr(ctx, rt, tmp);
2012 
2013  done:
2014     cond_free(&ctx->null_cond);
2015     return true;
2016 }
2017 
2018 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2019 {
2020     unsigned rr = a->r;
2021     unsigned rs = a->sp;
2022     TCGv_i64 tmp;
2023 
2024     if (rs >= 5) {
2025         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2026     }
2027     nullify_over(ctx);
2028 
2029     tmp = tcg_temp_new_i64();
2030     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2031 
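         /* Only sr0-sr3 are TCG globals; the upper space registers are
            written straight to env, and any such write invalidates the
            TB_FLAG_SR_SAME fast path.  */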
2032     if (rs >= 4) {
2033         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2034         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2035     } else {
2036         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2037     }
2038 
2039     return nullify_end(ctx);
2040 }
2041 
2042 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2043 {
2044     unsigned ctl = a->t;
2045     TCGv_i64 reg;
2046     TCGv_i64 tmp;
2047 
2048     if (ctl == CR_SAR) {
2049         reg = load_gpr(ctx, a->r);
2050         tmp = tcg_temp_new_i64();
2051         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2052         save_or_nullify(ctx, cpu_sar, tmp);
2053 
2054         cond_free(&ctx->null_cond);
2055         return true;
2056     }
2057 
2058     /* All other control registers are privileged or read-only.  */
2059     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2060 
2061 #ifndef CONFIG_USER_ONLY
2062     nullify_over(ctx);
2063     reg = load_gpr(ctx, a->r);
2064 
2065     switch (ctl) {
2066     case CR_IT:
2067         gen_helper_write_interval_timer(tcg_env, reg);
2068         break;
2069     case CR_EIRR:
2070         gen_helper_write_eirr(tcg_env, reg);
2071         break;
2072     case CR_EIEM:
2073         gen_helper_write_eiem(tcg_env, reg);
2074         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2075         break;
2076 
2077     case CR_IIASQ:
2078     case CR_IIAOQ:
2079         /* FIXME: Respect PSW_Q bit */
2080         /* The write advances the queue and stores to the back element.  */
2081         tmp = tcg_temp_new_i64();
2082         tcg_gen_ld_i64(tmp, tcg_env,
2083                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2084         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2085         tcg_gen_st_i64(reg, tcg_env,
2086                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2087         break;
2088 
2089     case CR_PID1:
2090     case CR_PID2:
2091     case CR_PID3:
2092     case CR_PID4:
2093         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2094 #ifndef CONFIG_USER_ONLY
2095         gen_helper_change_prot_id(tcg_env);
2096 #endif
2097         break;
2098 
2099     default:
2100         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101         break;
2102     }
2103     return nullify_end(ctx);
2104 #endif
2105 }
2106 
2107 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2108 {
2109     TCGv_i64 tmp = tcg_temp_new_i64();
2110 
2111     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2112     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2113     save_or_nullify(ctx, cpu_sar, tmp);
2114 
2115     cond_free(&ctx->null_cond);
2116     return true;
2117 }
2118 
2119 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2120 {
2121     TCGv_i64 dest = dest_gpr(ctx, a->t);
2122 
2123 #ifdef CONFIG_USER_ONLY
2124     /* We don't implement space registers in user mode. */
2125     tcg_gen_movi_i64(dest, 0);
2126 #else
2127     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2128     tcg_gen_shri_i64(dest, dest, 32);
2129 #endif
2130     save_gpr(ctx, a->t, dest);
2131 
2132     cond_free(&ctx->null_cond);
2133     return true;
2134 }
2135 
2136 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2137 {
2138     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2139 #ifndef CONFIG_USER_ONLY
2140     TCGv_i64 tmp;
2141 
2142     nullify_over(ctx);
2143 
2144     tmp = tcg_temp_new_i64();
2145     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2146     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2147     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2148     save_gpr(ctx, a->t, tmp);
2149 
2150     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2151     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2152     return nullify_end(ctx);
2153 #endif
2154 }
2155 
2156 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2157 {
2158     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2159 #ifndef CONFIG_USER_ONLY
2160     TCGv_i64 tmp;
2161 
2162     nullify_over(ctx);
2163 
2164     tmp = tcg_temp_new_i64();
2165     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2166     tcg_gen_ori_i64(tmp, tmp, a->i);
2167     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2168     save_gpr(ctx, a->t, tmp);
2169 
2170     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2171     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2172     return nullify_end(ctx);
2173 #endif
2174 }
2175 
2176 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2177 {
2178     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2179 #ifndef CONFIG_USER_ONLY
2180     TCGv_i64 tmp, reg;
2181     nullify_over(ctx);
2182 
2183     reg = load_gpr(ctx, a->r);
2184     tmp = tcg_temp_new_i64();
2185     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2186 
2187     /* Exit the TB to recognize new interrupts.  */
2188     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2189     return nullify_end(ctx);
2190 #endif
2191 }
2192 
2193 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2194 {
2195     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2196 #ifndef CONFIG_USER_ONLY
2197     nullify_over(ctx);
2198 
2199     if (rfi_r) {
2200         gen_helper_rfi_r(tcg_env);
2201     } else {
2202         gen_helper_rfi(tcg_env);
2203     }
2204     /* Exit the TB to recognize new interrupts.  */
2205     tcg_gen_exit_tb(NULL, 0);
2206     ctx->base.is_jmp = DISAS_NORETURN;
2207 
2208     return nullify_end(ctx);
2209 #endif
2210 }
2211 
2212 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2213 {
2214     return do_rfi(ctx, false);
2215 }
2216 
2217 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2218 {
2219     return do_rfi(ctx, true);
2220 }
2221 
2222 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2223 {
2224     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2225 #ifndef CONFIG_USER_ONLY
2226     nullify_over(ctx);
2227     gen_helper_halt(tcg_env);
2228     ctx->base.is_jmp = DISAS_NORETURN;
2229     return nullify_end(ctx);
2230 #endif
2231 }
2232 
2233 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2234 {
2235     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237     nullify_over(ctx);
2238     gen_helper_reset(tcg_env);
2239     ctx->base.is_jmp = DISAS_NORETURN;
2240     return nullify_end(ctx);
2241 #endif
2242 }
2243 
2244 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2245 {
2246     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2247 #ifndef CONFIG_USER_ONLY
2248     nullify_over(ctx);
2249     gen_helper_getshadowregs(tcg_env);
2250     return nullify_end(ctx);
2251 #endif
2252 }
2253 
2254 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2255 {
2256     if (a->m) {
2257         TCGv_i64 dest = dest_gpr(ctx, a->b);
2258         TCGv_i64 src1 = load_gpr(ctx, a->b);
2259         TCGv_i64 src2 = load_gpr(ctx, a->x);
2260 
2261         /* The only thing we need to do is the base register modification.  */
2262         tcg_gen_add_i64(dest, src1, src2);
2263         save_gpr(ctx, a->b, dest);
2264     }
2265     cond_free(&ctx->null_cond);
2266     return true;
2267 }
2268 
2269 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2270 {
2271     TCGv_i64 dest, ofs;
2272     TCGv_i32 level, want;
2273     TCGv_i64 addr;
2274 
2275     nullify_over(ctx);
2276 
2277     dest = dest_gpr(ctx, a->t);
2278     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2279 
2280     if (a->imm) {
2281         level = tcg_constant_i32(a->ri);
2282     } else {
2283         level = tcg_temp_new_i32();
2284         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2285         tcg_gen_andi_i32(level, level, 3);
2286     }
2287     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2288 
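         /* The helper returns 1 in DEST if the page grants the WANTed
            access at privilege LEVEL, and 0 otherwise.  */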
2289     gen_helper_probe(dest, tcg_env, addr, level, want);
2290 
2291     save_gpr(ctx, a->t, dest);
2292     return nullify_end(ctx);
2293 }
2294 
2295 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2296 {
2297     if (ctx->is_pa20) {
2298         return false;
2299     }
2300     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302     TCGv_i64 addr;
2303     TCGv_i64 ofs, reg;
2304 
2305     nullify_over(ctx);
2306 
2307     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2308     reg = load_gpr(ctx, a->r);
2309     if (a->addr) {
2310         gen_helper_itlba_pa11(tcg_env, addr, reg);
2311     } else {
2312         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2313     }
2314 
2315     /* Exit TB for TLB change if mmu is enabled.  */
2316     if (ctx->tb_flags & PSW_C) {
2317         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2318     }
2319     return nullify_end(ctx);
2320 #endif
2321 }
2322 
2323 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2324 {
2325     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2326 #ifndef CONFIG_USER_ONLY
2327     TCGv_i64 addr;
2328     TCGv_i64 ofs;
2329 
2330     nullify_over(ctx);
2331 
2332     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2333     if (a->m) {
2334         save_gpr(ctx, a->b, ofs);
2335     }
2336     if (a->local) {
2337         gen_helper_ptlbe(tcg_env);
2338     } else {
2339         gen_helper_ptlb(tcg_env, addr);
2340     }
2341 
2342     /* Exit TB for TLB change if mmu is enabled.  */
2343     if (ctx->tb_flags & PSW_C) {
2344         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2345     }
2346     return nullify_end(ctx);
2347 #endif
2348 }
2349 
2350 /*
2351  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2352  * See
2353  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2354  *     page 13-9 (195/206)
2355  */
2356 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2357 {
2358     if (ctx->is_pa20) {
2359         return false;
2360     }
2361     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2362 #ifndef CONFIG_USER_ONLY
2363     TCGv_i64 addr, atl, stl;
2364     TCGv_i64 reg;
2365 
2366     nullify_over(ctx);
2367 
2368     /*
2369      * FIXME:
2370      *  if (not (pcxl or pcxl2))
2371      *    return gen_illegal(ctx);
2372      */
2373 
2374     atl = tcg_temp_new_i64();
2375     stl = tcg_temp_new_i64();
2376     addr = tcg_temp_new_i64();
2377 
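         /* Reassemble the 64-bit virtual address from the interruption
            registers: ISR:IOR for a data access, IIASQ:IIAOQ for an
            instruction access, with the space id in the high half.  */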
2378     tcg_gen_ld32u_i64(stl, tcg_env,
2379                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2380                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2381     tcg_gen_ld32u_i64(atl, tcg_env,
2382                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2383                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2384     tcg_gen_shli_i64(stl, stl, 32);
2385     tcg_gen_or_i64(addr, atl, stl);
2386 
2387     reg = load_gpr(ctx, a->r);
2388     if (a->addr) {
2389         gen_helper_itlba_pa11(tcg_env, addr, reg);
2390     } else {
2391         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2392     }
2393 
2394     /* Exit TB for TLB change if mmu is enabled.  */
2395     if (ctx->tb_flags & PSW_C) {
2396         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2397     }
2398     return nullify_end(ctx);
2399 #endif
2400 }
2401 
2402 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2403 {
2404     if (!ctx->is_pa20) {
2405         return false;
2406     }
2407     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2408 #ifndef CONFIG_USER_ONLY
2409     nullify_over(ctx);
2410     {
2411         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2412         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2413 
2414         if (a->data) {
2415             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2416         } else {
2417             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2418         }
2419     }
2420     /* Exit TB for TLB change if mmu is enabled.  */
2421     if (ctx->tb_flags & PSW_C) {
2422         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2423     }
2424     return nullify_end(ctx);
2425 #endif
2426 }
2427 
2428 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2429 {
2430     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2431 #ifndef CONFIG_USER_ONLY
2432     TCGv_i64 vaddr;
2433     TCGv_i64 ofs, paddr;
2434 
2435     nullify_over(ctx);
2436 
2437     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2438 
2439     paddr = tcg_temp_new_i64();
2440     gen_helper_lpa(paddr, tcg_env, vaddr);
2441 
2442     /* Note that physical address result overrides base modification.  */
2443     if (a->m) {
2444         save_gpr(ctx, a->b, ofs);
2445     }
2446     save_gpr(ctx, a->t, paddr);
2447 
2448     return nullify_end(ctx);
2449 #endif
2450 }
2451 
2452 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2453 {
2454     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2455 
2456     /* The Coherence Index is an implementation-defined function of the
2457        physical address.  Two addresses with the same CI have a coherent
2458        view of the cache.  Our implementation is to return 0 for all,
2459        since the entire address space is coherent.  */
2460     save_gpr(ctx, a->t, ctx->zero);
2461 
2462     cond_free(&ctx->null_cond);
2463     return true;
2464 }
2465 
2466 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2467 {
2468     return do_add_reg(ctx, a, false, false, false, false);
2469 }
2470 
2471 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2472 {
2473     return do_add_reg(ctx, a, true, false, false, false);
2474 }
2475 
2476 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2477 {
2478     return do_add_reg(ctx, a, false, true, false, false);
2479 }
2480 
2481 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2482 {
2483     return do_add_reg(ctx, a, false, false, false, true);
2484 }
2485 
2486 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2487 {
2488     return do_add_reg(ctx, a, false, true, false, true);
2489 }
2490 
2491 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2492 {
2493     return do_sub_reg(ctx, a, false, false, false);
2494 }
2495 
2496 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2497 {
2498     return do_sub_reg(ctx, a, true, false, false);
2499 }
2500 
2501 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2502 {
2503     return do_sub_reg(ctx, a, false, false, true);
2504 }
2505 
2506 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2507 {
2508     return do_sub_reg(ctx, a, true, false, true);
2509 }
2510 
2511 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2512 {
2513     return do_sub_reg(ctx, a, false, true, false);
2514 }
2515 
2516 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2517 {
2518     return do_sub_reg(ctx, a, true, true, false);
2519 }
2520 
2521 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2522 {
2523     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2524 }
2525 
2526 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2527 {
2528     return do_log_reg(ctx, a, tcg_gen_and_i64);
2529 }
2530 
2531 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2532 {
2533     if (a->cf == 0) {
2534         unsigned r2 = a->r2;
2535         unsigned r1 = a->r1;
2536         unsigned rt = a->t;
2537 
2538         if (rt == 0) { /* NOP */
2539             cond_free(&ctx->null_cond);
2540             return true;
2541         }
2542         if (r2 == 0) { /* COPY */
2543             if (r1 == 0) {
2544                 TCGv_i64 dest = dest_gpr(ctx, rt);
2545                 tcg_gen_movi_i64(dest, 0);
2546                 save_gpr(ctx, rt, dest);
2547             } else {
2548                 save_gpr(ctx, rt, cpu_gr[r1]);
2549             }
2550             cond_free(&ctx->null_cond);
2551             return true;
2552         }
2553 #ifndef CONFIG_USER_ONLY
2554         /* These are QEMU extensions and are nops in the real architecture:
2555          *
2556          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2557          * or %r31,%r31,%r31 -- death loop; offline cpu
2558          *                      currently implemented as idle.
2559          */
2560         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2561             /* No need to check for supervisor, as userland can only pause
2562                until the next timer interrupt.  */
2563             nullify_over(ctx);
2564 
2565             /* Advance the instruction queue.  */
2566             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2567             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2568             nullify_set(ctx, 0);
2569 
2570             /* Tell the qemu main loop to halt until this cpu has work.  */
2571             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2572                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2573             gen_excp_1(EXCP_HALTED);
2574             ctx->base.is_jmp = DISAS_NORETURN;
2575 
2576             return nullify_end(ctx);
2577         }
2578 #endif
2579     }
2580     return do_log_reg(ctx, a, tcg_gen_or_i64);
2581 }
2582 
2583 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2584 {
2585     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2586 }
2587 
2588 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2589 {
2590     TCGv_i64 tcg_r1, tcg_r2;
2591 
2592     if (a->cf) {
2593         nullify_over(ctx);
2594     }
2595     tcg_r1 = load_gpr(ctx, a->r1);
2596     tcg_r2 = load_gpr(ctx, a->r2);
2597     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2598     return nullify_end(ctx);
2599 }
2600 
2601 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2602 {
2603     TCGv_i64 tcg_r1, tcg_r2;
2604 
2605     if (a->cf) {
2606         nullify_over(ctx);
2607     }
2608     tcg_r1 = load_gpr(ctx, a->r1);
2609     tcg_r2 = load_gpr(ctx, a->r2);
2610     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2611     return nullify_end(ctx);
2612 }
2613 
2614 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2615 {
2616     TCGv_i64 tcg_r1, tcg_r2, tmp;
2617 
2618     if (a->cf) {
2619         nullify_over(ctx);
2620     }
2621     tcg_r1 = load_gpr(ctx, a->r1);
2622     tcg_r2 = load_gpr(ctx, a->r2);
2623     tmp = tcg_temp_new_i64();
2624     tcg_gen_not_i64(tmp, tcg_r2);
2625     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2626     return nullify_end(ctx);
2627 }
2628 
2629 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2630 {
2631     return do_uaddcm(ctx, a, false);
2632 }
2633 
2634 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2635 {
2636     return do_uaddcm(ctx, a, true);
2637 }
2638 
2639 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2640 {
2641     TCGv_i64 tmp;
2642 
2643     nullify_over(ctx);
2644 
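         /* The carry out of nibble i sits at bit 4*i+3 of the CB vector;
            shift and mask to one bit per nibble, then scale by 6: DCOR
            subtracts 6 from each digit that produced no carry, while the
            intermediate form adds 6 to each digit that did.  */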
2645     tmp = tcg_temp_new_i64();
2646     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2647     if (!is_i) {
2648         tcg_gen_not_i64(tmp, tmp);
2649     }
2650     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2651     tcg_gen_muli_i64(tmp, tmp, 6);
2652     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2653             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2654     return nullify_end(ctx);
2655 }
2656 
2657 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2658 {
2659     return do_dcor(ctx, a, false);
2660 }
2661 
2662 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2663 {
2664     return do_dcor(ctx, a, true);
2665 }
2666 
2667 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2668 {
2669     TCGv_i64 dest, add1, add2, addc, in1, in2;
2670     TCGv_i64 cout;
2671 
2672     nullify_over(ctx);
2673 
2674     in1 = load_gpr(ctx, a->r1);
2675     in2 = load_gpr(ctx, a->r2);
2676 
2677     add1 = tcg_temp_new_i64();
2678     add2 = tcg_temp_new_i64();
2679     addc = tcg_temp_new_i64();
2680     dest = tcg_temp_new_i64();
2681 
2682     /* Form R1 << 1 | PSW[CB]{8}.  */
2683     tcg_gen_add_i64(add1, in1, in1);
2684     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2685 
2686     /*
2687      * Add or subtract R2, depending on PSW[V].  Proper computation of
2688      * carry requires that we subtract via + ~R2 + 1, as described in
2689      * the manual.  By extracting and masking V, we can produce the
2690      * proper inputs to the addition without movcond.
2691      */
2692     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2693     tcg_gen_xor_i64(add2, in2, addc);
2694     tcg_gen_andi_i64(addc, addc, 1);
2695 
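         /* Two add2 steps: ADD1 + ADD2, then + ADDC, the latter being
            the +1 that completes ~R2 + 1 when subtracting; the carry out
            of bit 63 accumulates in cpu_psw_cb_msb.  */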
2696     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2697     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2698                      addc, ctx->zero);
2699 
2700     /* Write back the result register.  */
2701     save_gpr(ctx, a->t, dest);
2702 
2703     /* Write back PSW[CB].  */
2704     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2705     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2706 
2707     /* Write back PSW[V] for the division step.  */
2708     cout = get_psw_carry(ctx, false);
2709     tcg_gen_neg_i64(cpu_psw_v, cout);
2710     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2711 
2712     /* Install the new nullification.  */
2713     if (a->cf) {
2714         TCGv_i64 sv = NULL;
2715         if (cond_need_sv(a->cf >> 1)) {
2716             /* ??? The lshift is supposed to contribute to overflow.  */
2717             sv = do_add_sv(ctx, dest, add1, add2);
2718         }
2719         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2720     }
2721 
2722     return nullify_end(ctx);
2723 }
2724 
2725 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2726 {
2727     return do_add_imm(ctx, a, false, false);
2728 }
2729 
2730 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2731 {
2732     return do_add_imm(ctx, a, true, false);
2733 }
2734 
2735 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2736 {
2737     return do_add_imm(ctx, a, false, true);
2738 }
2739 
2740 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2741 {
2742     return do_add_imm(ctx, a, true, true);
2743 }
2744 
2745 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2746 {
2747     return do_sub_imm(ctx, a, false);
2748 }
2749 
2750 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2751 {
2752     return do_sub_imm(ctx, a, true);
2753 }
2754 
2755 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2756 {
2757     TCGv_i64 tcg_im, tcg_r2;
2758 
2759     if (a->cf) {
2760         nullify_over(ctx);
2761     }
2762 
2763     tcg_im = tcg_constant_i64(a->i);
2764     tcg_r2 = load_gpr(ctx, a->r);
2765     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2766 
2767     return nullify_end(ctx);
2768 }
2769 
2770 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2771                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2772 {
2773     TCGv_i64 r1, r2, dest;
2774 
2775     if (!ctx->is_pa20) {
2776         return false;
2777     }
2778 
2779     nullify_over(ctx);
2780 
2781     r1 = load_gpr(ctx, a->r1);
2782     r2 = load_gpr(ctx, a->r2);
2783     dest = dest_gpr(ctx, a->t);
2784 
2785     fn(dest, r1, r2);
2786     save_gpr(ctx, a->t, dest);
2787 
2788     return nullify_end(ctx);
2789 }
2790 
2791 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2792                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2793 {
2794     TCGv_i64 r, dest;
2795 
2796     if (!ctx->is_pa20) {
2797         return false;
2798     }
2799 
2800     nullify_over(ctx);
2801 
2802     r = load_gpr(ctx, a->r);
2803     dest = dest_gpr(ctx, a->t);
2804 
2805     fn(dest, r, a->i);
2806     save_gpr(ctx, a->t, dest);
2807 
2808     return nullify_end(ctx);
2809 }
2810 
2811 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2812                                 void (*fn)(TCGv_i64, TCGv_i64,
2813                                            TCGv_i64, TCGv_i32))
2814 {
2815     TCGv_i64 r1, r2, dest;
2816 
2817     if (!ctx->is_pa20) {
2818         return false;
2819     }
2820 
2821     nullify_over(ctx);
2822 
2823     r1 = load_gpr(ctx, a->r1);
2824     r2 = load_gpr(ctx, a->r2);
2825     dest = dest_gpr(ctx, a->t);
2826 
2827     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2828     save_gpr(ctx, a->t, dest);
2829 
2830     return nullify_end(ctx);
2831 }
2832 
2833 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2834 {
2835     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2836 }
2837 
2838 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2839 {
2840     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2841 }
2842 
2843 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2844 {
2845     return do_multimedia(ctx, a, gen_helper_hadd_us);
2846 }
2847 
2848 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2849 {
2850     return do_multimedia(ctx, a, gen_helper_havg);
2851 }
2852 
2853 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2854 {
2855     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2856 }
2857 
2858 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2859 {
2860     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2861 }
2862 
2863 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2864 {
2865     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2866 }
2867 
2868 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2869 {
2870     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2871 }
2872 
2873 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2874 {
2875     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2876 }
2877 
2878 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2879 {
2880     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2881 }
2882 
2883 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2884 {
2885     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2886 }
2887 
2888 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2889 {
2890     return do_multimedia(ctx, a, gen_helper_hsub_us);
2891 }
2892 
2893 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2894 {
2895     uint64_t mask = 0xffff0000ffff0000ull;
2896     TCGv_i64 tmp = tcg_temp_new_i64();
2897 
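         /* Keep the left (most significant) halfword of each word: the
            result is { r1.h0, r2.h0, r1.h2, r2.h2 } in PA's big-endian
            halfword numbering.  */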
2898     tcg_gen_andi_i64(tmp, r2, mask);
2899     tcg_gen_andi_i64(dst, r1, mask);
2900     tcg_gen_shri_i64(tmp, tmp, 16);
2901     tcg_gen_or_i64(dst, dst, tmp);
2902 }
2903 
2904 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2905 {
2906     return do_multimedia(ctx, a, gen_mixh_l);
2907 }
2908 
2909 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2910 {
2911     uint64_t mask = 0x0000ffff0000ffffull;
2912     TCGv_i64 tmp = tcg_temp_new_i64();
2913 
2914     tcg_gen_andi_i64(tmp, r1, mask);
2915     tcg_gen_andi_i64(dst, r2, mask);
2916     tcg_gen_shli_i64(tmp, tmp, 16);
2917     tcg_gen_or_i64(dst, dst, tmp);
2918 }
2919 
2920 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2921 {
2922     return do_multimedia(ctx, a, gen_mixh_r);
2923 }
2924 
2925 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2926 {
2927     TCGv_i64 tmp = tcg_temp_new_i64();
2928 
2929     tcg_gen_shri_i64(tmp, r2, 32);
2930     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2931 }
2932 
2933 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2934 {
2935     return do_multimedia(ctx, a, gen_mixw_l);
2936 }
2937 
2938 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2939 {
2940     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2941 }
2942 
2943 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2944 {
2945     return do_multimedia(ctx, a, gen_mixw_r);
2946 }
2947 
2948 static bool trans_permh(DisasContext *ctx, arg_permh *a)
2949 {
2950     TCGv_i64 r, t0, t1, t2, t3;
2951 
2952     if (!ctx->is_pa20) {
2953         return false;
2954     }
2955 
2956     nullify_over(ctx);
2957 
2958     r = load_gpr(ctx, a->r1);
2959     t0 = tcg_temp_new_i64();
2960     t1 = tcg_temp_new_i64();
2961     t2 = tcg_temp_new_i64();
2962     t3 = tcg_temp_new_i64();
2963 
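         /* The c0..c3 selectors use big-endian halfword numbering
            (0 is most significant), hence the (3 - cN) * 16 offsets.  */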
2964     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
2965     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
2966     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
2967     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
2968 
2969     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
2970     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
2971     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
2972 
2973     save_gpr(ctx, a->t, t0);
2974     return nullify_end(ctx);
2975 }
2976 
2977 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2978 {
2979     if (ctx->is_pa20) {
2980         /*
2981          * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
2982          * Any base modification still occurs.
2983          */
2984         if (a->t == 0) {
2985             return trans_nop_addrx(ctx, a);
2986         }
2987     } else if (a->size > MO_32) {
2988         return gen_illegal(ctx);
2989     }
2990     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2991                    a->disp, a->sp, a->m, a->size | MO_TE);
2992 }
2993 
2994 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2995 {
2996     assert(a->x == 0 && a->scale == 0);
2997     if (!ctx->is_pa20 && a->size > MO_32) {
2998         return gen_illegal(ctx);
2999     }
3000     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3001 }
3002 
3003 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3004 {
3005     MemOp mop = MO_TE | MO_ALIGN | a->size;
3006     TCGv_i64 dest, ofs;
3007     TCGv_i64 addr;
3008 
3009     if (!ctx->is_pa20 && a->size > MO_32) {
3010         return gen_illegal(ctx);
3011     }
3012 
3013     nullify_over(ctx);
3014 
3015     if (a->m) {
3016         /* Base register modification.  Make sure that if RT == RB,
3017            we still see the result of the load.  */
3018         dest = tcg_temp_new_i64();
3019     } else {
3020         dest = dest_gpr(ctx, a->t);
3021     }
3022 
3023     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3024              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3025 
3026     /*
3027      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3028      * However actual hardware succeeds with aligned mod 4.
3029      * Detect this case and log a GUEST_ERROR.
3030      *
3031      * TODO: HPPA64 relaxes the over-alignment requirement
3032      * with the ,co completer.
3033      */
3034     gen_helper_ldc_check(addr);
3035 
3036     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
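         /* LDCW/LDCD zero the memory location as part of the load;
            model this as an atomic exchange with zero.  */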
3037 
3038     if (a->m) {
3039         save_gpr(ctx, a->b, ofs);
3040     }
3041     save_gpr(ctx, a->t, dest);
3042 
3043     return nullify_end(ctx);
3044 }
3045 
3046 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3047 {
3048     TCGv_i64 ofs, val;
3049     TCGv_i64 addr;
3050 
3051     nullify_over(ctx);
3052 
3053     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3054              ctx->mmu_idx == MMU_PHYS_IDX);
3055     val = load_gpr(ctx, a->r);
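         /* STBY covers the partial words of an unaligned block move: the
            ,B (begin) form stores from the addressed byte to the end of
            the word, the ,E (end, a->a set) form the bytes before it.  */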
3056     if (a->a) {
3057         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3058             gen_helper_stby_e_parallel(tcg_env, addr, val);
3059         } else {
3060             gen_helper_stby_e(tcg_env, addr, val);
3061         }
3062     } else {
3063         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3064             gen_helper_stby_b_parallel(tcg_env, addr, val);
3065         } else {
3066             gen_helper_stby_b(tcg_env, addr, val);
3067         }
3068     }
3069     if (a->m) {
3070         tcg_gen_andi_i64(ofs, ofs, ~3);
3071         save_gpr(ctx, a->b, ofs);
3072     }
3073 
3074     return nullify_end(ctx);
3075 }
3076 
3077 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3078 {
3079     TCGv_i64 ofs, val;
3080     TCGv_i64 addr;
3081 
3082     if (!ctx->is_pa20) {
3083         return false;
3084     }
3085     nullify_over(ctx);
3086 
3087     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3088              ctx->mmu_idx == MMU_PHYS_IDX);
3089     val = load_gpr(ctx, a->r);
3090     if (a->a) {
3091         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3092             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3093         } else {
3094             gen_helper_stdby_e(tcg_env, addr, val);
3095         }
3096     } else {
3097         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3098             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3099         } else {
3100             gen_helper_stdby_b(tcg_env, addr, val);
3101         }
3102     }
3103     if (a->m) {
3104         tcg_gen_andi_i64(ofs, ofs, ~7);
3105         save_gpr(ctx, a->b, ofs);
3106     }
3107 
3108     return nullify_end(ctx);
3109 }
3110 
3111 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3112 {
3113     int hold_mmu_idx = ctx->mmu_idx;
3114 
3115     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3116     ctx->mmu_idx = MMU_PHYS_IDX;
3117     trans_ld(ctx, a);
3118     ctx->mmu_idx = hold_mmu_idx;
3119     return true;
3120 }
3121 
3122 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3123 {
3124     int hold_mmu_idx = ctx->mmu_idx;
3125 
3126     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3127     ctx->mmu_idx = MMU_PHYS_IDX;
3128     trans_st(ctx, a);
3129     ctx->mmu_idx = hold_mmu_idx;
3130     return true;
3131 }
3132 
3133 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3134 {
3135     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3136 
3137     tcg_gen_movi_i64(tcg_rt, a->i);
3138     save_gpr(ctx, a->t, tcg_rt);
3139     cond_free(&ctx->null_cond);
3140     return true;
3141 }
3142 
3143 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3144 {
3145     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3146     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3147 
3148     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3149     save_gpr(ctx, 1, tcg_r1);
3150     cond_free(&ctx->null_cond);
3151     return true;
3152 }
3153 
3154 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3155 {
3156     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3157 
3158     /* Special case rb == 0, for the LDI pseudo-op.
3159        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3160     if (a->b == 0) {
3161         tcg_gen_movi_i64(tcg_rt, a->i);
3162     } else {
3163         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3164     }
3165     save_gpr(ctx, a->t, tcg_rt);
3166     cond_free(&ctx->null_cond);
3167     return true;
3168 }
3169 
3170 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3171                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3172 {
3173     TCGv_i64 dest, in2, sv;
3174     DisasCond cond;
3175 
3176     in2 = load_gpr(ctx, r);
3177     dest = tcg_temp_new_i64();
3178 
3179     tcg_gen_sub_i64(dest, in1, in2);
3180 
3181     sv = NULL;
3182     if (cond_need_sv(c)) {
3183         sv = do_sub_sv(ctx, dest, in1, in2);
3184     }
3185 
3186     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3187     return do_cbranch(ctx, disp, n, &cond);
3188 }
3189 
3190 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3191 {
3192     if (!ctx->is_pa20 && a->d) {
3193         return false;
3194     }
3195     nullify_over(ctx);
3196     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3197                    a->c, a->f, a->d, a->n, a->disp);
3198 }
3199 
3200 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3201 {
3202     if (!ctx->is_pa20 && a->d) {
3203         return false;
3204     }
3205     nullify_over(ctx);
3206     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3207                    a->c, a->f, a->d, a->n, a->disp);
3208 }
3209 
3210 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3211                     unsigned c, unsigned f, unsigned n, int disp)
3212 {
3213     TCGv_i64 dest, in2, sv, cb_cond;
3214     DisasCond cond;
3215     bool d = false;
3216 
3217     /*
3218      * For hppa64, the ADDB conditions change with PSW.W,
3219      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3220      */
3221     if (ctx->tb_flags & PSW_W) {
3222         d = c >= 5;
3223         if (d) {
3224             c &= 3;
3225         }
3226     }
3227 
3228     in2 = load_gpr(ctx, r);
3229     dest = tcg_temp_new_i64();
3230     sv = NULL;
3231     cb_cond = NULL;
3232 
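         /* Conditions that test carry need the full carry vector: given
            DEST = IN1 + IN2, CB = IN1 ^ IN2 ^ DEST recovers the carry
            into each bit, and add2 supplies the carry out of the MSB.  */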
3233     if (cond_need_cb(c)) {
3234         TCGv_i64 cb = tcg_temp_new_i64();
3235         TCGv_i64 cb_msb = tcg_temp_new_i64();
3236 
3237         tcg_gen_movi_i64(cb_msb, 0);
3238         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3239         tcg_gen_xor_i64(cb, in1, in2);
3240         tcg_gen_xor_i64(cb, cb, dest);
3241         cb_cond = get_carry(ctx, d, cb, cb_msb);
3242     } else {
3243         tcg_gen_add_i64(dest, in1, in2);
3244     }
3245     if (cond_need_sv(c)) {
3246         sv = do_add_sv(ctx, dest, in1, in2);
3247     }
3248 
3249     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3250     save_gpr(ctx, r, dest);
3251     return do_cbranch(ctx, disp, n, &cond);
3252 }
3253 
3254 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3255 {
3256     nullify_over(ctx);
3257     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3258 }
3259 
3260 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3261 {
3262     nullify_over(ctx);
3263     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3264 }
3265 
3266 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3267 {
3268     TCGv_i64 tmp, tcg_r;
3269     DisasCond cond;
3270 
3271     nullify_over(ctx);
3272 
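         /* Branch on Bit: shift the selected bit into the sign position
            and test the sign.  SAR numbers bits big-endian, so a left
            shift by SAR brings PA bit SAR to bit 63.  */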
3273     tmp = tcg_temp_new_i64();
3274     tcg_r = load_gpr(ctx, a->r);
3275     if (cond_need_ext(ctx, a->d)) {
3276         /* Force shift into [32,63] */
3277         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3278         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3279     } else {
3280         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3281     }
3282 
3283     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3284     return do_cbranch(ctx, a->disp, a->n, &cond);
3285 }
3286 
3287 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3288 {
3289     TCGv_i64 tmp, tcg_r;
3290     DisasCond cond;
3291     int p;
3292 
3293     nullify_over(ctx);
3294 
3295     tmp = tcg_temp_new_i64();
3296     tcg_r = load_gpr(ctx, a->r);
3297     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3298     tcg_gen_shli_i64(tmp, tcg_r, p);
3299 
3300     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3301     return do_cbranch(ctx, a->disp, a->n, &cond);
3302 }
3303 
3304 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3305 {
3306     TCGv_i64 dest;
3307     DisasCond cond;
3308 
3309     nullify_over(ctx);
3310 
3311     dest = dest_gpr(ctx, a->r2);
3312     if (a->r1 == 0) {
3313         tcg_gen_movi_i64(dest, 0);
3314     } else {
3315         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3316     }
3317 
3318     /* All MOVB conditions are 32-bit. */
3319     cond = do_sed_cond(ctx, a->c, false, dest);
3320     return do_cbranch(ctx, a->disp, a->n, &cond);
3321 }
3322 
3323 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3324 {
3325     TCGv_i64 dest;
3326     DisasCond cond;
3327 
3328     nullify_over(ctx);
3329 
3330     dest = dest_gpr(ctx, a->r);
3331     tcg_gen_movi_i64(dest, a->i);
3332 
3333     /* All MOVBI conditions are 32-bit. */
3334     cond = do_sed_cond(ctx, a->c, false, dest);
3335     return do_cbranch(ctx, a->disp, a->n, &cond);
3336 }
3337 
3338 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3339 {
3340     TCGv_i64 dest, src2;
3341 
3342     if (!ctx->is_pa20 && a->d) {
3343         return false;
3344     }
3345     if (a->c) {
3346         nullify_over(ctx);
3347     }
3348 
3349     dest = dest_gpr(ctx, a->t);
3350     src2 = load_gpr(ctx, a->r2);
3351     if (a->r1 == 0) {
3352         if (a->d) {
3353             tcg_gen_shr_i64(dest, src2, cpu_sar);
3354         } else {
3355             TCGv_i64 tmp = tcg_temp_new_i64();
3356 
3357             tcg_gen_ext32u_i64(dest, src2);
3358             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3359             tcg_gen_shr_i64(dest, dest, tmp);
3360         }
3361     } else if (a->r1 == a->r2) {
3362         if (a->d) {
3363             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3364         } else {
3365             TCGv_i32 t32 = tcg_temp_new_i32();
3366             TCGv_i32 s32 = tcg_temp_new_i32();
3367 
3368             tcg_gen_extrl_i64_i32(t32, src2);
3369             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3370             tcg_gen_andi_i32(s32, s32, 31);
3371             tcg_gen_rotr_i32(t32, t32, s32);
3372             tcg_gen_extu_i32_i64(dest, t32);
3373         }
3374     } else {
3375         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3376 
3377         if (a->d) {
3378             TCGv_i64 t = tcg_temp_new_i64();
3379             TCGv_i64 n = tcg_temp_new_i64();
3380 
3381             tcg_gen_xori_i64(n, cpu_sar, 63);
3382             tcg_gen_shl_i64(t, src2, n);
3383             tcg_gen_shli_i64(t, t, 1);
3384             tcg_gen_shr_i64(dest, src1, cpu_sar);
3385             tcg_gen_or_i64(dest, dest, t);
3386         } else {
3387             TCGv_i64 t = tcg_temp_new_i64();
3388             TCGv_i64 s = tcg_temp_new_i64();
3389 
3390             tcg_gen_concat32_i64(t, src2, src1);
3391             tcg_gen_andi_i64(s, cpu_sar, 31);
3392             tcg_gen_shr_i64(dest, t, s);
3393         }
3394     }
3395     save_gpr(ctx, a->t, dest);
3396 
3397     /* Install the new nullification.  */
3398     cond_free(&ctx->null_cond);
3399     if (a->c) {
3400         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3401     }
3402     return nullify_end(ctx);
3403 }
3404 
3405 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3406 {
3407     unsigned width, sa;
3408     TCGv_i64 dest, t2;
3409 
3410     if (!ctx->is_pa20 && a->d) {
3411         return false;
3412     }
3413     if (a->c) {
3414         nullify_over(ctx);
3415     }
3416 
3417     width = a->d ? 64 : 32;
3418     sa = width - 1 - a->cpos;
3419 
3420     dest = dest_gpr(ctx, a->t);
3421     t2 = load_gpr(ctx, a->r2);
3422     if (a->r1 == 0) {
3423         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3424     } else if (width == TARGET_LONG_BITS) {
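             /* A double-word shift pair with an immediate count is a
                funnel shift: extract2 shifts the R1:R2 pair right by SA.  */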
3425         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3426     } else {
3427         assert(!a->d);
3428         if (a->r1 == a->r2) {
3429             TCGv_i32 t32 = tcg_temp_new_i32();
3430             tcg_gen_extrl_i64_i32(t32, t2);
3431             tcg_gen_rotri_i32(t32, t32, sa);
3432             tcg_gen_extu_i32_i64(dest, t32);
3433         } else {
3434             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3435             tcg_gen_extract_i64(dest, dest, sa, 32);
3436         }
3437     }
3438     save_gpr(ctx, a->t, dest);
3439 
3440     /* Install the new nullification.  */
3441     cond_free(&ctx->null_cond);
3442     if (a->c) {
3443         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3444     }
3445     return nullify_end(ctx);
3446 }
3447 
3448 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3449 {
3450     unsigned widthm1 = a->d ? 63 : 31;
3451     TCGv_i64 dest, src, tmp;
3452 
3453     if (!ctx->is_pa20 && a->d) {
3454         return false;
3455     }
3456     if (a->c) {
3457         nullify_over(ctx);
3458     }
3459 
3460     dest = dest_gpr(ctx, a->t);
3461     src = load_gpr(ctx, a->r);
3462     tmp = tcg_temp_new_i64();
3463 
3464     /* Recall that SAR uses big-endian bit numbering.  */
3465     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3466     tcg_gen_xori_i64(tmp, tmp, widthm1);
3467 
3468     if (a->se) {
3469         if (!a->d) {
3470             tcg_gen_ext32s_i64(dest, src);
3471             src = dest;
3472         }
3473         tcg_gen_sar_i64(dest, src, tmp);
3474         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3475     } else {
3476         if (!a->d) {
3477             tcg_gen_ext32u_i64(dest, src);
3478             src = dest;
3479         }
3480         tcg_gen_shr_i64(dest, src, tmp);
3481         tcg_gen_extract_i64(dest, dest, 0, a->len);
3482     }
3483     save_gpr(ctx, a->t, dest);
3484 
3485     /* Install the new nullification.  */
3486     cond_free(&ctx->null_cond);
3487     if (a->c) {
3488         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3489     }
3490     return nullify_end(ctx);
3491 }
3492 
3493 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3494 {
3495     unsigned len, cpos, width;
3496     TCGv_i64 dest, src;
3497 
3498     if (!ctx->is_pa20 && a->d) {
3499         return false;
3500     }
3501     if (a->c) {
3502         nullify_over(ctx);
3503     }
3504 
3505     len = a->len;
3506     width = a->d ? 64 : 32;
3507     cpos = width - 1 - a->pos;
3508     if (cpos + len > width) {
3509         len = width - cpos;
3510     }
3511 
3512     dest = dest_gpr(ctx, a->t);
3513     src = load_gpr(ctx, a->r);
3514     if (a->se) {
3515         tcg_gen_sextract_i64(dest, src, cpos, len);
3516     } else {
3517         tcg_gen_extract_i64(dest, src, cpos, len);
3518     }
3519     save_gpr(ctx, a->t, dest);
3520 
3521     /* Install the new nullification.  */
3522     cond_free(&ctx->null_cond);
3523     if (a->c) {
3524         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3525     }
3526     return nullify_end(ctx);
3527 }
3528 
3529 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3530 {
3531     unsigned len, width;
3532     uint64_t mask0, mask1;
3533     TCGv_i64 dest;
3534 
3535     if (!ctx->is_pa20 && a->d) {
3536         return false;
3537     }
3538     if (a->c) {
3539         nullify_over(ctx);
3540     }
3541 
3542     len = a->len;
3543     width = a->d ? 64 : 32;
3544     if (a->cpos + len > width) {
3545         len = width - a->cpos;
3546     }
3547 
3548     dest = dest_gpr(ctx, a->t);
3549     mask0 = deposit64(0, a->cpos, len, a->i);
3550     mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}

static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}

static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);
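    /* I.e. shift = widthm1 - (sar & widthm1); the xor with the
       all-ones widthm1 avoids a subtraction.  */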

    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
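    /* msb + (msb - 1) == (1 << len) - 1, the low len bits set.  */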
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    }
    return nullify_end(ctx);
}

static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_i64 tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism,
       to our advantage.  But that appears to be inconvenient to
       manage alongside branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Note the special
       case of "be disp(*,r0)", which uses a direct branch to disp so
       that we can goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    uint64_t dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = cpu_env(ctx->cs);
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
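            /* An IA queue offset carries its privilege level in the
               low two bits, so the promotion is a deposit into the
               branch target; dest is a 64-bit address, so use the
               64-bit deposit.  */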
            dest = deposit64(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
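        /* I.e. tmp = iaoq_f + 8 + GR[x] * 8: the scaled index is
           relative to the instruction after the delay slot.  */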
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
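        /* I.e. dest = GR[b] + GR[x] * 8, the same scaling by 8
           as for BLR above.  */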
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions are implemented as nops. */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
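    /* fr0_shadow is a shadow copy of the FPSR: bit 26 is the C bit
       and, judging by the masks below, bits 21..11 hold the compare
       queue.  */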

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_i64(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_i64 c = tcg_constant_i64(mask);
            tcg_gen_or_i64(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_i64(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_i64(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
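    /* XMPYU is an unsigned 32x32 -> 64-bit multiply; the operands
       are assumed to be zero-extended by load_frw0_i64.  */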
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
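    /* Encodings 0..15 map to fr16..fr31; bit 4 presumably selects
       the right-half registers, internal numbers 48..63.  */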
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;
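    /* The low 32 bits of cs_base hold the signed offset from iaoq_f
       to iaoq_b; zero means the back of the queue is unknown.  */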

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
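    /* TARGET_PAGE_MASK is a negative constant, so pc | mask is the
       negated count of bytes remaining on the page; dividing by 4
       converts to instructions.  */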
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new_i64();
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a privilege change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}