/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is encoded inverted: 0 means sr0 explicitly,
   rather than a space inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
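
/*
 * For example, with M in bit 1 and A in bit 0 of VAL:
 *   M=0       ->  0  (no base register update)
 *   M=1, A=0  ->  1  (post-modify)
 *   M=1, A=1  -> -1  (pre-modify)
 */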

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
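
/*
 * Worked example: x = 1, y = 3 gives val = 0b100011 = 35;
 * (35 ^ 31) + 1 = 0b111100 + 1 = 61 = 32 * 1 + 32 - 3, as required.
 */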

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
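
/*
 * HPPA r0 reads as zero and discards writes.  Reads are satisfied from
 * the constant ctx->zero, and writes with r0 as destination go to a
 * scratch temporary (see dest_gpr and save_gpr below), so cpu_gr[0]
 * itself is never touched.
 */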

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
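
/*
 * Typical shape of a translate function pairing nullify_over with
 * nullify_end (defined below), as used throughout this file:
 *
 *     if (a->cf) {
 *         nullify_over(ctx);        branch around the insn if nullified
 *     }
 *     ... emit the operation ...
 *     return nullify_end(ctx);      close the skip label, if any
 */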

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
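
/*
 * Wide (PSW_W) mode offsets are 62 bits, narrow mode 32; the bits
 * above the offset in the 64-bit GVA carry the space, which is OR'd
 * in by form_gva below.
 */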

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}
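
/*
 * D here is the doubleword-condition bit of the PA 2.0 encodings;
 * conditions are evaluated on the full 64 bits only for PA 2.0 with
 * D set, so every other case must first extend the 32-bit value.
 */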

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
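        /*
         * Worked example, d=0: res = 0x12003456 has a zero byte 2.
         * res - 0x01010101 = 0x10ff3355; andc with res gives
         * 0x00ff0301; masking with 0x80808080 leaves 0x00800000,
         * nonzero exactly because some byte of res was zero.
         */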
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);
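    /*
     * The sign bit of SV is now V: overflow occurred iff IN1 and IN2
     * have the same sign (in1 ^ in2 sign clear) while RES has the
     * opposite sign (res ^ in1 sign set).
     */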

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);
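    /*
     * Likewise, the sign bit of SV is V for IN1 - IN2: overflow
     * occurred iff the operands have different signs (in1 ^ in2 sign
     * set) and RES took the sign of IN2 (res ^ in1 sign set).
     */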

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
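        /*
         * The per-bit carry-ins of x + y + cin satisfy
         * cb = x ^ y ^ sum; with y = ~IN2 this is
         * IN1 ^ ~IN2 ^ DEST = eqv(IN1, IN2) ^ DEST, which is exactly
         * what the two operations above computed.
         */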
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
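    /* TMP now holds (top two offset bits) * 8: a byte offset into the
       four consecutive uint64_t registers sr[4..7].  */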
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
1776         /* The use_nullify_skip test implies we have a known control path.  */
1777         tcg_debug_assert(ctx->iaoq_b != -1);
1778         tcg_debug_assert(ctx->iaoq_n != -1);
1779 
1780         /* We do have to handle the non-local temporary, DEST, before
1781            branching.  Since IAOQ_F is not really live at this point, we
1782            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1783         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1784         next = tcg_temp_new_i64();
1785         tcg_gen_addi_i64(next, dest, 4);
1786         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1787 
1788         nullify_over(ctx);
1789         if (link != 0) {
1790             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1791         }
1792         tcg_gen_lookup_and_goto_ptr();
1793         return nullify_end(ctx);
1794     } else {
1795         c = ctx->null_cond.c;
1796         a0 = ctx->null_cond.a0;
1797         a1 = ctx->null_cond.a1;
1798 
1799         tmp = tcg_temp_new_i64();
1800         next = tcg_temp_new_i64();
1801 
1802         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1803         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1804         ctx->iaoq_n = -1;
1805         ctx->iaoq_n_var = next;
1806 
1807         if (link != 0) {
1808             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1809         }
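         /* That is, when the branch itself is nullified (null_cond holds),
            GR[link] is left unchanged; otherwise it receives the address
            of the insn following the delay slot.  */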
1810 
1811         if (is_n) {
1812             /* The branch nullifies the next insn, which means the state of N
1813                after the branch is the inverse of the state of N that applied
1814                to the branch.  */
1815             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1816             cond_free(&ctx->null_cond);
1817             ctx->null_cond = cond_make_n();
1818             ctx->psw_n_nonzero = true;
1819         } else {
1820             cond_free(&ctx->null_cond);
1821         }
1822     }
1823     return true;
1824 }
1825 
1826 /* Implement
1827  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1828  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1829  *    else
1830  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1831  * which keeps the privilege level from being increased.
1832  */
1833 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1834 {
1835     TCGv_i64 dest;
1836     switch (ctx->privilege) {
1837     case 0:
1838         /* Privilege 0 is maximum and is allowed to decrease.  */
1839         return offset;
1840     case 3:
1841         /* Privilege 3 is minimum and is never allowed to increase.  */
1842         dest = tcg_temp_new_i64();
1843         tcg_gen_ori_i64(dest, offset, 3);
1844         break;
1845     default:
1846         dest = tcg_temp_new_i64();
1847         tcg_gen_andi_i64(dest, offset, -4);
1848         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1849         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1850         break;
1851     }
1852     return dest;
1853 }
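
     /* A worked example of the unsigned-max trick above, with values
        assumed for illustration: at ctx->privilege == 1, a target whose
        low two bits are 0 (claiming privilege 0) gives
        dest = (offset & -4) | 1 > offset, so the movcond keeps dest and
        the branch stays at privilege 1; a target with low bits 3 gives
        dest < offset, so offset wins and privilege drops to 3, which is
        always allowed.  */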
1854 
1855 #ifdef CONFIG_USER_ONLY
1856 /* On Linux, page zero is normally marked execute only + gateway.
1857    Therefore normal read or write is supposed to fail, but specific
1858    offsets have kernel code mapped to raise permissions to implement
1859    system calls.  Handling this via an explicit check here, rather
1860    than in the "be disp(sr2,r0)" instruction that probably sent us
1861    here, is the easiest way to handle the branch delay slot on the
1862    aforementioned BE.  */
1863 static void do_page_zero(DisasContext *ctx)
1864 {
1865     TCGv_i64 tmp;
1866 
1867     /* If by some means we get here with PSW[N]=1, that implies that
1868        the B,GATE instruction would be skipped, and we'd fault on the
1869        next insn within the privileged page.  */
1870     switch (ctx->null_cond.c) {
1871     case TCG_COND_NEVER:
1872         break;
1873     case TCG_COND_ALWAYS:
1874         tcg_gen_movi_i64(cpu_psw_n, 0);
1875         goto do_sigill;
1876     default:
1877         /* Since this is always the first (and only) insn within the
1878            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1879         g_assert_not_reached();
1880     }
1881 
1882     /* Check that we didn't arrive here via some means that allowed
1883        non-sequential instruction execution.  Normally the PSW[B] bit
1884        detects this by disallowing the B,GATE instruction to execute
1885        under such conditions.  */
1886     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1887         goto do_sigill;
1888     }
1889 
1890     switch (ctx->iaoq_f & -4) {
1891     case 0x00: /* Null pointer call */
1892         gen_excp_1(EXCP_IMP);
1893         ctx->base.is_jmp = DISAS_NORETURN;
1894         break;
1895 
1896     case 0xb0: /* LWS */
1897         gen_excp_1(EXCP_SYSCALL_LWS);
1898         ctx->base.is_jmp = DISAS_NORETURN;
1899         break;
1900 
1901     case 0xe0: /* SET_THREAD_POINTER */
1902         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1903         tmp = tcg_temp_new_i64();
1904         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1905         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1906         tcg_gen_addi_i64(tmp, tmp, 4);
1907         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1908         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1909         break;
1910 
1911     case 0x100: /* SYSCALL */
1912         gen_excp_1(EXCP_SYSCALL);
1913         ctx->base.is_jmp = DISAS_NORETURN;
1914         break;
1915 
1916     default:
1917     do_sigill:
1918         gen_excp_1(EXCP_ILL);
1919         ctx->base.is_jmp = DISAS_NORETURN;
1920         break;
1921     }
1922 }
1923 #endif
1924 
1925 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1926 {
1927     cond_free(&ctx->null_cond);
1928     return true;
1929 }
1930 
1931 static bool trans_break(DisasContext *ctx, arg_break *a)
1932 {
1933     return gen_excp_iir(ctx, EXCP_BREAK);
1934 }
1935 
1936 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1937 {
1938     /* No point in nullifying the memory barrier.  */
1939     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1940 
1941     cond_free(&ctx->null_cond);
1942     return true;
1943 }
1944 
1945 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1946 {
1947     unsigned rt = a->t;
1948     TCGv_i64 tmp = dest_gpr(ctx, rt);
1949     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1950     save_gpr(ctx, rt, tmp);
1951 
1952     cond_free(&ctx->null_cond);
1953     return true;
1954 }
1955 
1956 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1957 {
1958     unsigned rt = a->t;
1959     unsigned rs = a->sp;
1960     TCGv_i64 t0 = tcg_temp_new_i64();
1961 
1962     load_spr(ctx, t0, rs);
1963     tcg_gen_shri_i64(t0, t0, 32);
1964 
1965     save_gpr(ctx, rt, t0);
1966 
1967     cond_free(&ctx->null_cond);
1968     return true;
1969 }
1970 
1971 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1972 {
1973     unsigned rt = a->t;
1974     unsigned ctl = a->r;
1975     TCGv_i64 tmp;
1976 
1977     switch (ctl) {
1978     case CR_SAR:
1979         if (a->e == 0) {
1980             /* MFSAR without ,W masks low 5 bits.  */
1981             tmp = dest_gpr(ctx, rt);
1982             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1983             save_gpr(ctx, rt, tmp);
1984             goto done;
1985         }
1986         save_gpr(ctx, rt, cpu_sar);
1987         goto done;
1988     case CR_IT: /* Interval Timer */
1989         /* FIXME: Respect PSW_S bit.  */
1990         nullify_over(ctx);
1991         tmp = dest_gpr(ctx, rt);
1992         if (translator_io_start(&ctx->base)) {
1993             gen_helper_read_interval_timer(tmp);
1994             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1995         } else {
1996             gen_helper_read_interval_timer(tmp);
1997         }
1998         save_gpr(ctx, rt, tmp);
1999         return nullify_end(ctx);
2000     case 26:
2001     case 27:
2002         break;
2003     default:
2004         /* All other control registers are privileged.  */
2005         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2006         break;
2007     }
2008 
2009     tmp = tcg_temp_new_i64();
2010     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2011     save_gpr(ctx, rt, tmp);
2012 
2013  done:
2014     cond_free(&ctx->null_cond);
2015     return true;
2016 }
2017 
2018 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2019 {
2020     unsigned rr = a->r;
2021     unsigned rs = a->sp;
2022     TCGv_i64 tmp;
2023 
2024     if (rs >= 5) {
2025         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2026     }
2027     nullify_over(ctx);
2028 
2029     tmp = tcg_temp_new_i64();
2030     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2031 
2032     if (rs >= 4) {
2033         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2034         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2035     } else {
2036         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2037     }
2038 
2039     return nullify_end(ctx);
2040 }
2041 
2042 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2043 {
2044     unsigned ctl = a->t;
2045     TCGv_i64 reg;
2046     TCGv_i64 tmp;
2047 
2048     if (ctl == CR_SAR) {
2049         reg = load_gpr(ctx, a->r);
2050         tmp = tcg_temp_new_i64();
2051         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2052         save_or_nullify(ctx, cpu_sar, tmp);
2053 
2054         cond_free(&ctx->null_cond);
2055         return true;
2056     }
2057 
2058     /* All other control registers are privileged or read-only.  */
2059     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2060 
2061 #ifndef CONFIG_USER_ONLY
2062     nullify_over(ctx);
2063     reg = load_gpr(ctx, a->r);
2064 
2065     switch (ctl) {
2066     case CR_IT:
2067         gen_helper_write_interval_timer(tcg_env, reg);
2068         break;
2069     case CR_EIRR:
2070         gen_helper_write_eirr(tcg_env, reg);
2071         break;
2072     case CR_EIEM:
2073         gen_helper_write_eiem(tcg_env, reg);
2074         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2075         break;
2076 
2077     case CR_IIASQ:
2078     case CR_IIAOQ:
2079         /* FIXME: Respect PSW_Q bit */
2080         /* The write advances the queue and stores to the back element.  */
2081         tmp = tcg_temp_new_i64();
2082         tcg_gen_ld_i64(tmp, tcg_env,
2083                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2084         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2085         tcg_gen_st_i64(reg, tcg_env,
2086                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2087         break;
2088 
2089     case CR_PID1:
2090     case CR_PID2:
2091     case CR_PID3:
2092     case CR_PID4:
2093         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2094 #ifndef CONFIG_USER_ONLY
2095         gen_helper_change_prot_id(tcg_env);
2096 #endif
2097         break;
2098 
2099     default:
2100         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101         break;
2102     }
2103     return nullify_end(ctx);
2104 #endif
2105 }
2106 
2107 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2108 {
2109     TCGv_i64 tmp = tcg_temp_new_i64();
2110 
2111     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2112     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2113     save_or_nullify(ctx, cpu_sar, tmp);
2114 
2115     cond_free(&ctx->null_cond);
2116     return true;
2117 }
2118 
2119 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2120 {
2121     TCGv_i64 dest = dest_gpr(ctx, a->t);
2122 
2123 #ifdef CONFIG_USER_ONLY
2124     /* We don't implement space registers in user mode. */
2125     tcg_gen_movi_i64(dest, 0);
2126 #else
2127     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2128     tcg_gen_shri_i64(dest, dest, 32);
2129 #endif
2130     save_gpr(ctx, a->t, dest);
2131 
2132     cond_free(&ctx->null_cond);
2133     return true;
2134 }
2135 
2136 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2137 {
2138     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2139 #ifndef CONFIG_USER_ONLY
2140     TCGv_i64 tmp;
2141 
2142     nullify_over(ctx);
2143 
2144     tmp = tcg_temp_new_i64();
2145     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2146     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2147     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2148     save_gpr(ctx, a->t, tmp);
2149 
2150     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2151     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2152     return nullify_end(ctx);
2153 #endif
2154 }
2155 
2156 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2157 {
2158     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2159 #ifndef CONFIG_USER_ONLY
2160     TCGv_i64 tmp;
2161 
2162     nullify_over(ctx);
2163 
2164     tmp = tcg_temp_new_i64();
2165     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2166     tcg_gen_ori_i64(tmp, tmp, a->i);
2167     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2168     save_gpr(ctx, a->t, tmp);
2169 
2170     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2171     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2172     return nullify_end(ctx);
2173 #endif
2174 }
2175 
2176 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2177 {
2178     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2179 #ifndef CONFIG_USER_ONLY
2180     TCGv_i64 tmp, reg;
2181     nullify_over(ctx);
2182 
2183     reg = load_gpr(ctx, a->r);
2184     tmp = tcg_temp_new_i64();
2185     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2186 
2187     /* Exit the TB to recognize new interrupts.  */
2188     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2189     return nullify_end(ctx);
2190 #endif
2191 }
2192 
2193 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2194 {
2195     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2196 #ifndef CONFIG_USER_ONLY
2197     nullify_over(ctx);
2198 
2199     if (rfi_r) {
2200         gen_helper_rfi_r(tcg_env);
2201     } else {
2202         gen_helper_rfi(tcg_env);
2203     }
2204     /* Exit the TB to recognize new interrupts.  */
2205     tcg_gen_exit_tb(NULL, 0);
2206     ctx->base.is_jmp = DISAS_NORETURN;
2207 
2208     return nullify_end(ctx);
2209 #endif
2210 }
2211 
2212 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2213 {
2214     return do_rfi(ctx, false);
2215 }
2216 
2217 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2218 {
2219     return do_rfi(ctx, true);
2220 }
2221 
2222 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2223 {
2224     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2225 #ifndef CONFIG_USER_ONLY
2226     nullify_over(ctx);
2227     gen_helper_halt(tcg_env);
2228     ctx->base.is_jmp = DISAS_NORETURN;
2229     return nullify_end(ctx);
2230 #endif
2231 }
2232 
2233 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2234 {
2235     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237     nullify_over(ctx);
2238     gen_helper_reset(tcg_env);
2239     ctx->base.is_jmp = DISAS_NORETURN;
2240     return nullify_end(ctx);
2241 #endif
2242 }
2243 
2244 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2245 {
2246     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2247 #ifndef CONFIG_USER_ONLY
2248     nullify_over(ctx);
2249     gen_helper_getshadowregs(tcg_env);
2250     return nullify_end(ctx);
2251 #endif
2252 }
2253 
2254 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2255 {
2256     if (a->m) {
2257         TCGv_i64 dest = dest_gpr(ctx, a->b);
2258         TCGv_i64 src1 = load_gpr(ctx, a->b);
2259         TCGv_i64 src2 = load_gpr(ctx, a->x);
2260 
2261         /* The only thing we need to do is the base register modification.  */
2262         tcg_gen_add_i64(dest, src1, src2);
2263         save_gpr(ctx, a->b, dest);
2264     }
2265     cond_free(&ctx->null_cond);
2266     return true;
2267 }
2268 
2269 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2270 {
2271     TCGv_i64 dest, ofs;
2272     TCGv_i32 level, want;
2273     TCGv_i64 addr;
2274 
2275     nullify_over(ctx);
2276 
2277     dest = dest_gpr(ctx, a->t);
2278     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2279 
2280     if (a->imm) {
2281         level = tcg_constant_i32(a->ri);
2282     } else {
2283         level = tcg_temp_new_i32();
2284         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2285         tcg_gen_andi_i32(level, level, 3);
2286     }
2287     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2288 
2289     gen_helper_probe(dest, tcg_env, addr, level, want);
2290 
2291     save_gpr(ctx, a->t, dest);
2292     return nullify_end(ctx);
2293 }
2294 
2295 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2296 {
2297     if (ctx->is_pa20) {
2298         return false;
2299     }
2300     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302     TCGv_i64 addr;
2303     TCGv_i64 ofs, reg;
2304 
2305     nullify_over(ctx);
2306 
2307     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2308     reg = load_gpr(ctx, a->r);
2309     if (a->addr) {
2310         gen_helper_itlba_pa11(tcg_env, addr, reg);
2311     } else {
2312         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2313     }
2314 
2315     /* Exit TB for TLB change if mmu is enabled.  */
2316     if (ctx->tb_flags & PSW_C) {
2317         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2318     }
2319     return nullify_end(ctx);
2320 #endif
2321 }
2322 
2323 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2324 {
2325     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2326 #ifndef CONFIG_USER_ONLY
2327     TCGv_i64 addr;
2328     TCGv_i64 ofs;
2329 
2330     nullify_over(ctx);
2331 
2332     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2333 
2334     /*
2335      * Page align now, rather than later, so that we can add in the
2336      * pa2.0 page_size field, taken from the low 4 bits of GR[b].
2337      */
2338     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2339     if (ctx->is_pa20) {
2340         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2341     }
2342 
2343     if (local) {
2344         gen_helper_ptlb_l(tcg_env, addr);
2345     } else {
2346         gen_helper_ptlb(tcg_env, addr);
2347     }
2348 
2349     if (a->m) {
2350         save_gpr(ctx, a->b, ofs);
2351     }
2352 
2353     /* Exit TB for TLB change if mmu is enabled.  */
2354     if (ctx->tb_flags & PSW_C) {
2355         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2356     }
2357     return nullify_end(ctx);
2358 #endif
2359 }
2360 
2361 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2362 {
2363     return do_pxtlb(ctx, a, false);
2364 }
2365 
2366 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2367 {
2368     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2369 }
2370 
2371 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2372 {
2373     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2374 #ifndef CONFIG_USER_ONLY
2375     nullify_over(ctx);
2376 
2377     trans_nop_addrx(ctx, a);
2378     gen_helper_ptlbe(tcg_env);
2379 
2380     /* Exit TB for TLB change if mmu is enabled.  */
2381     if (ctx->tb_flags & PSW_C) {
2382         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2383     }
2384     return nullify_end(ctx);
2385 #endif
2386 }
2387 
2388 /*
2389  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2390  * See
2391  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2392  *     page 13-9 (195/206)
2393  */
2394 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2395 {
2396     if (ctx->is_pa20) {
2397         return false;
2398     }
2399     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2400 #ifndef CONFIG_USER_ONLY
2401     TCGv_i64 addr, atl, stl;
2402     TCGv_i64 reg;
2403 
2404     nullify_over(ctx);
2405 
2406     /*
2407      * FIXME:
2408      *  if (not (pcxl or pcxl2))
2409      *    return gen_illegal(ctx);
2410      */
2411 
2412     atl = tcg_temp_new_i64();
2413     stl = tcg_temp_new_i64();
2414     addr = tcg_temp_new_i64();
2415 
2416     tcg_gen_ld32u_i64(stl, tcg_env,
2417                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2418                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2419     tcg_gen_ld32u_i64(atl, tcg_env,
2420                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2421                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2422     tcg_gen_shli_i64(stl, stl, 32);
2423     tcg_gen_or_i64(addr, atl, stl);
2424 
2425     reg = load_gpr(ctx, a->r);
2426     if (a->addr) {
2427         gen_helper_itlba_pa11(tcg_env, addr, reg);
2428     } else {
2429         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2430     }
2431 
2432     /* Exit TB for TLB change if mmu is enabled.  */
2433     if (ctx->tb_flags & PSW_C) {
2434         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2435     }
2436     return nullify_end(ctx);
2437 #endif
2438 }
2439 
2440 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2441 {
2442     if (!ctx->is_pa20) {
2443         return false;
2444     }
2445     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2446 #ifndef CONFIG_USER_ONLY
2447     nullify_over(ctx);
2448     {
2449         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2450         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2451 
2452         if (a->data) {
2453             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2454         } else {
2455             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2456         }
2457     }
2458     /* Exit TB for TLB change if mmu is enabled.  */
2459     if (ctx->tb_flags & PSW_C) {
2460         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2461     }
2462     return nullify_end(ctx);
2463 #endif
2464 }
2465 
2466 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2467 {
2468     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2469 #ifndef CONFIG_USER_ONLY
2470     TCGv_i64 vaddr;
2471     TCGv_i64 ofs, paddr;
2472 
2473     nullify_over(ctx);
2474 
2475     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2476 
2477     paddr = tcg_temp_new_i64();
2478     gen_helper_lpa(paddr, tcg_env, vaddr);
2479 
2480     /* Note that physical address result overrides base modification.  */
2481     if (a->m) {
2482         save_gpr(ctx, a->b, ofs);
2483     }
2484     save_gpr(ctx, a->t, paddr);
2485 
2486     return nullify_end(ctx);
2487 #endif
2488 }
2489 
2490 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2491 {
2492     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2493 
2494     /* The Coherence Index is an implementation-defined function of the
2495        physical address.  Two addresses with the same CI have a coherent
2496        view of the cache.  Our implementation is to return 0 for all,
2497        since the entire address space is coherent.  */
2498     save_gpr(ctx, a->t, ctx->zero);
2499 
2500     cond_free(&ctx->null_cond);
2501     return true;
2502 }
2503 
2504 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2505 {
2506     return do_add_reg(ctx, a, false, false, false, false);
2507 }
2508 
2509 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2510 {
2511     return do_add_reg(ctx, a, true, false, false, false);
2512 }
2513 
2514 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2515 {
2516     return do_add_reg(ctx, a, false, true, false, false);
2517 }
2518 
2519 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2520 {
2521     return do_add_reg(ctx, a, false, false, false, true);
2522 }
2523 
2524 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2525 {
2526     return do_add_reg(ctx, a, false, true, false, true);
2527 }
2528 
2529 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2530 {
2531     return do_sub_reg(ctx, a, false, false, false);
2532 }
2533 
2534 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2535 {
2536     return do_sub_reg(ctx, a, true, false, false);
2537 }
2538 
2539 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2540 {
2541     return do_sub_reg(ctx, a, false, false, true);
2542 }
2543 
2544 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2545 {
2546     return do_sub_reg(ctx, a, true, false, true);
2547 }
2548 
2549 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2550 {
2551     return do_sub_reg(ctx, a, false, true, false);
2552 }
2553 
2554 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2555 {
2556     return do_sub_reg(ctx, a, true, true, false);
2557 }
2558 
2559 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2560 {
2561     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2562 }
2563 
2564 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2565 {
2566     return do_log_reg(ctx, a, tcg_gen_and_i64);
2567 }
2568 
2569 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2570 {
2571     if (a->cf == 0) {
2572         unsigned r2 = a->r2;
2573         unsigned r1 = a->r1;
2574         unsigned rt = a->t;
2575 
2576         if (rt == 0) { /* NOP */
2577             cond_free(&ctx->null_cond);
2578             return true;
2579         }
2580         if (r2 == 0) { /* COPY */
2581             if (r1 == 0) {
2582                 TCGv_i64 dest = dest_gpr(ctx, rt);
2583                 tcg_gen_movi_i64(dest, 0);
2584                 save_gpr(ctx, rt, dest);
2585             } else {
2586                 save_gpr(ctx, rt, cpu_gr[r1]);
2587             }
2588             cond_free(&ctx->null_cond);
2589             return true;
2590         }
2591 #ifndef CONFIG_USER_ONLY
2592         /* These are QEMU extensions and are nops in the real architecture:
2593          *
2594          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2595          * or %r31,%r31,%r31 -- death loop; offline cpu
2596          *                      currently implemented as idle.
2597          */
2598         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2599             /* No need to check for supervisor, as userland can only pause
2600                until the next timer interrupt.  */
2601             nullify_over(ctx);
2602 
2603             /* Advance the instruction queue.  */
2604             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2605             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2606             nullify_set(ctx, 0);
2607 
2608             /* Tell the qemu main loop to halt until this cpu has work.  */
2609             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2610                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2611             gen_excp_1(EXCP_HALTED);
2612             ctx->base.is_jmp = DISAS_NORETURN;
2613 
2614             return nullify_end(ctx);
2615         }
2616 #endif
2617     }
2618     return do_log_reg(ctx, a, tcg_gen_or_i64);
2619 }
2620 
2621 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2622 {
2623     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2624 }
2625 
2626 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2627 {
2628     TCGv_i64 tcg_r1, tcg_r2;
2629 
2630     if (a->cf) {
2631         nullify_over(ctx);
2632     }
2633     tcg_r1 = load_gpr(ctx, a->r1);
2634     tcg_r2 = load_gpr(ctx, a->r2);
2635     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2636     return nullify_end(ctx);
2637 }
2638 
2639 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2640 {
2641     TCGv_i64 tcg_r1, tcg_r2;
2642 
2643     if (a->cf) {
2644         nullify_over(ctx);
2645     }
2646     tcg_r1 = load_gpr(ctx, a->r1);
2647     tcg_r2 = load_gpr(ctx, a->r2);
2648     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2649     return nullify_end(ctx);
2650 }
2651 
2652 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2653 {
2654     TCGv_i64 tcg_r1, tcg_r2, tmp;
2655 
2656     if (a->cf) {
2657         nullify_over(ctx);
2658     }
2659     tcg_r1 = load_gpr(ctx, a->r1);
2660     tcg_r2 = load_gpr(ctx, a->r2);
2661     tmp = tcg_temp_new_i64();
2662     tcg_gen_not_i64(tmp, tcg_r2);
2663     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2664     return nullify_end(ctx);
2665 }
2666 
2667 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2668 {
2669     return do_uaddcm(ctx, a, false);
2670 }
2671 
2672 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2673 {
2674     return do_uaddcm(ctx, a, true);
2675 }
2676 
2677 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2678 {
2679     TCGv_i64 tmp;
2680 
2681     nullify_over(ctx);
2682 
2683     tmp = tcg_temp_new_i64();
2684     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2685     if (!is_i) {
2686         tcg_gen_not_i64(tmp, tmp);
2687     }
2688     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2689     tcg_gen_muli_i64(tmp, tmp, 6);
2690     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2691             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2692     return nullify_end(ctx);
2693 }
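
     /* A sketch of the usual calling sequence DCOR supports; the 0x66...66
        pre-bias is the conventional technique, not something spelled out
        in this file.  To add packed BCD, first compute a + b + 0x66666666
        with a plain binary add, recording the per-digit carries; DCOR then
        subtracts 6 from every digit that produced no carry.  For example,
        0x05 + 0x08 + 0x66 = 0x73 carries only out of digit 0, and
        0x73 - 0x60 = 0x13, the correct BCD sum.  */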
2694 
2695 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2696 {
2697     return do_dcor(ctx, a, false);
2698 }
2699 
2700 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2701 {
2702     return do_dcor(ctx, a, true);
2703 }
2704 
2705 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2706 {
2707     TCGv_i64 dest, add1, add2, addc, in1, in2;
2708     TCGv_i64 cout;
2709 
2710     nullify_over(ctx);
2711 
2712     in1 = load_gpr(ctx, a->r1);
2713     in2 = load_gpr(ctx, a->r2);
2714 
2715     add1 = tcg_temp_new_i64();
2716     add2 = tcg_temp_new_i64();
2717     addc = tcg_temp_new_i64();
2718     dest = tcg_temp_new_i64();
2719 
2720     /* Form R1 << 1 | PSW[CB]{8}.  */
2721     tcg_gen_add_i64(add1, in1, in1);
2722     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2723 
2724     /*
2725      * Add or subtract R2, depending on PSW[V].  Proper computation of
2726      * carry requires that we subtract via + ~R2 + 1, as described in
2727      * the manual.  By extracting and masking V, we can produce the
2728      * proper inputs to the addition without movcond.
2729      */
2730     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2731     tcg_gen_xor_i64(add2, in2, addc);
2732     tcg_gen_andi_i64(addc, addc, 1);
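     /* Spelled out: when PSW[V] bit 31 is set, addc starts as -1, so
        add2 = in2 ^ -1 = ~in2 and (addc & 1) == 1, giving
        add1 + ~in2 + 1 == add1 - in2; when clear, addc is 0 and in2
        is added unchanged.  */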
2733 
2734     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2735     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2736                      addc, ctx->zero);
2737 
2738     /* Write back the result register.  */
2739     save_gpr(ctx, a->t, dest);
2740 
2741     /* Write back PSW[CB].  */
2742     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2743     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
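     /* add1 ^ add2 ^ dest is the standard identity recovering the
        per-bit carry-in vector from two addends and their sum.  */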
2744 
2745     /* Write back PSW[V] for the division step.  */
2746     cout = get_psw_carry(ctx, false);
2747     tcg_gen_neg_i64(cpu_psw_v, cout);
2748     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2749 
2750     /* Install the new nullification.  */
2751     if (a->cf) {
2752         TCGv_i64 sv = NULL;
2753         if (cond_need_sv(a->cf >> 1)) {
2754             /* ??? The lshift is supposed to contribute to overflow.  */
2755             sv = do_add_sv(ctx, dest, add1, add2);
2756         }
2757         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2758     }
2759 
2760     return nullify_end(ctx);
2761 }
2762 
2763 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2764 {
2765     return do_add_imm(ctx, a, false, false);
2766 }
2767 
2768 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2769 {
2770     return do_add_imm(ctx, a, true, false);
2771 }
2772 
2773 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2774 {
2775     return do_add_imm(ctx, a, false, true);
2776 }
2777 
2778 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2779 {
2780     return do_add_imm(ctx, a, true, true);
2781 }
2782 
2783 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2784 {
2785     return do_sub_imm(ctx, a, false);
2786 }
2787 
2788 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2789 {
2790     return do_sub_imm(ctx, a, true);
2791 }
2792 
2793 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2794 {
2795     TCGv_i64 tcg_im, tcg_r2;
2796 
2797     if (a->cf) {
2798         nullify_over(ctx);
2799     }
2800 
2801     tcg_im = tcg_constant_i64(a->i);
2802     tcg_r2 = load_gpr(ctx, a->r);
2803     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2804 
2805     return nullify_end(ctx);
2806 }
2807 
2808 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2809                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2810 {
2811     TCGv_i64 r1, r2, dest;
2812 
2813     if (!ctx->is_pa20) {
2814         return false;
2815     }
2816 
2817     nullify_over(ctx);
2818 
2819     r1 = load_gpr(ctx, a->r1);
2820     r2 = load_gpr(ctx, a->r2);
2821     dest = dest_gpr(ctx, a->t);
2822 
2823     fn(dest, r1, r2);
2824     save_gpr(ctx, a->t, dest);
2825 
2826     return nullify_end(ctx);
2827 }
2828 
2829 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2830                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2831 {
2832     TCGv_i64 r, dest;
2833 
2834     if (!ctx->is_pa20) {
2835         return false;
2836     }
2837 
2838     nullify_over(ctx);
2839 
2840     r = load_gpr(ctx, a->r);
2841     dest = dest_gpr(ctx, a->t);
2842 
2843     fn(dest, r, a->i);
2844     save_gpr(ctx, a->t, dest);
2845 
2846     return nullify_end(ctx);
2847 }
2848 
2849 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2850                                 void (*fn)(TCGv_i64, TCGv_i64,
2851                                            TCGv_i64, TCGv_i32))
2852 {
2853     TCGv_i64 r1, r2, dest;
2854 
2855     if (!ctx->is_pa20) {
2856         return false;
2857     }
2858 
2859     nullify_over(ctx);
2860 
2861     r1 = load_gpr(ctx, a->r1);
2862     r2 = load_gpr(ctx, a->r2);
2863     dest = dest_gpr(ctx, a->t);
2864 
2865     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2866     save_gpr(ctx, a->t, dest);
2867 
2868     return nullify_end(ctx);
2869 }
2870 
2871 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2872 {
2873     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2874 }
2875 
2876 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2877 {
2878     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2879 }
2880 
2881 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2882 {
2883     return do_multimedia(ctx, a, gen_helper_hadd_us);
2884 }
2885 
2886 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2887 {
2888     return do_multimedia(ctx, a, gen_helper_havg);
2889 }
2890 
2891 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2892 {
2893     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2894 }
2895 
2896 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2897 {
2898     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2899 }
2900 
2901 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2902 {
2903     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2904 }
2905 
2906 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2907 {
2908     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2909 }
2910 
2911 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2912 {
2913     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2914 }
2915 
2916 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2917 {
2918     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2919 }
2920 
2921 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2922 {
2923     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2924 }
2925 
2926 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2927 {
2928     return do_multimedia(ctx, a, gen_helper_hsub_us);
2929 }
2930 
2931 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2932 {
2933     uint64_t mask = 0xffff0000ffff0000ull;
2934     TCGv_i64 tmp = tcg_temp_new_i64();
2935 
2936     tcg_gen_andi_i64(tmp, r2, mask);
2937     tcg_gen_andi_i64(dst, r1, mask);
2938     tcg_gen_shri_i64(tmp, tmp, 16);
2939     tcg_gen_or_i64(dst, dst, tmp);
2940 }
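
     /* Viewing each register as four big-endian 16-bit lanes {x0,x1,x2,x3},
        the masks and 16-bit shift above interleave the even (leftmost)
        lanes: dst = {r1.0, r2.0, r1.2, r2.2}.  */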
2941 
2942 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2943 {
2944     return do_multimedia(ctx, a, gen_mixh_l);
2945 }
2946 
2947 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2948 {
2949     uint64_t mask = 0x0000ffff0000ffffull;
2950     TCGv_i64 tmp = tcg_temp_new_i64();
2951 
2952     tcg_gen_andi_i64(tmp, r1, mask);
2953     tcg_gen_andi_i64(dst, r2, mask);
2954     tcg_gen_shli_i64(tmp, tmp, 16);
2955     tcg_gen_or_i64(dst, dst, tmp);
2956 }
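
     /* The mirror image of gen_mixh_l: the odd lanes are interleaved,
        giving dst = {r1.1, r2.1, r1.3, r2.3}.  */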
2957 
2958 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2959 {
2960     return do_multimedia(ctx, a, gen_mixh_r);
2961 }
2962 
2963 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2964 {
2965     TCGv_i64 tmp = tcg_temp_new_i64();
2966 
2967     tcg_gen_shri_i64(tmp, r2, 32);
2968     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2969 }
2970 
2971 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2972 {
2973     return do_multimedia(ctx, a, gen_mixw_l);
2974 }
2975 
2976 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2977 {
2978     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2979 }
2980 
2981 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2982 {
2983     return do_multimedia(ctx, a, gen_mixw_r);
2984 }
2985 
2986 static bool trans_permh(DisasContext *ctx, arg_permh *a)
2987 {
2988     TCGv_i64 r, t0, t1, t2, t3;
2989 
2990     if (!ctx->is_pa20) {
2991         return false;
2992     }
2993 
2994     nullify_over(ctx);
2995 
2996     r = load_gpr(ctx, a->r1);
2997     t0 = tcg_temp_new_i64();
2998     t1 = tcg_temp_new_i64();
2999     t2 = tcg_temp_new_i64();
3000     t3 = tcg_temp_new_i64();
3001 
3002     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3003     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3004     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3005     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3006 
3007     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3008     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3009     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3010 
3011     save_gpr(ctx, a->t, t0);
3012     return nullify_end(ctx);
3013 }
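
     /* For example, c0..c3 = {3,2,1,0} makes the deposits above reverse
        the four halfword lanes of GR[r1], while c0..c3 = {0,0,0,0}
        broadcasts the leftmost lane into all four positions.  */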
3014 
3015 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3016 {
3017     if (ctx->is_pa20) {
3018        /*
3019         * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3020         * Any base modification still occurs.
3021         */
3022         if (a->t == 0) {
3023             return trans_nop_addrx(ctx, a);
3024         }
3025     } else if (a->size > MO_32) {
3026         return gen_illegal(ctx);
3027     }
3028     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3029                    a->disp, a->sp, a->m, a->size | MO_TE);
3030 }
3031 
3032 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3033 {
3034     assert(a->x == 0 && a->scale == 0);
3035     if (!ctx->is_pa20 && a->size > MO_32) {
3036         return gen_illegal(ctx);
3037     }
3038     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3039 }
3040 
3041 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3042 {
3043     MemOp mop = MO_TE | MO_ALIGN | a->size;
3044     TCGv_i64 dest, ofs;
3045     TCGv_i64 addr;
3046 
3047     if (!ctx->is_pa20 && a->size > MO_32) {
3048         return gen_illegal(ctx);
3049     }
3050 
3051     nullify_over(ctx);
3052 
3053     if (a->m) {
3054         /* Base register modification.  Make sure that if RT == RB,
3055            we still see the result of the load.  */
3056         dest = tcg_temp_new_i64();
3057     } else {
3058         dest = dest_gpr(ctx, a->t);
3059     }
3060 
3061     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3062              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3063 
3064     /*
3065      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3066      * However, actual hardware succeeds when aligned mod 4.
3067      * Detect this case and log a GUEST_ERROR.
3068      *
3069      * TODO: HPPA64 relaxes the over-alignment requirement
3070      * with the ,co completer.
3071      */
3072     gen_helper_ldc_check(addr);
3073 
3074     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3075 
3076     if (a->m) {
3077         save_gpr(ctx, a->b, ofs);
3078     }
3079     save_gpr(ctx, a->t, dest);
3080 
3081     return nullify_end(ctx);
3082 }
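
     /* Note that the load-and-clear semantic falls out of the exchange
        with ctx->zero above: the old memory word is returned in dest
        while zero is stored, atomically when the TB runs in parallel
        mode.  */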
3083 
3084 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3085 {
3086     TCGv_i64 ofs, val;
3087     TCGv_i64 addr;
3088 
3089     nullify_over(ctx);
3090 
3091     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3092              ctx->mmu_idx == MMU_PHYS_IDX);
3093     val = load_gpr(ctx, a->r);
3094     if (a->a) {
3095         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3096             gen_helper_stby_e_parallel(tcg_env, addr, val);
3097         } else {
3098             gen_helper_stby_e(tcg_env, addr, val);
3099         }
3100     } else {
3101         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3102             gen_helper_stby_b_parallel(tcg_env, addr, val);
3103         } else {
3104             gen_helper_stby_b(tcg_env, addr, val);
3105         }
3106     }
3107     if (a->m) {
3108         tcg_gen_andi_i64(ofs, ofs, ~3);
3109         save_gpr(ctx, a->b, ofs);
3110     }
3111 
3112     return nullify_end(ctx);
3113 }
3114 
3115 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3116 {
3117     TCGv_i64 ofs, val;
3118     TCGv_i64 addr;
3119 
3120     if (!ctx->is_pa20) {
3121         return false;
3122     }
3123     nullify_over(ctx);
3124 
3125     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3126              ctx->mmu_idx == MMU_PHYS_IDX);
3127     val = load_gpr(ctx, a->r);
3128     if (a->a) {
3129         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3130             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3131         } else {
3132             gen_helper_stdby_e(tcg_env, addr, val);
3133         }
3134     } else {
3135         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3136             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3137         } else {
3138             gen_helper_stdby_b(tcg_env, addr, val);
3139         }
3140     }
3141     if (a->m) {
3142         tcg_gen_andi_i64(ofs, ofs, ~7);
3143         save_gpr(ctx, a->b, ofs);
3144     }
3145 
3146     return nullify_end(ctx);
3147 }
3148 
3149 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3150 {
3151     int hold_mmu_idx = ctx->mmu_idx;
3152 
3153     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3154     ctx->mmu_idx = MMU_PHYS_IDX;
3155     trans_ld(ctx, a);
3156     ctx->mmu_idx = hold_mmu_idx;
3157     return true;
3158 }
3159 
3160 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3161 {
3162     int hold_mmu_idx = ctx->mmu_idx;
3163 
3164     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3165     ctx->mmu_idx = MMU_PHYS_IDX;
3166     trans_st(ctx, a);
3167     ctx->mmu_idx = hold_mmu_idx;
3168     return true;
3169 }
3170 
3171 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3172 {
3173     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3174 
3175     tcg_gen_movi_i64(tcg_rt, a->i);
3176     save_gpr(ctx, a->t, tcg_rt);
3177     cond_free(&ctx->null_cond);
3178     return true;
3179 }
3180 
3181 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3182 {
3183     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3184     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3185 
3186     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3187     save_gpr(ctx, 1, tcg_r1);
3188     cond_free(&ctx->null_cond);
3189     return true;
3190 }
3191 
3192 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3193 {
3194     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3195 
3196     /* Special case rb == 0, for the LDI pseudo-op.
3197        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3198     if (a->b == 0) {
3199         tcg_gen_movi_i64(tcg_rt, a->i);
3200     } else {
3201         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3202     }
3203     save_gpr(ctx, a->t, tcg_rt);
3204     cond_free(&ctx->null_cond);
3205     return true;
3206 }
3207 
3208 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3209                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3210 {
3211     TCGv_i64 dest, in2, sv;
3212     DisasCond cond;
3213 
3214     in2 = load_gpr(ctx, r);
3215     dest = tcg_temp_new_i64();
3216 
3217     tcg_gen_sub_i64(dest, in1, in2);
3218 
3219     sv = NULL;
3220     if (cond_need_sv(c)) {
3221         sv = do_sub_sv(ctx, dest, in1, in2);
3222     }
3223 
3224     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3225     return do_cbranch(ctx, disp, n, &cond);
3226 }
3227 
3228 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3229 {
3230     if (!ctx->is_pa20 && a->d) {
3231         return false;
3232     }
3233     nullify_over(ctx);
3234     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3235                    a->c, a->f, a->d, a->n, a->disp);
3236 }
3237 
3238 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3239 {
3240     if (!ctx->is_pa20 && a->d) {
3241         return false;
3242     }
3243     nullify_over(ctx);
3244     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3245                    a->c, a->f, a->d, a->n, a->disp);
3246 }
3247 
3248 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3249                     unsigned c, unsigned f, unsigned n, int disp)
3250 {
3251     TCGv_i64 dest, in2, sv, cb_cond;
3252     DisasCond cond;
3253     bool d = false;
3254 
3255     /*
3256      * For hppa64, the ADDB conditions change with PSW.W,
3257      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3258      */
3259     if (ctx->tb_flags & PSW_W) {
3260         d = c >= 5;
3261         if (d) {
3262             c &= 3;
3263         }
3264     }
3265 
3266     in2 = load_gpr(ctx, r);
3267     dest = tcg_temp_new_i64();
3268     sv = NULL;
3269     cb_cond = NULL;
3270 
3271     if (cond_need_cb(c)) {
3272         TCGv_i64 cb = tcg_temp_new_i64();
3273         TCGv_i64 cb_msb = tcg_temp_new_i64();
3274 
3275         tcg_gen_movi_i64(cb_msb, 0);
3276         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3277         tcg_gen_xor_i64(cb, in1, in2);
3278         tcg_gen_xor_i64(cb, cb, dest);
3279         cb_cond = get_carry(ctx, d, cb, cb_msb);
3280     } else {
3281         tcg_gen_add_i64(dest, in1, in2);
3282     }
3283     if (cond_need_sv(c)) {
3284         sv = do_add_sv(ctx, dest, in1, in2);
3285     }
3286 
3287     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3288     save_gpr(ctx, r, dest);
3289     return do_cbranch(ctx, disp, n, &cond);
3290 }
3291 
3292 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3293 {
3294     nullify_over(ctx);
3295     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3296 }
3297 
3298 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3299 {
3300     nullify_over(ctx);
3301     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3302 }
3303 
3304 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3305 {
3306     TCGv_i64 tmp, tcg_r;
3307     DisasCond cond;
3308 
3309     nullify_over(ctx);
3310 
3311     tmp = tcg_temp_new_i64();
3312     tcg_r = load_gpr(ctx, a->r);
3313     if (cond_need_ext(ctx, a->d)) {
3314         /* Force shift into [32,63] */
3315         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3316         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3317     } else {
3318         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3319     }
3320 
3321     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3322     return do_cbranch(ctx, a->disp, a->n, &cond);
3323 }
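
     /* The branch-on-bit test works by shifting the selected bit into
        bit 63 and testing the sign: e.g. in the 32-bit case, SAR == 0
        yields a shift count of 32, so the word's most significant bit
        (big-endian bit 0) lands in bit 63 where GE/LT can see it.  */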
3324 
3325 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3326 {
3327     TCGv_i64 tmp, tcg_r;
3328     DisasCond cond;
3329     int p;
3330 
3331     nullify_over(ctx);
3332 
3333     tmp = tcg_temp_new_i64();
3334     tcg_r = load_gpr(ctx, a->r);
3335     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3336     tcg_gen_shli_i64(tmp, tcg_r, p);
3337 
3338     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3339     return do_cbranch(ctx, a->disp, a->n, &cond);
3340 }
3341 
3342 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3343 {
3344     TCGv_i64 dest;
3345     DisasCond cond;
3346 
3347     nullify_over(ctx);
3348 
3349     dest = dest_gpr(ctx, a->r2);
3350     if (a->r1 == 0) {
3351         tcg_gen_movi_i64(dest, 0);
3352     } else {
3353         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3354     }
3355 
3356     /* All MOVB conditions are 32-bit. */
3357     cond = do_sed_cond(ctx, a->c, false, dest);
3358     return do_cbranch(ctx, a->disp, a->n, &cond);
3359 }
3360 
3361 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3362 {
3363     TCGv_i64 dest;
3364     DisasCond cond;
3365 
3366     nullify_over(ctx);
3367 
3368     dest = dest_gpr(ctx, a->r);
3369     tcg_gen_movi_i64(dest, a->i);
3370 
3371     /* All MOVBI conditions are 32-bit. */
3372     cond = do_sed_cond(ctx, a->c, false, dest);
3373     return do_cbranch(ctx, a->disp, a->n, &cond);
3374 }
3375 
3376 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3377 {
3378     TCGv_i64 dest, src2;
3379 
3380     if (!ctx->is_pa20 && a->d) {
3381         return false;
3382     }
3383     if (a->c) {
3384         nullify_over(ctx);
3385     }
3386 
3387     dest = dest_gpr(ctx, a->t);
3388     src2 = load_gpr(ctx, a->r2);
3389     if (a->r1 == 0) {
3390         if (a->d) {
3391             tcg_gen_shr_i64(dest, src2, cpu_sar);
3392         } else {
3393             TCGv_i64 tmp = tcg_temp_new_i64();
3394 
3395             tcg_gen_ext32u_i64(dest, src2);
3396             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3397             tcg_gen_shr_i64(dest, dest, tmp);
3398         }
3399     } else if (a->r1 == a->r2) {
3400         if (a->d) {
3401             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3402         } else {
3403             TCGv_i32 t32 = tcg_temp_new_i32();
3404             TCGv_i32 s32 = tcg_temp_new_i32();
3405 
3406             tcg_gen_extrl_i64_i32(t32, src2);
3407             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3408             tcg_gen_andi_i32(s32, s32, 31);
3409             tcg_gen_rotr_i32(t32, t32, s32);
3410             tcg_gen_extu_i32_i64(dest, t32);
3411         }
3412     } else {
3413         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3414 
3415         if (a->d) {
3416             TCGv_i64 t = tcg_temp_new_i64();
3417             TCGv_i64 n = tcg_temp_new_i64();
3418 
3419             tcg_gen_xori_i64(n, cpu_sar, 63);
3420             tcg_gen_shl_i64(t, src1, n);
3421             tcg_gen_shli_i64(t, t, 1);
3422             tcg_gen_shr_i64(dest, src2, cpu_sar);
3423             tcg_gen_or_i64(dest, dest, t);
3424         } else {
3425             TCGv_i64 t = tcg_temp_new_i64();
3426             TCGv_i64 s = tcg_temp_new_i64();
3427 
3428             tcg_gen_concat32_i64(t, src2, src1);
3429             tcg_gen_andi_i64(s, cpu_sar, 31);
3430             tcg_gen_shr_i64(dest, t, s);
3431         }
3432     }
3433     save_gpr(ctx, a->t, dest);
3434 
3435     /* Install the new nullification.  */
3436     cond_free(&ctx->null_cond);
3437     if (a->c) {
3438         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3439     }
3440     return nullify_end(ctx);
3441 }
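
     /* In essence SHRP is a funnel shift: the 2*width quantity r1:r2 is
        shifted right by SAR and the low half kept.  The special cases
        above (plain shift for r1 == 0, rotate for r1 == r2) merely avoid
        materializing the concatenation.  */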
3442 
3443 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3444 {
3445     unsigned width, sa;
3446     TCGv_i64 dest, t2;
3447 
3448     if (!ctx->is_pa20 && a->d) {
3449         return false;
3450     }
3451     if (a->c) {
3452         nullify_over(ctx);
3453     }
3454 
3455     width = a->d ? 64 : 32;
3456     sa = width - 1 - a->cpos;
3457 
3458     dest = dest_gpr(ctx, a->t);
3459     t2 = load_gpr(ctx, a->r2);
3460     if (a->r1 == 0) {
3461         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3462     } else if (width == TARGET_LONG_BITS) {
3463         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3464     } else {
3465         assert(!a->d);
3466         if (a->r1 == a->r2) {
3467             TCGv_i32 t32 = tcg_temp_new_i32();
3468             tcg_gen_extrl_i64_i32(t32, t2);
3469             tcg_gen_rotri_i32(t32, t32, sa);
3470             tcg_gen_extu_i32_i64(dest, t32);
3471         } else {
3472             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3473             tcg_gen_extract_i64(dest, dest, sa, 32);
3474         }
3475     }
3476     save_gpr(ctx, a->t, dest);
3477 
3478     /* Install the new nullification.  */
3479     cond_free(&ctx->null_cond);
3480     if (a->c) {
3481         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3482     }
3483     return nullify_end(ctx);
3484 }
3485 
3486 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3487 {
3488     unsigned widthm1 = a->d ? 63 : 31;
3489     TCGv_i64 dest, src, tmp;
3490 
3491     if (!ctx->is_pa20 && a->d) {
3492         return false;
3493     }
3494     if (a->c) {
3495         nullify_over(ctx);
3496     }
3497 
3498     dest = dest_gpr(ctx, a->t);
3499     src = load_gpr(ctx, a->r);
3500     tmp = tcg_temp_new_i64();
3501 
3502     /* Recall that SAR is using big-endian bit numbering.  */
3503     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3504     tcg_gen_xori_i64(tmp, tmp, widthm1);
3505 
3506     if (a->se) {
3507         if (!a->d) {
3508             tcg_gen_ext32s_i64(dest, src);
3509             src = dest;
3510         }
3511         tcg_gen_sar_i64(dest, src, tmp);
3512         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3513     } else {
3514         if (!a->d) {
3515             tcg_gen_ext32u_i64(dest, src);
3516             src = dest;
3517         }
3518         tcg_gen_shr_i64(dest, src, tmp);
3519         tcg_gen_extract_i64(dest, dest, 0, a->len);
3520     }
3521     save_gpr(ctx, a->t, dest);
3522 
3523     /* Install the new nullification.  */
3524     cond_free(&ctx->null_cond);
3525     if (a->c) {
3526         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3527     }
3528     return nullify_end(ctx);
3529 }
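
     /* Example of the bit-number conversion above: in the 32-bit case
        (widthm1 == 31), SAR == 31 names the rightmost bit in PA-RISC's
        big-endian numbering, and (31 & 31) ^ 31 == 0 is exactly the
        little-endian shift count the shr/sar expects.  */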
3530 
3531 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3532 {
3533     unsigned len, cpos, width;
3534     TCGv_i64 dest, src;
3535 
3536     if (!ctx->is_pa20 && a->d) {
3537         return false;
3538     }
3539     if (a->c) {
3540         nullify_over(ctx);
3541     }
3542 
3543     len = a->len;
3544     width = a->d ? 64 : 32;
3545     cpos = width - 1 - a->pos;
3546     if (cpos + len > width) {
3547         len = width - cpos;
3548     }
3549 
3550     dest = dest_gpr(ctx, a->t);
3551     src = load_gpr(ctx, a->r);
3552     if (a->se) {
3553         tcg_gen_sextract_i64(dest, src, cpos, len);
3554     } else {
3555         tcg_gen_extract_i64(dest, src, cpos, len);
3556     }
3557     save_gpr(ctx, a->t, dest);
3558 
3559     /* Install the new nullification.  */
3560     cond_free(&ctx->null_cond);
3561     if (a->c) {
3562         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3563     }
3564     return nullify_end(ctx);
3565 }
3566 
3567 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3568 {
3569     unsigned len, width;
3570     uint64_t mask0, mask1;
3571     TCGv_i64 dest;
3572 
3573     if (!ctx->is_pa20 && a->d) {
3574         return false;
3575     }
3576     if (a->c) {
3577         nullify_over(ctx);
3578     }
3579 
3580     len = a->len;
3581     width = a->d ? 64 : 32;
3582     if (a->cpos + len > width) {
3583         len = width - a->cpos;
3584     }
3585 
3586     dest = dest_gpr(ctx, a->t);
3587     mask0 = deposit64(0, a->cpos, len, a->i);
3588     mask1 = deposit64(-1, a->cpos, len, a->i);
3589 
3590     if (a->nz) {
3591         TCGv_i64 src = load_gpr(ctx, a->t);
3592         tcg_gen_andi_i64(dest, src, mask1);
3593         tcg_gen_ori_i64(dest, dest, mask0);
3594     } else {
3595         tcg_gen_movi_i64(dest, mask0);
3596     }
3597     save_gpr(ctx, a->t, dest);
3598 
3599     /* Install the new nullification.  */
3600     cond_free(&ctx->null_cond);
3601     if (a->c) {
3602         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3603     }
3604     return nullify_end(ctx);
3605 }
3606 
3607 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3608 {
3609     unsigned rs = a->nz ? a->t : 0;
3610     unsigned len, width;
3611     TCGv_i64 dest, val;
3612 
3613     if (!ctx->is_pa20 && a->d) {
3614         return false;
3615     }
3616     if (a->c) {
3617         nullify_over(ctx);
3618     }
3619 
3620     len = a->len;
3621     width = a->d ? 64 : 32;
3622     if (a->cpos + len > width) {
3623         len = width - a->cpos;
3624     }
3625 
3626     dest = dest_gpr(ctx, a->t);
3627     val = load_gpr(ctx, a->r);
3628     if (rs == 0) {
3629         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3630     } else {
3631         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3632     }
3633     save_gpr(ctx, a->t, dest);
3634 
3635     /* Install the new nullification.  */
3636     cond_free(&ctx->null_cond);
3637     if (a->c) {
3638         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3639     }
3640     return nullify_end(ctx);
3641 }
3642 
3643 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3644                        bool d, bool nz, unsigned len, TCGv_i64 val)
3645 {
3646     unsigned rs = nz ? rt : 0;
3647     unsigned widthm1 = d ? 63 : 31;
3648     TCGv_i64 mask, tmp, shift, dest;
3649     uint64_t msb = 1ULL << (len - 1);
3650 
3651     dest = dest_gpr(ctx, rt);
3652     shift = tcg_temp_new_i64();
3653     tmp = tcg_temp_new_i64();
3654 
3655     /* Convert big-endian bit numbering in SAR to left-shift.  */
3656     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3657     tcg_gen_xori_i64(shift, shift, widthm1);
3658 
3659     mask = tcg_temp_new_i64();
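         /* Note msb + (msb - 1) == (1 << len) - 1, written this way
            so that len == 64 does not overshift.  */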
3660     tcg_gen_movi_i64(mask, msb + (msb - 1));
3661     tcg_gen_and_i64(tmp, val, mask);
3662     if (rs) {
3663         tcg_gen_shl_i64(mask, mask, shift);
3664         tcg_gen_shl_i64(tmp, tmp, shift);
3665         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3666         tcg_gen_or_i64(dest, dest, tmp);
3667     } else {
3668         tcg_gen_shl_i64(dest, tmp, shift);
3669     }
3670     save_gpr(ctx, rt, dest);
3671 
3672     /* Install the new nullification.  */
3673     cond_free(&ctx->null_cond);
3674     if (c) {
3675         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3676     }
3677     return nullify_end(ctx);
3678 }
3679 
3680 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3681 {
3682     if (!ctx->is_pa20 && a->d) {
3683         return false;
3684     }
3685     if (a->c) {
3686         nullify_over(ctx);
3687     }
3688     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3689                       load_gpr(ctx, a->r));
3690 }
3691 
3692 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3693 {
3694     if (!ctx->is_pa20 && a->d) {
3695         return false;
3696     }
3697     if (a->c) {
3698         nullify_over(ctx);
3699     }
3700     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3701                       tcg_constant_i64(a->i));
3702 }
3703 
3704 static bool trans_be(DisasContext *ctx, arg_be *a)
3705 {
3706     TCGv_i64 tmp;
3707 
3708 #ifdef CONFIG_USER_ONLY
3709     /* ??? It seems like there should be a good way of using
3710        "be disp(sr2, r0)", the canonical gateway entry mechanism
3711        to our advantage.  But that appears to be inconvenient to
3712        manage alongside branch delay slots.  Therefore we handle
3713        entry into the gateway page via absolute address.  */
3714     /* Since we don't implement spaces, just branch.  Note the special
3715        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3716        goto_tb to the TB containing the syscall.  */
3717     if (a->b == 0) {
3718         return do_dbranch(ctx, a->disp, a->l, a->n);
3719     }
3720 #else
3721     nullify_over(ctx);
3722 #endif
3723 
3724     tmp = tcg_temp_new_i64();
3725     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3726     tmp = do_ibranch_priv(ctx, tmp);
3727 
3728 #ifdef CONFIG_USER_ONLY
3729     return do_ibranch(ctx, tmp, a->l, a->n);
3730 #else
3731     TCGv_i64 new_spc = tcg_temp_new_i64();
3732 
3733     load_spr(ctx, new_spc, a->sp);
3734     if (a->l) {
3735         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3736         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3737     }
3738     if (a->n && use_nullify_skip(ctx)) {
3739         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3740         tcg_gen_addi_i64(tmp, tmp, 4);
3741         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3742         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3743         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3744     } else {
3745         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3746         if (ctx->iaoq_b == -1) {
3747             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3748         }
3749         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3750         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3751         nullify_set(ctx, a->n);
3752     }
3753     tcg_gen_lookup_and_goto_ptr();
3754     ctx->base.is_jmp = DISAS_NORETURN;
3755     return nullify_end(ctx);
3756 #endif
3757 }
3758 
3759 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3760 {
3761     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3762 }
3763 
3764 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3765 {
3766     uint64_t dest = iaoq_dest(ctx, a->disp);
3767 
3768     nullify_over(ctx);
3769 
3770     /* Make sure the caller hasn't done something weird with the queue.
3771      * ??? This is not quite the same as the PSW[B] bit, which would be
3772      * expensive to track.  Real hardware will trap for
3773      *    b  gateway
3774      *    b  gateway+4  (in delay slot of first branch)
3775      * However, checking for a non-sequential instruction queue *will*
3776      * diagnose the security hole
3777      *    b  gateway
3778      *    b  evil
3779      * in which instructions at evil would run with increased privs.
3780      */
3781     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3782         return gen_illegal(ctx);
3783     }
3784 
3785 #ifndef CONFIG_USER_ONLY
3786     if (ctx->tb_flags & PSW_C) {
3787         CPUHPPAState *env = cpu_env(ctx->cs);
3788         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3789         /* If we could not find a TLB entry, then we need to generate an
3790            ITLB miss exception so the kernel will provide it.
3791            The resulting TLB fill operation will invalidate this TB and
3792            we will re-translate, at which point we *will* be able to find
3793            the TLB entry and determine if this is in fact a gateway page.  */
3794         if (type < 0) {
3795             gen_excp(ctx, EXCP_ITLB_MISS);
3796             return true;
3797         }
3798         /* No change for non-gateway pages or for priv decrease.  */
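             /* Access rights types 4..7 mark gateway pages that promote
                to privilege level type - 4; the new privilege lands in
                the low 2 bits of the (64-bit) offset.  */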
3799         if (type >= 4 && type - 4 < ctx->privilege) {
3800             dest = deposit64(dest, 0, 2, type - 4);
3801         }
3802     } else {
3803         dest &= -4;  /* priv = 0 */
3804     }
3805 #endif
3806 
3807     if (a->l) {
3808         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3809         if (ctx->privilege < 3) {
3810             tcg_gen_andi_i64(tmp, tmp, -4);
3811         }
3812         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3813         save_gpr(ctx, a->l, tmp);
3814     }
3815 
3816     return do_dbranch(ctx, dest, 0, a->n);
3817 }
3818 
3819 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3820 {
3821     if (a->x) {
3822         TCGv_i64 tmp = tcg_temp_new_i64();
3823         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3824         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
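             /* I.e. target = iaoq_f + 8 + GR[x] * 8; the index register
                counts instruction pairs.  */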
3825         /* The computation here never changes privilege level.  */
3826         return do_ibranch(ctx, tmp, a->l, a->n);
3827     } else {
3828         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3829         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3830     }
3831 }
3832 
3833 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3834 {
3835     TCGv_i64 dest;
3836 
3837     if (a->x == 0) {
3838         dest = load_gpr(ctx, a->b);
3839     } else {
3840         dest = tcg_temp_new_i64();
3841         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3842         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3843     }
3844     dest = do_ibranch_priv(ctx, dest);
3845     return do_ibranch(ctx, dest, 0, a->n);
3846 }
3847 
3848 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3849 {
3850     TCGv_i64 dest;
3851 
3852 #ifdef CONFIG_USER_ONLY
3853     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3854     return do_ibranch(ctx, dest, a->l, a->n);
3855 #else
3856     nullify_over(ctx);
3857     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3858 
3859     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3860     if (ctx->iaoq_b == -1) {
3861         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3862     }
3863     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3864     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3865     if (a->l) {
3866         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3867     }
3868     nullify_set(ctx, a->n);
3869     tcg_gen_lookup_and_goto_ptr();
3870     ctx->base.is_jmp = DISAS_NORETURN;
3871     return nullify_end(ctx);
3872 #endif
3873 }
3874 
3875 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3876 {
3877     /* All branch target stack instructions implement as nop. */
3878     return ctx->is_pa20;
3879 }
3880 
3881 /*
3882  * Float class 0
3883  */
3884 
3885 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3886 {
3887     tcg_gen_mov_i32(dst, src);
3888 }
3889 
3890 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3891 {
3892     uint64_t ret;
3893 
3894     if (ctx->is_pa20) {
3895         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3896     } else {
3897         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3898     }
3899 
3900     nullify_over(ctx);
3901     save_frd(0, tcg_constant_i64(ret));
3902     return nullify_end(ctx);
3903 }
3904 
3905 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3906 {
3907     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3908 }
3909 
3910 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3911 {
3912     tcg_gen_mov_i64(dst, src);
3913 }
3914 
3915 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3916 {
3917     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3918 }
3919 
3920 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3921 {
3922     tcg_gen_andi_i32(dst, src, INT32_MAX);
3923 }
3924 
3925 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3926 {
3927     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3928 }
3929 
3930 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3931 {
3932     tcg_gen_andi_i64(dst, src, INT64_MAX);
3933 }
3934 
3935 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3936 {
3937     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3938 }
3939 
3940 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3941 {
3942     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3943 }
3944 
3945 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3946 {
3947     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3948 }
3949 
3950 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3951 {
3952     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3953 }
3954 
3955 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3956 {
3957     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3958 }
3959 
3960 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3961 {
3962     tcg_gen_xori_i32(dst, src, INT32_MIN);
3963 }
3964 
3965 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3966 {
3967     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3968 }
3969 
3970 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3971 {
3972     tcg_gen_xori_i64(dst, src, INT64_MIN);
3973 }
3974 
3975 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3976 {
3977     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3978 }
3979 
3980 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3981 {
3982     tcg_gen_ori_i32(dst, src, INT32_MIN);
3983 }
3984 
3985 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3986 {
3987     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3988 }
3989 
3990 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3991 {
3992     tcg_gen_ori_i64(dst, src, INT64_MIN);
3993 }
3994 
3995 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3996 {
3997     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3998 }
3999 
4000 /*
4001  * Float class 1
4002  */
4003 
4004 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4005 {
4006     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4007 }
4008 
4009 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4010 {
4011     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4012 }
4013 
4014 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4015 {
4016     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4017 }
4018 
4019 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4020 {
4021     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4022 }
4023 
4024 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4025 {
4026     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4027 }
4028 
4029 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4030 {
4031     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4032 }
4033 
4034 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4035 {
4036     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4037 }
4038 
4039 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4040 {
4041     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4042 }
4043 
4044 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4045 {
4046     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4047 }
4048 
4049 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4050 {
4051     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4052 }
4053 
4054 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4055 {
4056     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4057 }
4058 
4059 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4060 {
4061     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4062 }
4063 
4064 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4065 {
4066     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4067 }
4068 
4069 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4070 {
4071     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4072 }
4073 
4074 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4075 {
4076     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4077 }
4078 
4079 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4080 {
4081     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4082 }
4083 
4084 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4085 {
4086     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4087 }
4088 
4089 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4090 {
4091     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4092 }
4093 
4094 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4095 {
4096     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4097 }
4098 
4099 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4100 {
4101     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4102 }
4103 
4104 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4105 {
4106     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4107 }
4108 
4109 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4110 {
4111     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4112 }
4113 
4114 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4115 {
4116     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4117 }
4118 
4119 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4120 {
4121     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4122 }
4123 
4124 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4125 {
4126     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4127 }
4128 
4129 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4130 {
4131     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4132 }
4133 
4134 /*
4135  * Float class 2
4136  */
4137 
4138 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4139 {
4140     TCGv_i32 ta, tb, tc, ty;
4141 
4142     nullify_over(ctx);
4143 
4144     ta = load_frw0_i32(a->r1);
4145     tb = load_frw0_i32(a->r2);
4146     ty = tcg_constant_i32(a->y);
4147     tc = tcg_constant_i32(a->c);
4148 
4149     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4150 
4151     return nullify_end(ctx);
4152 }
4153 
4154 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4155 {
4156     TCGv_i64 ta, tb;
4157     TCGv_i32 tc, ty;
4158 
4159     nullify_over(ctx);
4160 
4161     ta = load_frd0(a->r1);
4162     tb = load_frd0(a->r2);
4163     ty = tcg_constant_i32(a->y);
4164     tc = tcg_constant_i32(a->c);
4165 
4166     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4167 
4168     return nullify_end(ctx);
4169 }
4170 
4171 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4172 {
4173     TCGv_i64 t;
4174 
4175     nullify_over(ctx);
4176 
4177     t = tcg_temp_new_i64();
4178     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4179 
4180     if (a->y == 1) {
4181         int mask;
4182         bool inv = false;
4183 
4184         switch (a->c) {
4185         case 0: /* simple */
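                 /* The andi below tests the C (comparison result) bit,
                    bit 26 of the fr0 status shadow.  */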
4186             tcg_gen_andi_i64(t, t, 0x4000000);
4187             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4188             goto done;
4189         case 2: /* rej */
4190             inv = true;
4191             /* fallthru */
4192         case 1: /* acc */
4193             mask = 0x43ff800;
4194             break;
4195         case 6: /* rej8 */
4196             inv = true;
4197             /* fallthru */
4198         case 5: /* acc8 */
4199             mask = 0x43f8000;
4200             break;
4201         case 9: /* acc6 */
4202             mask = 0x43e0000;
4203             break;
4204         case 13: /* acc4 */
4205             mask = 0x4380000;
4206             break;
4207         case 17: /* acc2 */
4208             mask = 0x4200000;
4209             break;
4210         default:
4211             gen_illegal(ctx);
4212             return true;
4213         }
4214         if (inv) {
4215             TCGv_i64 c = tcg_constant_i64(mask);
4216             tcg_gen_or_i64(t, t, c);
4217             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4218         } else {
4219             tcg_gen_andi_i64(t, t, mask);
4220             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4221         }
4222     } else {
4223         unsigned cbit = (a->y ^ 1) - 1;
4224 
4225         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4226         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4227     }
4228 
4229  done:
4230     return nullify_end(ctx);
4231 }
4232 
4233 /*
4234  * Float class 3
4235  */
4236 
4237 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4238 {
4239     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4240 }
4241 
4242 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4243 {
4244     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4245 }
4246 
4247 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4248 {
4249     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4250 }
4251 
4252 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4253 {
4254     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4255 }
4256 
4257 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4258 {
4259     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4260 }
4261 
4262 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4263 {
4264     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4265 }
4266 
4267 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4268 {
4269     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4270 }
4271 
4272 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4273 {
4274     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4275 }
4276 
4277 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4278 {
4279     TCGv_i64 x, y;
4280 
4281     nullify_over(ctx);
4282 
4283     x = load_frw0_i64(a->r1);
4284     y = load_frw0_i64(a->r2);
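         /* Both operands were zero-extended from 32 bits, so the
            64-bit product is the exact unsigned result.  */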
4285     tcg_gen_mul_i64(x, x, y);
4286     save_frd(a->t, x);
4287 
4288     return nullify_end(ctx);
4289 }
4290 
4291 /* Convert the fmpyadd single-precision register encodings to standard.  */
4292 static inline int fmpyadd_s_reg(unsigned r)
4293 {
4294     return (r & 16) * 2 + 16 + (r & 15);
4295 }
4296 
4297 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4298 {
4299     int tm = fmpyadd_s_reg(a->tm);
4300     int ra = fmpyadd_s_reg(a->ra);
4301     int ta = fmpyadd_s_reg(a->ta);
4302     int rm2 = fmpyadd_s_reg(a->rm2);
4303     int rm1 = fmpyadd_s_reg(a->rm1);
4304 
4305     nullify_over(ctx);
4306 
4307     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4308     do_fop_weww(ctx, ta, ta, ra,
4309                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4310 
4311     return nullify_end(ctx);
4312 }
4313 
4314 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4315 {
4316     return do_fmpyadd_s(ctx, a, false);
4317 }
4318 
4319 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4320 {
4321     return do_fmpyadd_s(ctx, a, true);
4322 }
4323 
4324 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4325 {
4326     nullify_over(ctx);
4327 
4328     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4329     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4330                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4331 
4332     return nullify_end(ctx);
4333 }
4334 
4335 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4336 {
4337     return do_fmpyadd_d(ctx, a, false);
4338 }
4339 
4340 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4341 {
4342     return do_fmpyadd_d(ctx, a, true);
4343 }
4344 
4345 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4346 {
4347     TCGv_i32 x, y, z;
4348 
4349     nullify_over(ctx);
4350     x = load_frw0_i32(a->rm1);
4351     y = load_frw0_i32(a->rm2);
4352     z = load_frw0_i32(a->ra3);
4353 
4354     if (a->neg) {
4355         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4356     } else {
4357         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4358     }
4359 
4360     save_frw_i32(a->t, x);
4361     return nullify_end(ctx);
4362 }
4363 
4364 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4365 {
4366     TCGv_i64 x, y, z;
4367 
4368     nullify_over(ctx);
4369     x = load_frd0(a->rm1);
4370     y = load_frd0(a->rm2);
4371     z = load_frd0(a->ra3);
4372 
4373     if (a->neg) {
4374         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4375     } else {
4376         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4377     }
4378 
4379     save_frd(a->t, x);
4380     return nullify_end(ctx);
4381 }
4382 
4383 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4384 {
4385     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4386 #ifndef CONFIG_USER_ONLY
4387     if (a->i == 0x100) {
4388         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4389         nullify_over(ctx);
4390         gen_helper_diag_btlb(tcg_env);
4391         return nullify_end(ctx);
4392     }
4393 #endif
4394     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4395     return true;
4396 }
4397 
4398 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4399 {
4400     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4401     int bound;
4402 
4403     ctx->cs = cs;
4404     ctx->tb_flags = ctx->base.tb->flags;
4405     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4406 
4407 #ifdef CONFIG_USER_ONLY
4408     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4409     ctx->mmu_idx = MMU_USER_IDX;
4410     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4411     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4412     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4413 #else
4414     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4415     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4416                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4417                     : MMU_PHYS_IDX);
4418 
4419     /* Recover the IAOQ values from the GVA + PRIV.  */
4420     uint64_t cs_base = ctx->base.tb->cs_base;
4421     uint64_t iasq_f = cs_base & ~0xffffffffull;
4422     int32_t diff = cs_base;
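         /* cs_base carries iasq_f in its high 32 bits and the
            iaoq_b - iaoq_f displacement (0 when unknown) in its
            low 32 bits.  */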
4423 
4424     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4425     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4426 #endif
4427     ctx->iaoq_n = -1;
4428     ctx->iaoq_n_var = NULL;
4429 
4430     ctx->zero = tcg_constant_i64(0);
4431 
4432     /* Bound the number of instructions by those left on the page.  */
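         /* TARGET_PAGE_MASK is negative, so the OR sets all high bits
            and the negation yields the bytes left in the page; e.g. 8
            bytes before a page boundary gives bound == 2.  */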
4433     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4434     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4435 }
4436 
4437 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4438 {
4439     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4440 
4441     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4442     ctx->null_cond = cond_make_f();
4443     ctx->psw_n_nonzero = false;
4444     if (ctx->tb_flags & PSW_N) {
4445         ctx->null_cond.c = TCG_COND_ALWAYS;
4446         ctx->psw_n_nonzero = true;
4447     }
4448     ctx->null_lab = NULL;
4449 }
4450 
4451 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4452 {
4453     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4454 
4455     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4456 }
4457 
4458 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4459 {
4460     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4461     CPUHPPAState *env = cpu_env(cs);
4462     DisasJumpType ret;
4463 
4464     /* Execute one insn.  */
4465 #ifdef CONFIG_USER_ONLY
4466     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4467         do_page_zero(ctx);
4468         ret = ctx->base.is_jmp;
4469         assert(ret != DISAS_NEXT);
4470     } else
4471 #endif
4472     {
4473         /* Always fetch the insn, even if nullified, so that we check
4474            the page permissions for execute.  */
4475         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4476 
4477         /* Set up the IA queue for the next insn.
4478            This will be overwritten by a branch.  */
4479         if (ctx->iaoq_b == -1) {
4480             ctx->iaoq_n = -1;
4481             ctx->iaoq_n_var = tcg_temp_new_i64();
4482             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4483         } else {
4484             ctx->iaoq_n = ctx->iaoq_b + 4;
4485             ctx->iaoq_n_var = NULL;
4486         }
4487 
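             /* A statically nullified insn is skipped without decoding;
                nullification lasts one insn, so reset to NEVER for the
                insn that follows.  */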
4488         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4489             ctx->null_cond.c = TCG_COND_NEVER;
4490             ret = DISAS_NEXT;
4491         } else {
4492             ctx->insn = insn;
4493             if (!decode(ctx, insn)) {
4494                 gen_illegal(ctx);
4495             }
4496             ret = ctx->base.is_jmp;
4497             assert(ctx->null_lab == NULL);
4498         }
4499     }
4500 
4501     /* Advance the insn queue.  Note that this check also detects
4502        a privilege change within the instruction queue.  */
4503     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4504         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4505             && use_goto_tb(ctx, ctx->iaoq_b)
4506             && (ctx->null_cond.c == TCG_COND_NEVER
4507                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4508             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4509             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4510             ctx->base.is_jmp = ret = DISAS_NORETURN;
4511         } else {
4512             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4513         }
4514     }
4515     ctx->iaoq_f = ctx->iaoq_b;
4516     ctx->iaoq_b = ctx->iaoq_n;
4517     ctx->base.pc_next += 4;
4518 
4519     switch (ret) {
4520     case DISAS_NORETURN:
4521     case DISAS_IAQ_N_UPDATED:
4522         break;
4523 
4524     case DISAS_NEXT:
4525     case DISAS_IAQ_N_STALE:
4526     case DISAS_IAQ_N_STALE_EXIT:
4527         if (ctx->iaoq_f == -1) {
4528             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4529             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4530 #ifndef CONFIG_USER_ONLY
4531             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4532 #endif
4533             nullify_save(ctx);
4534             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4535                                 ? DISAS_EXIT
4536                                 : DISAS_IAQ_N_UPDATED);
4537         } else if (ctx->iaoq_b == -1) {
4538             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4539         }
4540         break;
4541 
4542     default:
4543         g_assert_not_reached();
4544     }
4545 }
4546 
4547 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4548 {
4549     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4550     DisasJumpType is_jmp = ctx->base.is_jmp;
4551 
4552     switch (is_jmp) {
4553     case DISAS_NORETURN:
4554         break;
4555     case DISAS_TOO_MANY:
4556     case DISAS_IAQ_N_STALE:
4557     case DISAS_IAQ_N_STALE_EXIT:
4558         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4559         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4560         nullify_save(ctx);
4561         /* FALLTHRU */
4562     case DISAS_IAQ_N_UPDATED:
4563         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4564             tcg_gen_lookup_and_goto_ptr();
4565             break;
4566         }
4567         /* FALLTHRU */
4568     case DISAS_EXIT:
4569         tcg_gen_exit_tb(NULL, 0);
4570         break;
4571     default:
4572         g_assert_not_reached();
4573     }
4574 }
4575 
4576 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4577                               CPUState *cs, FILE *logfile)
4578 {
4579     target_ulong pc = dcbase->pc_first;
4580 
4581 #ifdef CONFIG_USER_ONLY
4582     switch (pc) {
4583     case 0x00:
4584         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4585         return;
4586     case 0xb0:
4587         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4588         return;
4589     case 0xe0:
4590         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4591         return;
4592     case 0x100:
4593         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4594         return;
4595     }
4596 #endif
4597 
4598     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4599     target_disas(logfile, cs, pc, dcbase->tb->size);
4600 }
4601 
4602 static const TranslatorOps hppa_tr_ops = {
4603     .init_disas_context = hppa_tr_init_disas_context,
4604     .tb_start           = hppa_tr_tb_start,
4605     .insn_start         = hppa_tr_insn_start,
4606     .translate_insn     = hppa_tr_translate_insn,
4607     .tb_stop            = hppa_tr_tb_stop,
4608     .disas_log          = hppa_tr_disas_log,
4609 };
4610 
4611 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4612                            target_ulong pc, void *host_pc)
4613 {
4614     DisasContext ctx;
4615     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4616 }
4617