xref: /openbmc/qemu/target/hppa/translate.c (revision 6fd0c7bc)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30 
31 #define HELPER_H "helper.h"
32 #include "exec/helper-info.c.inc"
33 #undef  HELPER_H
34 
35 
36 /* Since we have a distinction between register size and address size,
37    we need to redefine all of these.  */
38 
39 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
40 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
41 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
42 #define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
43 
44 
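/*
 * A DisasCond describes a TCG comparison "a0 <c> a1".  TCG_COND_NEVER is
 * used to mean "no condition pending" and TCG_COND_ALWAYS to mean
 * "unconditionally true"; in both cases a0 and a1 are left NULL.
 */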
45 typedef struct DisasCond {
46     TCGCond c;
47     TCGv_i64 a0, a1;
48 } DisasCond;
49 
50 typedef struct DisasContext {
51     DisasContextBase base;
52     CPUState *cs;
53 
54     uint64_t iaoq_f;
55     uint64_t iaoq_b;
56     uint64_t iaoq_n;
57     TCGv_i64 iaoq_n_var;
58 
59     DisasCond null_cond;
60     TCGLabel *null_lab;
61 
62     uint32_t insn;
63     uint32_t tb_flags;
64     int mmu_idx;
65     int privilege;
66     bool psw_n_nonzero;
67     bool is_pa20;
68 
69 #ifdef CONFIG_USER_ONLY
70     MemOp unalign;
71 #endif
72 } DisasContext;
73 
74 #ifdef CONFIG_USER_ONLY
75 #define UNALIGN(C)  (C)->unalign
76 #else
77 #define UNALIGN(C)  MO_ALIGN
78 #endif
79 
80 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
81 static int expand_sm_imm(DisasContext *ctx, int val)
82 {
83     if (val & PSW_SM_E) {
84         val = (val & ~PSW_SM_E) | PSW_E;
85     }
86     if (val & PSW_SM_W) {
87         val = (val & ~PSW_SM_W) | PSW_W;
88     }
89     return val;
90 }
91 
92 /* Inverted space register: an encoded 0 means sr0 explicitly, not inferred from base.  */
93 static int expand_sr3x(DisasContext *ctx, int val)
94 {
95     return ~val;
96 }
97 
98 /* Convert the M:A bits within a memory insn to the tri-state value
99    we use for the final M.  */
100 static int ma_to_m(DisasContext *ctx, int val)
101 {
102     return val & 2 ? (val & 1 ? -1 : 1) : 0;
103 }
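/*
 * For reference: M=0 yields 0 (no base modification); M=1,A=0 yields +1
 * (post-modify, the ",MA" completer); M=1,A=1 yields -1 (pre-modify, the
 * ",MB" completer), matching the <0 / >0 / ==0 convention documented
 * above do_load_32 below.
 */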
104 
105 /* Convert the sign of the displacement to a pre or post-modify.  */
106 static int pos_to_m(DisasContext *ctx, int val)
107 {
108     return val ? 1 : -1;
109 }
110 
111 static int neg_to_m(DisasContext *ctx, int val)
112 {
113     return val ? -1 : 1;
114 }
115 
116 /* Used for branch targets and fp memory ops.  */
117 static int expand_shl2(DisasContext *ctx, int val)
118 {
119     return val << 2;
120 }
121 
122 /* Used for fp memory ops.  */
123 static int expand_shl3(DisasContext *ctx, int val)
124 {
125     return val << 3;
126 }
127 
128 /* Used for assemble_21.  */
129 static int expand_shl11(DisasContext *ctx, int val)
130 {
131     return val << 11;
132 }
133 
134 static int assemble_6(DisasContext *ctx, int val)
135 {
136     /*
137      * Officially, 32 * x + 32 - y.
138      * Here, x is already in bit 5, and y is [4:0].
139      * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
140      * with the overflow from bit 4 summing with x.
141      */
142     return (val ^ 31) + 1;
143 }
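/*
 * Worked example of the identity above: for x=1, y=5 the field value is
 * val = 0b100101 = 37, and (37 ^ 31) + 1 = 0b111010 + 1 = 59, which
 * matches 32 * 1 + 32 - 5 = 59.
 */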
144 
145 /* Translate CMPI doubleword conditions to standard. */
146 static int cmpbid_c(DisasContext *ctx, int val)
147 {
148     return val ? val : 4; /* 0 == "*<<" */
149 }
150 
151 
152 /* Include the auto-generated decoder.  */
153 #include "decode-insns.c.inc"
154 
155 /* We are not using a goto_tb (for whatever reason), but have updated
156    the iaq (for whatever reason), so don't do it again on exit.  */
157 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
158 
159 /* We are exiting the TB, but have neither emitted a goto_tb, nor
160    updated the iaq for the next instruction to be executed.  */
161 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
162 
163 /* Similarly, but we want to return to the main loop immediately
164    to recognize unmasked interrupts.  */
165 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
166 #define DISAS_EXIT                  DISAS_TARGET_3
167 
168 /* global register indexes */
169 static TCGv_i64 cpu_gr[32];
170 static TCGv_i64 cpu_sr[4];
171 static TCGv_i64 cpu_srH;
172 static TCGv_i64 cpu_iaoq_f;
173 static TCGv_i64 cpu_iaoq_b;
174 static TCGv_i64 cpu_iasq_f;
175 static TCGv_i64 cpu_iasq_b;
176 static TCGv_i64 cpu_sar;
177 static TCGv_i64 cpu_psw_n;
178 static TCGv_i64 cpu_psw_v;
179 static TCGv_i64 cpu_psw_cb;
180 static TCGv_i64 cpu_psw_cb_msb;
181 
182 void hppa_translate_init(void)
183 {
184 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
185 
186     typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
187     static const GlobalVar vars[] = {
188         { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
189         DEF_VAR(psw_n),
190         DEF_VAR(psw_v),
191         DEF_VAR(psw_cb),
192         DEF_VAR(psw_cb_msb),
193         DEF_VAR(iaoq_f),
194         DEF_VAR(iaoq_b),
195     };
196 
197 #undef DEF_VAR
198 
199     /* Use the symbolic register names that match the disassembler.  */
200     static const char gr_names[32][4] = {
201         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
202         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
203         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
204         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
205     };
206     /* SR[4-7] are not global registers so that we can index them.  */
207     static const char sr_names[5][4] = {
208         "sr0", "sr1", "sr2", "sr3", "srH"
209     };
210 
211     int i;
212 
213     cpu_gr[0] = NULL;
214     for (i = 1; i < 32; i++) {
215         cpu_gr[i] = tcg_global_mem_new(tcg_env,
216                                        offsetof(CPUHPPAState, gr[i]),
217                                        gr_names[i]);
218     }
219     for (i = 0; i < 4; i++) {
220         cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
221                                            offsetof(CPUHPPAState, sr[i]),
222                                            sr_names[i]);
223     }
224     cpu_srH = tcg_global_mem_new_i64(tcg_env,
225                                      offsetof(CPUHPPAState, sr[4]),
226                                      sr_names[4]);
227 
228     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
229         const GlobalVar *v = &vars[i];
230         *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
231     }
232 
233     cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
234                                         offsetof(CPUHPPAState, iasq_f),
235                                         "iasq_f");
236     cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
237                                         offsetof(CPUHPPAState, iasq_b),
238                                         "iasq_b");
239 }
240 
241 static DisasCond cond_make_f(void)
242 {
243     return (DisasCond){
244         .c = TCG_COND_NEVER,
245         .a0 = NULL,
246         .a1 = NULL,
247     };
248 }
249 
250 static DisasCond cond_make_t(void)
251 {
252     return (DisasCond){
253         .c = TCG_COND_ALWAYS,
254         .a0 = NULL,
255         .a1 = NULL,
256     };
257 }
258 
259 static DisasCond cond_make_n(void)
260 {
261     return (DisasCond){
262         .c = TCG_COND_NE,
263         .a0 = cpu_psw_n,
264         .a1 = tcg_constant_i64(0)
265     };
266 }
267 
268 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
269 {
270     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
271     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
272 }
273 
274 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
275 {
276     return cond_make_tmp(c, a0, tcg_constant_i64(0));
277 }
278 
279 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
280 {
281     TCGv_i64 tmp = tcg_temp_new();
282     tcg_gen_mov_i64(tmp, a0);
283     return cond_make_0_tmp(c, tmp);
284 }
285 
286 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
287 {
288     TCGv_i64 t0 = tcg_temp_new();
289     TCGv_i64 t1 = tcg_temp_new();
290 
291     tcg_gen_mov_i64(t0, a0);
292     tcg_gen_mov_i64(t1, a1);
293     return cond_make_tmp(c, t0, t1);
294 }
295 
296 static void cond_free(DisasCond *cond)
297 {
298     switch (cond->c) {
299     default:
300         cond->a0 = NULL;
301         cond->a1 = NULL;
302         /* fallthru */
303     case TCG_COND_ALWAYS:
304         cond->c = TCG_COND_NEVER;
305         break;
306     case TCG_COND_NEVER:
307         break;
308     }
309 }
310 
311 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
312 {
313     if (reg == 0) {
314         TCGv_i64 t = tcg_temp_new();
315         tcg_gen_movi_i64(t, 0);
316         return t;
317     } else {
318         return cpu_gr[reg];
319     }
320 }
321 
322 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
323 {
324     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
325         return tcg_temp_new();
326     } else {
327         return cpu_gr[reg];
328     }
329 }
330 
331 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
332 {
333     if (ctx->null_cond.c != TCG_COND_NEVER) {
334         tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
335                             ctx->null_cond.a1, dest, t);
336     } else {
337         tcg_gen_mov_i64(dest, t);
338     }
339 }
340 
341 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
342 {
343     if (reg != 0) {
344         save_or_nullify(ctx, cpu_gr[reg], t);
345     }
346 }
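/*
 * Note on the pattern above: dest_gpr hands back a scratch temp whenever
 * the current insn may be nullified (null_cond != NEVER), so that the
 * final write in save_gpr/save_or_nullify can be committed with a movcond,
 * leaving the architectural register untouched when the insn is nullified.
 */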
347 
348 #if HOST_BIG_ENDIAN
349 # define HI_OFS  0
350 # define LO_OFS  4
351 #else
352 # define HI_OFS  4
353 # define LO_OFS  0
354 #endif
355 
356 static TCGv_i32 load_frw_i32(unsigned rt)
357 {
358     TCGv_i32 ret = tcg_temp_new_i32();
359     tcg_gen_ld_i32(ret, tcg_env,
360                    offsetof(CPUHPPAState, fr[rt & 31])
361                    + (rt & 32 ? LO_OFS : HI_OFS));
362     return ret;
363 }
364 
365 static TCGv_i32 load_frw0_i32(unsigned rt)
366 {
367     if (rt == 0) {
368         TCGv_i32 ret = tcg_temp_new_i32();
369         tcg_gen_movi_i32(ret, 0);
370         return ret;
371     } else {
372         return load_frw_i32(rt);
373     }
374 }
375 
376 static TCGv_i64 load_frw0_i64(unsigned rt)
377 {
378     TCGv_i64 ret = tcg_temp_new_i64();
379     if (rt == 0) {
380         tcg_gen_movi_i64(ret, 0);
381     } else {
382         tcg_gen_ld32u_i64(ret, tcg_env,
383                           offsetof(CPUHPPAState, fr[rt & 31])
384                           + (rt & 32 ? LO_OFS : HI_OFS));
385     }
386     return ret;
387 }
388 
389 static void save_frw_i32(unsigned rt, TCGv_i32 val)
390 {
391     tcg_gen_st_i32(val, tcg_env,
392                    offsetof(CPUHPPAState, fr[rt & 31])
393                    + (rt & 32 ? LO_OFS : HI_OFS));
394 }
395 
396 #undef HI_OFS
397 #undef LO_OFS
398 
399 static TCGv_i64 load_frd(unsigned rt)
400 {
401     TCGv_i64 ret = tcg_temp_new_i64();
402     tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
403     return ret;
404 }
405 
406 static TCGv_i64 load_frd0(unsigned rt)
407 {
408     if (rt == 0) {
409         TCGv_i64 ret = tcg_temp_new_i64();
410         tcg_gen_movi_i64(ret, 0);
411         return ret;
412     } else {
413         return load_frd(rt);
414     }
415 }
416 
417 static void save_frd(unsigned rt, TCGv_i64 val)
418 {
419     tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
420 }
421 
422 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
423 {
424 #ifdef CONFIG_USER_ONLY
425     tcg_gen_movi_i64(dest, 0);
426 #else
427     if (reg < 4) {
428         tcg_gen_mov_i64(dest, cpu_sr[reg]);
429     } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
430         tcg_gen_mov_i64(dest, cpu_srH);
431     } else {
432         tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
433     }
434 #endif
435 }
436 
437 /* Skip over the implementation of an insn that has been nullified.
438    Use this when the insn is too complex for a conditional move.  */
439 static void nullify_over(DisasContext *ctx)
440 {
441     if (ctx->null_cond.c != TCG_COND_NEVER) {
442         /* The always condition should have been handled in the main loop.  */
443         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
444 
445         ctx->null_lab = gen_new_label();
446 
447         /* If we're using PSW[N], copy it to a temp because... */
448         if (ctx->null_cond.a0 == cpu_psw_n) {
449             ctx->null_cond.a0 = tcg_temp_new();
450             tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
451         }
452         /* ... we clear it before branching over the implementation,
453            so that (1) it's clear after nullifying this insn and
454            (2) if this insn nullifies the next, PSW[N] is valid.  */
455         if (ctx->psw_n_nonzero) {
456             ctx->psw_n_nonzero = false;
457             tcg_gen_movi_i64(cpu_psw_n, 0);
458         }
459 
460         tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
461                            ctx->null_cond.a1, ctx->null_lab);
462         cond_free(&ctx->null_cond);
463     }
464 }
465 
466 /* Save the current nullification state to PSW[N].  */
467 static void nullify_save(DisasContext *ctx)
468 {
469     if (ctx->null_cond.c == TCG_COND_NEVER) {
470         if (ctx->psw_n_nonzero) {
471             tcg_gen_movi_i64(cpu_psw_n, 0);
472         }
473         return;
474     }
475     if (ctx->null_cond.a0 != cpu_psw_n) {
476         tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
477                             ctx->null_cond.a0, ctx->null_cond.a1);
478         ctx->psw_n_nonzero = true;
479     }
480     cond_free(&ctx->null_cond);
481 }
482 
483 /* Set PSW[N] to X.  The intention is that this is used immediately
484    before a goto_tb/exit_tb, so that there is no fallthru path to other
485    code within the TB.  Therefore we do not update psw_n_nonzero.  */
486 static void nullify_set(DisasContext *ctx, bool x)
487 {
488     if (ctx->psw_n_nonzero || x) {
489         tcg_gen_movi_i64(cpu_psw_n, x);
490     }
491 }
492 
493 /* Mark the end of an instruction that may have been nullified.
494    This is the pair to nullify_over.  Always returns true so that
495    it may be tail-called from a translate function.  */
496 static bool nullify_end(DisasContext *ctx)
497 {
498     TCGLabel *null_lab = ctx->null_lab;
499     DisasJumpType status = ctx->base.is_jmp;
500 
501     /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
502        For UPDATED, we cannot update on the nullified path.  */
503     assert(status != DISAS_IAQ_N_UPDATED);
504 
505     if (likely(null_lab == NULL)) {
506         /* The current insn wasn't conditional or handled the condition
507            applied to it without a branch, so the (new) setting of
508            NULL_COND can be applied directly to the next insn.  */
509         return true;
510     }
511     ctx->null_lab = NULL;
512 
513     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
514         /* The next instruction will be unconditional,
515            and NULL_COND already reflects that.  */
516         gen_set_label(null_lab);
517     } else {
518         /* The insn that we just executed is itself nullifying the next
519            instruction.  Store the condition in the PSW[N] global.
520            We asserted PSW[N] = 0 in nullify_over, so that after the
521            label we have the proper value in place.  */
522         nullify_save(ctx);
523         gen_set_label(null_lab);
524         ctx->null_cond = cond_make_n();
525     }
526     if (status == DISAS_NORETURN) {
527         ctx->base.is_jmp = DISAS_NEXT;
528     }
529     return true;
530 }
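/*
 * The usual pairing of nullify_over/nullify_end in a translate routine is:
 *
 *     nullify_over(ctx);           branch around the body if the previous
 *                                  insn nullified this one
 *     ...emit the operation...
 *     return nullify_end(ctx);     close the skip label, fix up PSW[N]
 *
 * as in do_add_reg, do_sub_reg, etc. below (often guarded by "if (a->cf)").
 */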
531 
532 static uint64_t gva_offset_mask(DisasContext *ctx)
533 {
534     return (ctx->tb_flags & PSW_W
535             ? MAKE_64BIT_MASK(0, 62)
536             : MAKE_64BIT_MASK(0, 32));
537 }
538 
539 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
540                             uint64_t ival, TCGv_i64 vval)
541 {
542     uint64_t mask = gva_offset_mask(ctx);
543 
544     if (ival != -1) {
545         tcg_gen_movi_i64(dest, ival & mask);
546         return;
547     }
548     tcg_debug_assert(vval != NULL);
549 
550     /*
551      * We know that the IAOQ is already properly masked.
552      * This optimization is primarily for "iaoq_f = iaoq_b".
553      */
554     if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
555         tcg_gen_mov_i64(dest, vval);
556     } else {
557         tcg_gen_andi_i64(dest, vval, mask);
558     }
559 }
560 
561 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
562 {
563     return ctx->iaoq_f + disp + 8;
564 }
565 
566 static void gen_excp_1(int exception)
567 {
568     gen_helper_excp(tcg_env, tcg_constant_i32(exception));
569 }
570 
571 static void gen_excp(DisasContext *ctx, int exception)
572 {
573     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
574     copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
575     nullify_save(ctx);
576     gen_excp_1(exception);
577     ctx->base.is_jmp = DISAS_NORETURN;
578 }
579 
580 static bool gen_excp_iir(DisasContext *ctx, int exc)
581 {
582     nullify_over(ctx);
583     tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
584                    tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
585     gen_excp(ctx, exc);
586     return nullify_end(ctx);
587 }
588 
589 static bool gen_illegal(DisasContext *ctx)
590 {
591     return gen_excp_iir(ctx, EXCP_ILL);
592 }
593 
594 #ifdef CONFIG_USER_ONLY
595 #define CHECK_MOST_PRIVILEGED(EXCP) \
596     return gen_excp_iir(ctx, EXCP)
597 #else
598 #define CHECK_MOST_PRIVILEGED(EXCP) \
599     do {                                     \
600         if (ctx->privilege != 0) {           \
601             return gen_excp_iir(ctx, EXCP);  \
602         }                                    \
603     } while (0)
604 #endif
605 
606 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
607 {
608     return translator_use_goto_tb(&ctx->base, dest);
609 }
610 
611 /* If the next insn is to be nullified, and it's on the same page,
612    and we're not attempting to set a breakpoint on it, then we can
613    totally skip the nullified insn.  This avoids creating and
614    executing a TB that merely branches to the next TB.  */
615 static bool use_nullify_skip(DisasContext *ctx)
616 {
617     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
618             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
619 }
620 
621 static void gen_goto_tb(DisasContext *ctx, int which,
622                         uint64_t f, uint64_t b)
623 {
624     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
625         tcg_gen_goto_tb(which);
626         copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
627         copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
628         tcg_gen_exit_tb(ctx->base.tb, which);
629     } else {
630         copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
631         copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
632         tcg_gen_lookup_and_goto_ptr();
633     }
634 }
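/*
 * In short: when both queue values are known at translation time and
 * goto_tb is permitted, the TB is chained directly; otherwise the IAOQ
 * front/back globals are updated and we fall back to a dynamic lookup
 * via tcg_gen_lookup_and_goto_ptr().
 */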
635 
636 static bool cond_need_sv(int c)
637 {
638     return c == 2 || c == 3 || c == 6;
639 }
640 
641 static bool cond_need_cb(int c)
642 {
643     return c == 4 || c == 5;
644 }
645 
646 /* Need extensions from TCGv_i32 to TCGv_i64. */
647 static bool cond_need_ext(DisasContext *ctx, bool d)
648 {
649     return !(ctx->is_pa20 && d);
650 }
651 
652 /*
653  * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
654  * the Parisc 1.1 Architecture Reference Manual for details.
655  */
656 
657 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
658                          TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
659 {
660     DisasCond cond;
661     TCGv_i64 tmp;
662 
663     switch (cf >> 1) {
664     case 0: /* Never / TR    (0 / 1) */
665         cond = cond_make_f();
666         break;
667     case 1: /* = / <>        (Z / !Z) */
668         if (cond_need_ext(ctx, d)) {
669             tmp = tcg_temp_new();
670             tcg_gen_ext32u_i64(tmp, res);
671             res = tmp;
672         }
673         cond = cond_make_0(TCG_COND_EQ, res);
674         break;
675     case 2: /* < / >=        (N ^ V / !(N ^ V)) */
676         tmp = tcg_temp_new();
677         tcg_gen_xor_i64(tmp, res, sv);
678         if (cond_need_ext(ctx, d)) {
679             tcg_gen_ext32s_i64(tmp, tmp);
680         }
681         cond = cond_make_0_tmp(TCG_COND_LT, tmp);
682         break;
683     case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
684         /*
685          * Simplify:
686          *   (N ^ V) | Z
687          *   ((res < 0) ^ (sv < 0)) | !res
688          *   ((res ^ sv) < 0) | !res
689          *   (~(res ^ sv) >= 0) | !res
690          *   !(~(res ^ sv) >> 31) | !res
691          *   !(~(res ^ sv) >> 31 & res)
692          */
693         tmp = tcg_temp_new();
694         tcg_gen_eqv_i64(tmp, res, sv);
695         if (cond_need_ext(ctx, d)) {
696             tcg_gen_sextract_i64(tmp, tmp, 31, 1);
697             tcg_gen_and_i64(tmp, tmp, res);
698             tcg_gen_ext32u_i64(tmp, tmp);
699         } else {
700             tcg_gen_sari_i64(tmp, tmp, 63);
701             tcg_gen_and_i64(tmp, tmp, res);
702         }
703         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
704         break;
705     case 4: /* NUV / UV      (!C / C) */
706         /* Only bit 0 of cb_msb is ever set. */
707         cond = cond_make_0(TCG_COND_EQ, cb_msb);
708         break;
709     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
710         tmp = tcg_temp_new();
711         tcg_gen_neg_i64(tmp, cb_msb);
712         tcg_gen_and_i64(tmp, tmp, res);
713         if (cond_need_ext(ctx, d)) {
714             tcg_gen_ext32u_i64(tmp, tmp);
715         }
716         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
717         break;
718     case 6: /* SV / NSV      (V / !V) */
719         if (cond_need_ext(ctx, d)) {
720             tmp = tcg_temp_new();
721             tcg_gen_ext32s_i64(tmp, sv);
722             sv = tmp;
723         }
724         cond = cond_make_0(TCG_COND_LT, sv);
725         break;
726     case 7: /* OD / EV */
727         tmp = tcg_temp_new();
728         tcg_gen_andi_i64(tmp, res, 1);
729         cond = cond_make_0_tmp(TCG_COND_NE, tmp);
730         break;
731     default:
732         g_assert_not_reached();
733     }
734     if (cf & 1) {
735         cond.c = tcg_invert_cond(cond.c);
736     }
737 
738     return cond;
739 }
740 
741 /* Similar, but for the special case of subtraction without borrow, we
742    can use the inputs directly.  This can allow other computation to be
743    deleted as unused.  */
744 
745 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
746                              TCGv_i64 res, TCGv_i64 in1,
747                              TCGv_i64 in2, TCGv_i64 sv)
748 {
749     TCGCond tc;
750     bool ext_uns;
751 
752     switch (cf >> 1) {
753     case 1: /* = / <> */
754         tc = TCG_COND_EQ;
755         ext_uns = true;
756         break;
757     case 2: /* < / >= */
758         tc = TCG_COND_LT;
759         ext_uns = false;
760         break;
761     case 3: /* <= / > */
762         tc = TCG_COND_LE;
763         ext_uns = false;
764         break;
765     case 4: /* << / >>= */
766         tc = TCG_COND_LTU;
767         ext_uns = true;
768         break;
769     case 5: /* <<= / >> */
770         tc = TCG_COND_LEU;
771         ext_uns = true;
772         break;
773     default:
774         return do_cond(ctx, cf, d, res, NULL, sv);
775     }
776 
777     if (cf & 1) {
778         tc = tcg_invert_cond(tc);
779     }
780     if (cond_need_ext(ctx, d)) {
781         TCGv_i64 t1 = tcg_temp_new();
782         TCGv_i64 t2 = tcg_temp_new();
783 
784         if (ext_uns) {
785             tcg_gen_ext32u_i64(t1, in1);
786             tcg_gen_ext32u_i64(t2, in2);
787         } else {
788             tcg_gen_ext32s_i64(t1, in1);
789             tcg_gen_ext32s_i64(t2, in2);
790         }
791         return cond_make_tmp(tc, t1, t2);
792     }
793     return cond_make(tc, in1, in2);
794 }
795 
796 /*
797  * Similar, but for logicals, where the carry and overflow bits are not
798  * computed, and use of them is undefined.
799  *
800  * Undefined or not, hardware does not trap.  It seems reasonable to
801  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
802  * how cases c={2,3} are treated.
803  */
804 
805 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
806                              TCGv_i64 res)
807 {
808     TCGCond tc;
809     bool ext_uns;
810 
811     switch (cf) {
812     case 0:  /* never */
813     case 9:  /* undef, C */
814     case 11: /* undef, C & !Z */
815     case 12: /* undef, V */
816         return cond_make_f();
817 
818     case 1:  /* true */
819     case 8:  /* undef, !C */
820     case 10: /* undef, !C | Z */
821     case 13: /* undef, !V */
822         return cond_make_t();
823 
824     case 2:  /* == */
825         tc = TCG_COND_EQ;
826         ext_uns = true;
827         break;
828     case 3:  /* <> */
829         tc = TCG_COND_NE;
830         ext_uns = true;
831         break;
832     case 4:  /* < */
833         tc = TCG_COND_LT;
834         ext_uns = false;
835         break;
836     case 5:  /* >= */
837         tc = TCG_COND_GE;
838         ext_uns = false;
839         break;
840     case 6:  /* <= */
841         tc = TCG_COND_LE;
842         ext_uns = false;
843         break;
844     case 7:  /* > */
845         tc = TCG_COND_GT;
846         ext_uns = false;
847         break;
848 
849     case 14: /* OD */
850     case 15: /* EV */
851         return do_cond(ctx, cf, d, res, NULL, NULL);
852 
853     default:
854         g_assert_not_reached();
855     }
856 
857     if (cond_need_ext(ctx, d)) {
858         TCGv_i64 tmp = tcg_temp_new();
859 
860         if (ext_uns) {
861             tcg_gen_ext32u_i64(tmp, res);
862         } else {
863             tcg_gen_ext32s_i64(tmp, res);
864         }
865         return cond_make_0_tmp(tc, tmp);
866     }
867     return cond_make_0(tc, res);
868 }
869 
870 /* Similar, but for shift/extract/deposit conditions.  */
871 
872 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
873                              TCGv_i64 res)
874 {
875     unsigned c, f;
876 
877     /* Convert the compressed condition codes to standard.
878        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
879        4-7 are the reverse of 0-3.  */
880     c = orig & 3;
881     if (c == 3) {
882         c = 7;
883     }
884     f = (orig & 4) / 4;
885 
886     return do_log_cond(ctx, c * 2 + f, d, res);
887 }
888 
889 /* Similar, but for unit conditions.  */
890 
891 static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
892                               TCGv_i64 in1, TCGv_i64 in2)
893 {
894     DisasCond cond;
895     TCGv_i64 tmp, cb = NULL;
896     uint64_t d_repl = d ? 0x0000000100000001ull : 1;
897 
898     if (cf & 8) {
899         /* Since we want to test lots of carry-out bits all at once, do not
900          * do our normal thing and compute carry-in of bit B+1 since that
901          * leaves us with carry bits spread across two words.
902          */
903         cb = tcg_temp_new();
904         tmp = tcg_temp_new();
905         tcg_gen_or_i64(cb, in1, in2);
906         tcg_gen_and_i64(tmp, in1, in2);
907         tcg_gen_andc_i64(cb, cb, res);
908         tcg_gen_or_i64(cb, cb, tmp);
909     }
910 
911     switch (cf >> 1) {
912     case 0: /* never / TR */
913     case 1: /* undefined */
914     case 5: /* undefined */
915         cond = cond_make_f();
916         break;
917 
918     case 2: /* SBZ / NBZ */
919         /* See hasless(v,1) from
920          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
921          */
922         tmp = tcg_temp_new();
923         tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
924         tcg_gen_andc_i64(tmp, tmp, res);
925         tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
926         cond = cond_make_0(TCG_COND_NE, tmp);
927         break;
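        /*
         * E.g. (32-bit, d=0): res = 0xff00ffff contains a zero byte, so
         * res - 0x01010101 = 0xfdfffefe, that & ~res = 0x00ff0000, and
         * 0x00800000 survives the 0x80808080 mask (nonzero => some byte
         * was zero).  A value with no zero byte, e.g. 0x01010101, yields 0.
         */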
928 
929     case 3: /* SHZ / NHZ */
930         tmp = tcg_temp_new();
931         tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
932         tcg_gen_andc_i64(tmp, tmp, res);
933         tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
934         cond = cond_make_0(TCG_COND_NE, tmp);
935         break;
936 
937     case 4: /* SDC / NDC */
938         tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
939         cond = cond_make_0(TCG_COND_NE, cb);
940         break;
941 
942     case 6: /* SBC / NBC */
943         tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
944         cond = cond_make_0(TCG_COND_NE, cb);
945         break;
946 
947     case 7: /* SHC / NHC */
948         tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
949         cond = cond_make_0(TCG_COND_NE, cb);
950         break;
951 
952     default:
953         g_assert_not_reached();
954     }
955     if (cf & 1) {
956         cond.c = tcg_invert_cond(cond.c);
957     }
958 
959     return cond;
960 }
961 
962 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
963                           TCGv_i64 cb, TCGv_i64 cb_msb)
964 {
965     if (cond_need_ext(ctx, d)) {
966         TCGv_i64 t = tcg_temp_new();
967         tcg_gen_extract_i64(t, cb, 32, 1);
968         return t;
969     }
970     return cb_msb;
971 }
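/*
 * For narrow (32-bit) operations the interesting carry is the one out of
 * bit 31, which the add/sub paths below leave in bit 32 of the per-bit
 * carry vector CB; for true 64-bit operations it is CB_MSB.
 */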
972 
973 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
974 {
975     return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
976 }
977 
978 /* Compute signed overflow for addition.  */
979 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
980                           TCGv_i64 in1, TCGv_i64 in2)
981 {
982     TCGv_i64 sv = tcg_temp_new();
983     TCGv_i64 tmp = tcg_temp_new();
984 
985     tcg_gen_xor_i64(sv, res, in1);
986     tcg_gen_xor_i64(tmp, in1, in2);
987     tcg_gen_andc_i64(sv, sv, tmp);
988 
989     return sv;
990 }
991 
992 /* Compute signed overflow for subtraction.  */
993 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
994                           TCGv_i64 in1, TCGv_i64 in2)
995 {
996     TCGv_i64 sv = tcg_temp_new();
997     TCGv_i64 tmp = tcg_temp_new();
998 
999     tcg_gen_xor_i64(sv, res, in1);
1000     tcg_gen_xor_i64(tmp, in1, in2);
1001     tcg_gen_and_i64(sv, sv, tmp);
1002 
1003     return sv;
1004 }
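/*
 * Both helpers above use the usual sign-bit identities: for addition,
 * overflow iff the operands have equal signs and the result's sign
 * differs, i.e. (res ^ in1) & ~(in1 ^ in2); for subtraction, overflow
 * iff the operands have opposite signs and the result's sign differs
 * from in1, i.e. (res ^ in1) & (in1 ^ in2).  The condition paths only
 * ever examine the sign bit of SV.
 */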
1005 
1006 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1007                    TCGv_i64 in2, unsigned shift, bool is_l,
1008                    bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1009 {
1010     TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
1011     unsigned c = cf >> 1;
1012     DisasCond cond;
1013 
1014     dest = tcg_temp_new();
1015     cb = NULL;
1016     cb_msb = NULL;
1017     cb_cond = NULL;
1018 
1019     if (shift) {
1020         tmp = tcg_temp_new();
1021         tcg_gen_shli_i64(tmp, in1, shift);
1022         in1 = tmp;
1023     }
1024 
1025     if (!is_l || cond_need_cb(c)) {
1026         TCGv_i64 zero = tcg_constant_i64(0);
1027         cb_msb = tcg_temp_new();
1028         cb = tcg_temp_new();
1029 
1030         tcg_gen_add2_i64(dest, cb_msb, in1, zero, in2, zero);
1031         if (is_c) {
1032             tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1033                              get_psw_carry(ctx, d), zero);
1034         }
1035         tcg_gen_xor_i64(cb, in1, in2);
1036         tcg_gen_xor_i64(cb, cb, dest);
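        /*
         * Full-adder identity: dest = in1 ^ in2 ^ carry_in at each bit,
         * so in1 ^ in2 ^ dest recovers the carry into each bit position,
         * while CB_MSB holds the carry out of bit 63.  This is the per-bit
         * carry vector written back to psw_cb below and later consulted
         * via get_psw_carry.
         */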
1037         if (cond_need_cb(c)) {
1038             cb_cond = get_carry(ctx, d, cb, cb_msb);
1039         }
1040     } else {
1041         tcg_gen_add_i64(dest, in1, in2);
1042         if (is_c) {
1043             tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1044         }
1045     }
1046 
1047     /* Compute signed overflow if required.  */
1048     sv = NULL;
1049     if (is_tsv || cond_need_sv(c)) {
1050         sv = do_add_sv(ctx, dest, in1, in2);
1051         if (is_tsv) {
1052             /* ??? Need to include overflow from shift.  */
1053             gen_helper_tsv(tcg_env, sv);
1054         }
1055     }
1056 
1057     /* Emit any conditional trap before any writeback.  */
1058     cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
1059     if (is_tc) {
1060         tmp = tcg_temp_new();
1061         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1062         gen_helper_tcond(tcg_env, tmp);
1063     }
1064 
1065     /* Write back the result.  */
1066     if (!is_l) {
1067         save_or_nullify(ctx, cpu_psw_cb, cb);
1068         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1069     }
1070     save_gpr(ctx, rt, dest);
1071 
1072     /* Install the new nullification.  */
1073     cond_free(&ctx->null_cond);
1074     ctx->null_cond = cond;
1075 }
1076 
1077 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1078                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1079 {
1080     TCGv_i64 tcg_r1, tcg_r2;
1081 
1082     if (a->cf) {
1083         nullify_over(ctx);
1084     }
1085     tcg_r1 = load_gpr(ctx, a->r1);
1086     tcg_r2 = load_gpr(ctx, a->r2);
1087     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1088            is_tsv, is_tc, is_c, a->cf, a->d);
1089     return nullify_end(ctx);
1090 }
1091 
1092 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1093                        bool is_tsv, bool is_tc)
1094 {
1095     TCGv_i64 tcg_im, tcg_r2;
1096 
1097     if (a->cf) {
1098         nullify_over(ctx);
1099     }
1100     tcg_im = tcg_constant_i64(a->i);
1101     tcg_r2 = load_gpr(ctx, a->r);
1102     /* All ADDI conditions are 32-bit. */
1103     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1104     return nullify_end(ctx);
1105 }
1106 
1107 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1108                    TCGv_i64 in2, bool is_tsv, bool is_b,
1109                    bool is_tc, unsigned cf, bool d)
1110 {
1111     TCGv_i64 dest, sv, cb, cb_msb, zero, tmp;
1112     unsigned c = cf >> 1;
1113     DisasCond cond;
1114 
1115     dest = tcg_temp_new();
1116     cb = tcg_temp_new();
1117     cb_msb = tcg_temp_new();
1118 
1119     zero = tcg_constant_i64(0);
1120     if (is_b) {
1121         /* DEST,C = IN1 + ~IN2 + C.  */
1122         tcg_gen_not_i64(cb, in2);
1123         tcg_gen_add2_i64(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
1124         tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, zero);
1125         tcg_gen_xor_i64(cb, cb, in1);
1126         tcg_gen_xor_i64(cb, cb, dest);
1127     } else {
1128         /*
1129          * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
1130          * operations by seeding the high word with 1 and subtracting.
1131          */
1132         TCGv_i64 one = tcg_constant_i64(1);
1133         tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, zero);
1134         tcg_gen_eqv_i64(cb, in1, in2);
1135         tcg_gen_xor_i64(cb, cb, dest);
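        /*
         * Seeding the high words as {1, 0} makes CB_MSB = 1 - borrow,
         * which is exactly the carry out of IN1 + ~IN2 + 1; likewise
         * IN1 ^ ~IN2 == IN1 EQV IN2, so xoring that with DEST recovers
         * the same per-bit carry vector as the addition form above.
         */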
1136     }
1137 
1138     /* Compute signed overflow if required.  */
1139     sv = NULL;
1140     if (is_tsv || cond_need_sv(c)) {
1141         sv = do_sub_sv(ctx, dest, in1, in2);
1142         if (is_tsv) {
1143             gen_helper_tsv(tcg_env, sv);
1144         }
1145     }
1146 
1147     /* Compute the condition.  We cannot use the special case for borrow.  */
1148     if (!is_b) {
1149         cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1150     } else {
1151         cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1152     }
1153 
1154     /* Emit any conditional trap before any writeback.  */
1155     if (is_tc) {
1156         tmp = tcg_temp_new();
1157         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1158         gen_helper_tcond(tcg_env, tmp);
1159     }
1160 
1161     /* Write back the result.  */
1162     save_or_nullify(ctx, cpu_psw_cb, cb);
1163     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1164     save_gpr(ctx, rt, dest);
1165 
1166     /* Install the new nullification.  */
1167     cond_free(&ctx->null_cond);
1168     ctx->null_cond = cond;
1169 }
1170 
1171 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1172                        bool is_tsv, bool is_b, bool is_tc)
1173 {
1174     TCGv_i64 tcg_r1, tcg_r2;
1175 
1176     if (a->cf) {
1177         nullify_over(ctx);
1178     }
1179     tcg_r1 = load_gpr(ctx, a->r1);
1180     tcg_r2 = load_gpr(ctx, a->r2);
1181     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1182     return nullify_end(ctx);
1183 }
1184 
1185 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1186 {
1187     TCGv_i64 tcg_im, tcg_r2;
1188 
1189     if (a->cf) {
1190         nullify_over(ctx);
1191     }
1192     tcg_im = tcg_constant_i64(a->i);
1193     tcg_r2 = load_gpr(ctx, a->r);
1194     /* All SUBI conditions are 32-bit. */
1195     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1196     return nullify_end(ctx);
1197 }
1198 
1199 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1200                       TCGv_i64 in2, unsigned cf, bool d)
1201 {
1202     TCGv_i64 dest, sv;
1203     DisasCond cond;
1204 
1205     dest = tcg_temp_new();
1206     tcg_gen_sub_i64(dest, in1, in2);
1207 
1208     /* Compute signed overflow if required.  */
1209     sv = NULL;
1210     if (cond_need_sv(cf >> 1)) {
1211         sv = do_sub_sv(ctx, dest, in1, in2);
1212     }
1213 
1214     /* Form the condition for the compare.  */
1215     cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1216 
1217     /* Clear.  */
1218     tcg_gen_movi_i64(dest, 0);
1219     save_gpr(ctx, rt, dest);
1220 
1221     /* Install the new nullification.  */
1222     cond_free(&ctx->null_cond);
1223     ctx->null_cond = cond;
1224 }
1225 
1226 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1227                    TCGv_i64 in2, unsigned cf, bool d,
1228                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1229 {
1230     TCGv_i64 dest = dest_gpr(ctx, rt);
1231 
1232     /* Perform the operation, and writeback.  */
1233     fn(dest, in1, in2);
1234     save_gpr(ctx, rt, dest);
1235 
1236     /* Install the new nullification.  */
1237     cond_free(&ctx->null_cond);
1238     if (cf) {
1239         ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1240     }
1241 }
1242 
1243 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1244                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1245 {
1246     TCGv_i64 tcg_r1, tcg_r2;
1247 
1248     if (a->cf) {
1249         nullify_over(ctx);
1250     }
1251     tcg_r1 = load_gpr(ctx, a->r1);
1252     tcg_r2 = load_gpr(ctx, a->r2);
1253     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1254     return nullify_end(ctx);
1255 }
1256 
1257 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1258                     TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
1259                     void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1260 {
1261     TCGv_i64 dest;
1262     DisasCond cond;
1263 
1264     if (cf == 0) {
1265         dest = dest_gpr(ctx, rt);
1266         fn(dest, in1, in2);
1267         save_gpr(ctx, rt, dest);
1268         cond_free(&ctx->null_cond);
1269     } else {
1270         dest = tcg_temp_new();
1271         fn(dest, in1, in2);
1272 
1273         cond = do_unit_cond(cf, d, dest, in1, in2);
1274 
1275         if (is_tc) {
1276             TCGv_i64 tmp = tcg_temp_new();
1277             tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1278             gen_helper_tcond(tcg_env, tmp);
1279         }
1280         save_gpr(ctx, rt, dest);
1281 
1282         cond_free(&ctx->null_cond);
1283         ctx->null_cond = cond;
1284     }
1285 }
1286 
1287 #ifndef CONFIG_USER_ONLY
1288 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1289    from the top 2 bits of the base register.  There are a few system
1290    instructions that have a 3-bit space specifier, for which SR0 is
1291    not special.  To handle this, pass ~SP.  */
1292 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1293 {
1294     TCGv_ptr ptr;
1295     TCGv_i64 tmp;
1296     TCGv_i64 spc;
1297 
1298     if (sp != 0) {
1299         if (sp < 0) {
1300             sp = ~sp;
1301         }
1302         spc = tcg_temp_new_i64();
1303         load_spr(ctx, spc, sp);
1304         return spc;
1305     }
1306     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1307         return cpu_srH;
1308     }
1309 
1310     ptr = tcg_temp_new_ptr();
1311     tmp = tcg_temp_new();
1312     spc = tcg_temp_new_i64();
1313 
1314     /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1315     tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1316     tcg_gen_andi_i64(tmp, tmp, 030);
1317     tcg_gen_trunc_i64_ptr(ptr, tmp);
1318 
1319     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1320     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1321 
1322     return spc;
1323 }
1324 #endif
1325 
1326 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1327                      unsigned rb, unsigned rx, int scale, int64_t disp,
1328                      unsigned sp, int modify, bool is_phys)
1329 {
1330     TCGv_i64 base = load_gpr(ctx, rb);
1331     TCGv_i64 ofs;
1332     TCGv_i64 addr;
1333 
1334     /* Note that RX is mutually exclusive with DISP.  */
1335     if (rx) {
1336         ofs = tcg_temp_new();
1337         tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1338         tcg_gen_add_i64(ofs, ofs, base);
1339     } else if (disp || modify) {
1340         ofs = tcg_temp_new();
1341         tcg_gen_addi_i64(ofs, base, disp);
1342     } else {
1343         ofs = base;
1344     }
1345 
1346     *pofs = ofs;
1347     *pgva = addr = tcg_temp_new_i64();
1348     tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
1349     tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
1350 #ifndef CONFIG_USER_ONLY
1351     if (!is_phys) {
1352         tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1353     }
1354 #endif
1355 }
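/*
 * Note that for post-modify (modify > 0) the access itself uses the
 * unmodified base (the displacement only reaches the value written back
 * through *pofs), whereas pre-modify and plain accesses use the displaced
 * offset.
 */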
1356 
1357 /* Emit a memory load.  The modify parameter should be
1358  * < 0 for pre-modify,
1359  * > 0 for post-modify,
1360  * = 0 for no base register update.
1361  */
1362 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1363                        unsigned rx, int scale, int64_t disp,
1364                        unsigned sp, int modify, MemOp mop)
1365 {
1366     TCGv_i64 ofs;
1367     TCGv_i64 addr;
1368 
1369     /* Caller uses nullify_over/nullify_end.  */
1370     assert(ctx->null_cond.c == TCG_COND_NEVER);
1371 
1372     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1373              ctx->mmu_idx == MMU_PHYS_IDX);
1374     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1375     if (modify) {
1376         save_gpr(ctx, rb, ofs);
1377     }
1378 }
1379 
1380 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1381                        unsigned rx, int scale, int64_t disp,
1382                        unsigned sp, int modify, MemOp mop)
1383 {
1384     TCGv_i64 ofs;
1385     TCGv_i64 addr;
1386 
1387     /* Caller uses nullify_over/nullify_end.  */
1388     assert(ctx->null_cond.c == TCG_COND_NEVER);
1389 
1390     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1391              ctx->mmu_idx == MMU_PHYS_IDX);
1392     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1393     if (modify) {
1394         save_gpr(ctx, rb, ofs);
1395     }
1396 }
1397 
1398 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1399                         unsigned rx, int scale, int64_t disp,
1400                         unsigned sp, int modify, MemOp mop)
1401 {
1402     TCGv_i64 ofs;
1403     TCGv_i64 addr;
1404 
1405     /* Caller uses nullify_over/nullify_end.  */
1406     assert(ctx->null_cond.c == TCG_COND_NEVER);
1407 
1408     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1409              ctx->mmu_idx == MMU_PHYS_IDX);
1410     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1411     if (modify) {
1412         save_gpr(ctx, rb, ofs);
1413     }
1414 }
1415 
1416 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1417                         unsigned rx, int scale, int64_t disp,
1418                         unsigned sp, int modify, MemOp mop)
1419 {
1420     TCGv_i64 ofs;
1421     TCGv_i64 addr;
1422 
1423     /* Caller uses nullify_over/nullify_end.  */
1424     assert(ctx->null_cond.c == TCG_COND_NEVER);
1425 
1426     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1427              ctx->mmu_idx == MMU_PHYS_IDX);
1428     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1429     if (modify) {
1430         save_gpr(ctx, rb, ofs);
1431     }
1432 }
1433 
1434 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1435                     unsigned rx, int scale, int64_t disp,
1436                     unsigned sp, int modify, MemOp mop)
1437 {
1438     TCGv_i64 dest;
1439 
1440     nullify_over(ctx);
1441 
1442     if (modify == 0) {
1443         /* No base register update.  */
1444         dest = dest_gpr(ctx, rt);
1445     } else {
1446         /* Make sure if RT == RB, we see the result of the load.  */
1447         dest = tcg_temp_new();
1448     }
1449     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1450     save_gpr(ctx, rt, dest);
1451 
1452     return nullify_end(ctx);
1453 }
1454 
1455 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1456                       unsigned rx, int scale, int64_t disp,
1457                       unsigned sp, int modify)
1458 {
1459     TCGv_i32 tmp;
1460 
1461     nullify_over(ctx);
1462 
1463     tmp = tcg_temp_new_i32();
1464     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1465     save_frw_i32(rt, tmp);
1466 
1467     if (rt == 0) {
1468         gen_helper_loaded_fr0(tcg_env);
1469     }
1470 
1471     return nullify_end(ctx);
1472 }
1473 
1474 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1475 {
1476     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1477                      a->disp, a->sp, a->m);
1478 }
1479 
1480 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1481                       unsigned rx, int scale, int64_t disp,
1482                       unsigned sp, int modify)
1483 {
1484     TCGv_i64 tmp;
1485 
1486     nullify_over(ctx);
1487 
1488     tmp = tcg_temp_new_i64();
1489     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1490     save_frd(rt, tmp);
1491 
1492     if (rt == 0) {
1493         gen_helper_loaded_fr0(tcg_env);
1494     }
1495 
1496     return nullify_end(ctx);
1497 }
1498 
1499 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1500 {
1501     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1502                      a->disp, a->sp, a->m);
1503 }
1504 
1505 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1506                      int64_t disp, unsigned sp,
1507                      int modify, MemOp mop)
1508 {
1509     nullify_over(ctx);
1510     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1511     return nullify_end(ctx);
1512 }
1513 
1514 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1515                        unsigned rx, int scale, int64_t disp,
1516                        unsigned sp, int modify)
1517 {
1518     TCGv_i32 tmp;
1519 
1520     nullify_over(ctx);
1521 
1522     tmp = load_frw_i32(rt);
1523     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1524 
1525     return nullify_end(ctx);
1526 }
1527 
1528 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1529 {
1530     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1531                       a->disp, a->sp, a->m);
1532 }
1533 
1534 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1535                        unsigned rx, int scale, int64_t disp,
1536                        unsigned sp, int modify)
1537 {
1538     TCGv_i64 tmp;
1539 
1540     nullify_over(ctx);
1541 
1542     tmp = load_frd(rt);
1543     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1544 
1545     return nullify_end(ctx);
1546 }
1547 
1548 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1549 {
1550     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1551                       a->disp, a->sp, a->m);
1552 }
1553 
1554 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1555                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1556 {
1557     TCGv_i32 tmp;
1558 
1559     nullify_over(ctx);
1560     tmp = load_frw0_i32(ra);
1561 
1562     func(tmp, tcg_env, tmp);
1563 
1564     save_frw_i32(rt, tmp);
1565     return nullify_end(ctx);
1566 }
1567 
1568 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1569                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1570 {
1571     TCGv_i32 dst;
1572     TCGv_i64 src;
1573 
1574     nullify_over(ctx);
1575     src = load_frd(ra);
1576     dst = tcg_temp_new_i32();
1577 
1578     func(dst, tcg_env, src);
1579 
1580     save_frw_i32(rt, dst);
1581     return nullify_end(ctx);
1582 }
1583 
1584 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1585                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1586 {
1587     TCGv_i64 tmp;
1588 
1589     nullify_over(ctx);
1590     tmp = load_frd0(ra);
1591 
1592     func(tmp, tcg_env, tmp);
1593 
1594     save_frd(rt, tmp);
1595     return nullify_end(ctx);
1596 }
1597 
1598 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1599                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1600 {
1601     TCGv_i32 src;
1602     TCGv_i64 dst;
1603 
1604     nullify_over(ctx);
1605     src = load_frw0_i32(ra);
1606     dst = tcg_temp_new_i64();
1607 
1608     func(dst, tcg_env, src);
1609 
1610     save_frd(rt, dst);
1611     return nullify_end(ctx);
1612 }
1613 
1614 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1615                         unsigned ra, unsigned rb,
1616                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1617 {
1618     TCGv_i32 a, b;
1619 
1620     nullify_over(ctx);
1621     a = load_frw0_i32(ra);
1622     b = load_frw0_i32(rb);
1623 
1624     func(a, tcg_env, a, b);
1625 
1626     save_frw_i32(rt, a);
1627     return nullify_end(ctx);
1628 }
1629 
1630 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1631                         unsigned ra, unsigned rb,
1632                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1633 {
1634     TCGv_i64 a, b;
1635 
1636     nullify_over(ctx);
1637     a = load_frd0(ra);
1638     b = load_frd0(rb);
1639 
1640     func(a, tcg_env, a, b);
1641 
1642     save_frd(rt, a);
1643     return nullify_end(ctx);
1644 }
1645 
1646 /* Emit an unconditional branch to a direct target, which may or may not
1647    have already had nullification handled.  */
1648 static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1649                        unsigned link, bool is_n)
1650 {
1651     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1652         if (link != 0) {
1653             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1654         }
1655         ctx->iaoq_n = dest;
1656         if (is_n) {
1657             ctx->null_cond.c = TCG_COND_ALWAYS;
1658         }
1659     } else {
1660         nullify_over(ctx);
1661 
1662         if (link != 0) {
1663             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1664         }
1665 
1666         if (is_n && use_nullify_skip(ctx)) {
1667             nullify_set(ctx, 0);
1668             gen_goto_tb(ctx, 0, dest, dest + 4);
1669         } else {
1670             nullify_set(ctx, is_n);
1671             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1672         }
1673 
1674         nullify_end(ctx);
1675 
1676         nullify_set(ctx, 0);
1677         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1678         ctx->base.is_jmp = DISAS_NORETURN;
1679     }
1680     return true;
1681 }
1682 
1683 /* Emit a conditional branch to a direct target.  If the branch itself
1684    is nullified, we should have already used nullify_over.  */
1685 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1686                        DisasCond *cond)
1687 {
1688     uint64_t dest = iaoq_dest(ctx, disp);
1689     TCGLabel *taken = NULL;
1690     TCGCond c = cond->c;
1691     bool n;
1692 
1693     assert(ctx->null_cond.c == TCG_COND_NEVER);
1694 
1695     /* Handle TRUE and NEVER as direct branches.  */
1696     if (c == TCG_COND_ALWAYS) {
1697         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1698     }
1699     if (c == TCG_COND_NEVER) {
1700         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1701     }
1702 
1703     taken = gen_new_label();
1704     tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1705     cond_free(cond);
1706 
1707     /* Not taken: Condition not satisfied; nullify on backward branches. */
1708     n = is_n && disp < 0;
1709     if (n && use_nullify_skip(ctx)) {
1710         nullify_set(ctx, 0);
1711         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1712     } else {
1713         if (!n && ctx->null_lab) {
1714             gen_set_label(ctx->null_lab);
1715             ctx->null_lab = NULL;
1716         }
1717         nullify_set(ctx, n);
1718         if (ctx->iaoq_n == -1) {
1719             /* The temporary iaoq_n_var died at the branch above.
1720                Regenerate it here instead of saving it.  */
1721             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1722         }
1723         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1724     }
1725 
1726     gen_set_label(taken);
1727 
1728     /* Taken: Condition satisfied; nullify on forward branches.  */
1729     n = is_n && disp >= 0;
1730     if (n && use_nullify_skip(ctx)) {
1731         nullify_set(ctx, 0);
1732         gen_goto_tb(ctx, 1, dest, dest + 4);
1733     } else {
1734         nullify_set(ctx, n);
1735         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1736     }
1737 
1738     /* Not taken: the branch itself was nullified.  */
1739     if (ctx->null_lab) {
1740         gen_set_label(ctx->null_lab);
1741         ctx->null_lab = NULL;
1742         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1743     } else {
1744         ctx->base.is_jmp = DISAS_NORETURN;
1745     }
1746     return true;
1747 }
1748 
1749 /* Emit an unconditional branch to an indirect target.  This handles
1750    nullification of the branch itself.  */
1751 static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1752                        unsigned link, bool is_n)
1753 {
1754     TCGv_i64 a0, a1, next, tmp;
1755     TCGCond c;
1756 
1757     assert(ctx->null_lab == NULL);
1758 
1759     if (ctx->null_cond.c == TCG_COND_NEVER) {
1760         if (link != 0) {
1761             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1762         }
1763         next = tcg_temp_new();
1764         tcg_gen_mov_i64(next, dest);
1765         if (is_n) {
1766             if (use_nullify_skip(ctx)) {
1767                 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1768                 tcg_gen_addi_i64(next, next, 4);
1769                 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1770                 nullify_set(ctx, 0);
1771                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1772                 return true;
1773             }
1774             ctx->null_cond.c = TCG_COND_ALWAYS;
1775         }
1776         ctx->iaoq_n = -1;
1777         ctx->iaoq_n_var = next;
1778     } else if (is_n && use_nullify_skip(ctx)) {
1779         /* The (conditional) branch, B, nullifies the next insn, N,
1780            and we're allowed to skip execution of N (no single-step or
1781            and we're allowed to skip execution of N (no single-step or
1782            for the indirect branch consumes no special resources, we
1783            can (conditionally) skip B and continue execution.  */
1784         /* The use_nullify_skip test implies we have a known control path.  */
1785         tcg_debug_assert(ctx->iaoq_b != -1);
1786         tcg_debug_assert(ctx->iaoq_n != -1);
1787 
1788         /* We do have to handle the non-local temporary, DEST, before
1789            branching.  Since IAOQ_F is not really live at this point, we
1790            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1791         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1792         next = tcg_temp_new();
1793         tcg_gen_addi_i64(next, dest, 4);
1794         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1795 
1796         nullify_over(ctx);
1797         if (link != 0) {
1798             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1799         }
1800         tcg_gen_lookup_and_goto_ptr();
1801         return nullify_end(ctx);
1802     } else {
1803         c = ctx->null_cond.c;
1804         a0 = ctx->null_cond.a0;
1805         a1 = ctx->null_cond.a1;
1806 
1807         tmp = tcg_temp_new();
1808         next = tcg_temp_new();
1809 
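             /* If this branch is itself nullified (null_cond true), execution
                continues at the sequential iaoq_n; otherwise it transfers to
                DEST.  Select between the two with movcond so that no TCG
                branch is required.  */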
1810         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1811         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1812         ctx->iaoq_n = -1;
1813         ctx->iaoq_n_var = next;
1814 
1815         if (link != 0) {
1816             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1817         }
1818 
1819         if (is_n) {
1820             /* The branch nullifies the next insn, which means the state of N
1821                after the branch is the inverse of the state of N that applied
1822                to the branch.  */
1823             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1824             cond_free(&ctx->null_cond);
1825             ctx->null_cond = cond_make_n();
1826             ctx->psw_n_nonzero = true;
1827         } else {
1828             cond_free(&ctx->null_cond);
1829         }
1830     }
1831     return true;
1832 }
1833 
1834 /* Implement
1835  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1836  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1837  *    else
1838  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1839  * which keeps the privilege level from being increased.
1840  */
1841 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1842 {
1843     TCGv_i64 dest;
1844     switch (ctx->privilege) {
1845     case 0:
1846         /* Privilege 0 is maximum and is allowed to decrease.  */
1847         return offset;
1848     case 3:
1849         /* Privilege 3 is minimum and is never allowed to increase.  */
1850         dest = tcg_temp_new();
1851         tcg_gen_ori_i64(dest, offset, 3);
1852         break;
1853     default:
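             /* Form the target with the current privilege in bits 0:1 and
                take the unsigned maximum of that and the original offset.
                A larger value in those bits is a lower privilege, so the
                branch may lower but never raise the privilege level.  */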
1854         dest = tcg_temp_new();
1855         tcg_gen_andi_i64(dest, offset, -4);
1856         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1857         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1858         break;
1859     }
1860     return dest;
1861 }
1862 
1863 #ifdef CONFIG_USER_ONLY
1864 /* On Linux, page zero is normally marked execute only + gateway.
1865    Therefore normal read or write is supposed to fail, but specific
1866    offsets have kernel code mapped to raise permissions to implement
1867    system calls.  Handling this via an explicit check here, rather
1868    than in the "be disp(sr2,r0)" instruction that probably sent us
1869    here, is the easiest way to handle the branch delay slot on the
1870    aforementioned BE.  */
1871 static void do_page_zero(DisasContext *ctx)
1872 {
1873     TCGv_i64 tmp;
1874 
1875     /* If by some means we get here with PSW[N]=1, that implies that
1876        the B,GATE instruction would be skipped, and we'd fault on the
1877        next insn within the privileged page.  */
1878     switch (ctx->null_cond.c) {
1879     case TCG_COND_NEVER:
1880         break;
1881     case TCG_COND_ALWAYS:
1882         tcg_gen_movi_i64(cpu_psw_n, 0);
1883         goto do_sigill;
1884     default:
1885         /* Since this is always the first (and only) insn within the
1886            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1887         g_assert_not_reached();
1888     }
1889 
1890     /* Check that we didn't arrive here via some means that allowed
1891        non-sequential instruction execution.  Normally the PSW[B] bit
1892        detects this by preventing the B,GATE instruction from executing
1893        under such conditions.  */
1894     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1895         goto do_sigill;
1896     }
1897 
1898     switch (ctx->iaoq_f & -4) {
1899     case 0x00: /* Null pointer call */
1900         gen_excp_1(EXCP_IMP);
1901         ctx->base.is_jmp = DISAS_NORETURN;
1902         break;
1903 
1904     case 0xb0: /* LWS */
1905         gen_excp_1(EXCP_SYSCALL_LWS);
1906         ctx->base.is_jmp = DISAS_NORETURN;
1907         break;
1908 
1909     case 0xe0: /* SET_THREAD_POINTER */
1910         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1911         tmp = tcg_temp_new();
1912         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1913         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1914         tcg_gen_addi_i64(tmp, tmp, 4);
1915         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1916         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1917         break;
1918 
1919     case 0x100: /* SYSCALL */
1920         gen_excp_1(EXCP_SYSCALL);
1921         ctx->base.is_jmp = DISAS_NORETURN;
1922         break;
1923 
1924     default:
1925     do_sigill:
1926         gen_excp_1(EXCP_ILL);
1927         ctx->base.is_jmp = DISAS_NORETURN;
1928         break;
1929     }
1930 }
1931 #endif
1932 
1933 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1934 {
1935     cond_free(&ctx->null_cond);
1936     return true;
1937 }
1938 
1939 static bool trans_break(DisasContext *ctx, arg_break *a)
1940 {
1941     return gen_excp_iir(ctx, EXCP_BREAK);
1942 }
1943 
1944 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1945 {
1946     /* No point in nullifying the memory barrier.  */
1947     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1948 
1949     cond_free(&ctx->null_cond);
1950     return true;
1951 }
1952 
1953 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1954 {
1955     unsigned rt = a->t;
1956     TCGv_i64 tmp = dest_gpr(ctx, rt);
1957     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1958     save_gpr(ctx, rt, tmp);
1959 
1960     cond_free(&ctx->null_cond);
1961     return true;
1962 }
1963 
1964 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1965 {
1966     unsigned rt = a->t;
1967     unsigned rs = a->sp;
1968     TCGv_i64 t0 = tcg_temp_new_i64();
1969     TCGv_i64 t1 = tcg_temp_new();
1970 
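         /* Space registers are stored pre-shifted into the high 32 bits
            (see trans_mtsp below); shift back down to recover the
            architectural value.  */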
1971     load_spr(ctx, t0, rs);
1972     tcg_gen_shri_i64(t0, t0, 32);
1973     tcg_gen_trunc_i64_reg(t1, t0);
1974 
1975     save_gpr(ctx, rt, t1);
1976 
1977     cond_free(&ctx->null_cond);
1978     return true;
1979 }
1980 
1981 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1982 {
1983     unsigned rt = a->t;
1984     unsigned ctl = a->r;
1985     TCGv_i64 tmp;
1986 
1987     switch (ctl) {
1988     case CR_SAR:
1989         if (a->e == 0) {
1990             /* MFSAR without ,W masks low 5 bits.  */
1991             tmp = dest_gpr(ctx, rt);
1992             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1993             save_gpr(ctx, rt, tmp);
1994             goto done;
1995         }
1996         save_gpr(ctx, rt, cpu_sar);
1997         goto done;
1998     case CR_IT: /* Interval Timer */
1999         /* FIXME: Respect PSW_S bit.  */
2000         nullify_over(ctx);
2001         tmp = dest_gpr(ctx, rt);
2002         if (translator_io_start(&ctx->base)) {
2003             gen_helper_read_interval_timer(tmp);
2004             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2005         } else {
2006             gen_helper_read_interval_timer(tmp);
2007         }
2008         save_gpr(ctx, rt, tmp);
2009         return nullify_end(ctx);
2010     case 26:
2011     case 27:
2012         break;
2013     default:
2014         /* All other control registers are privileged.  */
2015         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2016         break;
2017     }
2018 
2019     tmp = tcg_temp_new();
2020     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2021     save_gpr(ctx, rt, tmp);
2022 
2023  done:
2024     cond_free(&ctx->null_cond);
2025     return true;
2026 }
2027 
2028 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2029 {
2030     unsigned rr = a->r;
2031     unsigned rs = a->sp;
2032     TCGv_i64 t64;
2033 
2034     if (rs >= 5) {
2035         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2036     }
2037     nullify_over(ctx);
2038 
2039     t64 = tcg_temp_new_i64();
2040     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2041     tcg_gen_shli_i64(t64, t64, 32);
2042 
2043     if (rs >= 4) {
2044         tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2045         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2046     } else {
2047         tcg_gen_mov_i64(cpu_sr[rs], t64);
2048     }
2049 
2050     return nullify_end(ctx);
2051 }
2052 
2053 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2054 {
2055     unsigned ctl = a->t;
2056     TCGv_i64 reg;
2057     TCGv_i64 tmp;
2058 
2059     if (ctl == CR_SAR) {
2060         reg = load_gpr(ctx, a->r);
2061         tmp = tcg_temp_new();
2062         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2063         save_or_nullify(ctx, cpu_sar, tmp);
2064 
2065         cond_free(&ctx->null_cond);
2066         return true;
2067     }
2068 
2069     /* All other control registers are privileged or read-only.  */
2070     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2071 
2072 #ifndef CONFIG_USER_ONLY
2073     nullify_over(ctx);
2074     reg = load_gpr(ctx, a->r);
2075 
2076     switch (ctl) {
2077     case CR_IT:
2078         gen_helper_write_interval_timer(tcg_env, reg);
2079         break;
2080     case CR_EIRR:
2081         gen_helper_write_eirr(tcg_env, reg);
2082         break;
2083     case CR_EIEM:
2084         gen_helper_write_eiem(tcg_env, reg);
2085         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2086         break;
2087 
2088     case CR_IIASQ:
2089     case CR_IIAOQ:
2090         /* FIXME: Respect PSW_Q bit */
2091         /* The write advances the queue and stores to the back element.  */
2092         tmp = tcg_temp_new();
2093         tcg_gen_ld_i64(tmp, tcg_env,
2094                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2095         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2096         tcg_gen_st_i64(reg, tcg_env,
2097                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2098         break;
2099 
2100     case CR_PID1:
2101     case CR_PID2:
2102     case CR_PID3:
2103     case CR_PID4:
2104         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2106         gen_helper_change_prot_id(tcg_env);
2108         break;
2109 
2110     default:
2111         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2112         break;
2113     }
2114     return nullify_end(ctx);
2115 #endif
2116 }
2117 
2118 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2119 {
2120     TCGv_i64 tmp = tcg_temp_new();
2121 
2122     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2123     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2124     save_or_nullify(ctx, cpu_sar, tmp);
2125 
2126     cond_free(&ctx->null_cond);
2127     return true;
2128 }
2129 
2130 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2131 {
2132     TCGv_i64 dest = dest_gpr(ctx, a->t);
2133 
2134 #ifdef CONFIG_USER_ONLY
2135     /* We don't implement space registers in user mode. */
2136     tcg_gen_movi_i64(dest, 0);
2137 #else
2138     TCGv_i64 t0 = tcg_temp_new_i64();
2139 
2140     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2141     tcg_gen_shri_i64(t0, t0, 32);
2142     tcg_gen_trunc_i64_reg(dest, t0);
2143 #endif
2144     save_gpr(ctx, a->t, dest);
2145 
2146     cond_free(&ctx->null_cond);
2147     return true;
2148 }
2149 
2150 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2151 {
2152     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2153 #ifndef CONFIG_USER_ONLY
2154     TCGv_i64 tmp;
2155 
2156     nullify_over(ctx);
2157 
2158     tmp = tcg_temp_new();
2159     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2160     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2161     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2162     save_gpr(ctx, a->t, tmp);
2163 
2164     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2165     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2166     return nullify_end(ctx);
2167 #endif
2168 }
2169 
2170 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2171 {
2172     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2173 #ifndef CONFIG_USER_ONLY
2174     TCGv_i64 tmp;
2175 
2176     nullify_over(ctx);
2177 
2178     tmp = tcg_temp_new();
2179     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2180     tcg_gen_ori_i64(tmp, tmp, a->i);
2181     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2182     save_gpr(ctx, a->t, tmp);
2183 
2184     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2185     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2186     return nullify_end(ctx);
2187 #endif
2188 }
2189 
2190 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2191 {
2192     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2193 #ifndef CONFIG_USER_ONLY
2194     TCGv_i64 tmp, reg;
2195     nullify_over(ctx);
2196 
2197     reg = load_gpr(ctx, a->r);
2198     tmp = tcg_temp_new();
2199     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2200 
2201     /* Exit the TB to recognize new interrupts.  */
2202     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2203     return nullify_end(ctx);
2204 #endif
2205 }
2206 
2207 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2208 {
2209     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2210 #ifndef CONFIG_USER_ONLY
2211     nullify_over(ctx);
2212 
2213     if (rfi_r) {
2214         gen_helper_rfi_r(tcg_env);
2215     } else {
2216         gen_helper_rfi(tcg_env);
2217     }
2218     /* Exit the TB to recognize new interrupts.  */
2219     tcg_gen_exit_tb(NULL, 0);
2220     ctx->base.is_jmp = DISAS_NORETURN;
2221 
2222     return nullify_end(ctx);
2223 #endif
2224 }
2225 
2226 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2227 {
2228     return do_rfi(ctx, false);
2229 }
2230 
2231 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2232 {
2233     return do_rfi(ctx, true);
2234 }
2235 
2236 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2237 {
2238     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2239 #ifndef CONFIG_USER_ONLY
2240     nullify_over(ctx);
2241     gen_helper_halt(tcg_env);
2242     ctx->base.is_jmp = DISAS_NORETURN;
2243     return nullify_end(ctx);
2244 #endif
2245 }
2246 
2247 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2248 {
2249     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2250 #ifndef CONFIG_USER_ONLY
2251     nullify_over(ctx);
2252     gen_helper_reset(tcg_env);
2253     ctx->base.is_jmp = DISAS_NORETURN;
2254     return nullify_end(ctx);
2255 #endif
2256 }
2257 
2258 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2259 {
2260     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2261 #ifndef CONFIG_USER_ONLY
2262     nullify_over(ctx);
2263     gen_helper_getshadowregs(tcg_env);
2264     return nullify_end(ctx);
2265 #endif
2266 }
2267 
2268 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2269 {
2270     if (a->m) {
2271         TCGv_i64 dest = dest_gpr(ctx, a->b);
2272         TCGv_i64 src1 = load_gpr(ctx, a->b);
2273         TCGv_i64 src2 = load_gpr(ctx, a->x);
2274 
2275         /* The only thing we need to do is the base register modification.  */
2276         tcg_gen_add_i64(dest, src1, src2);
2277         save_gpr(ctx, a->b, dest);
2278     }
2279     cond_free(&ctx->null_cond);
2280     return true;
2281 }
2282 
2283 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2284 {
2285     TCGv_i64 dest, ofs;
2286     TCGv_i32 level, want;
2287     TCGv_i64 addr;
2288 
2289     nullify_over(ctx);
2290 
2291     dest = dest_gpr(ctx, a->t);
2292     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2293 
2294     if (a->imm) {
2295         level = tcg_constant_i32(a->ri);
2296     } else {
2297         level = tcg_temp_new_i32();
2298         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2299         tcg_gen_andi_i32(level, level, 3);
2300     }
2301     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2302 
2303     gen_helper_probe(dest, tcg_env, addr, level, want);
2304 
2305     save_gpr(ctx, a->t, dest);
2306     return nullify_end(ctx);
2307 }
2308 
2309 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2310 {
2311     if (ctx->is_pa20) {
2312         return false;
2313     }
2314     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2315 #ifndef CONFIG_USER_ONLY
2316     TCGv_i64 addr;
2317     TCGv_i64 ofs, reg;
2318 
2319     nullify_over(ctx);
2320 
2321     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2322     reg = load_gpr(ctx, a->r);
2323     if (a->addr) {
2324         gen_helper_itlba_pa11(tcg_env, addr, reg);
2325     } else {
2326         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2327     }
2328 
2329     /* Exit TB for TLB change if mmu is enabled.  */
2330     if (ctx->tb_flags & PSW_C) {
2331         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2332     }
2333     return nullify_end(ctx);
2334 #endif
2335 }
2336 
2337 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2338 {
2339     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2340 #ifndef CONFIG_USER_ONLY
2341     TCGv_i64 addr;
2342     TCGv_i64 ofs;
2343 
2344     nullify_over(ctx);
2345 
2346     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2347     if (a->m) {
2348         save_gpr(ctx, a->b, ofs);
2349     }
2350     if (a->local) {
2351         gen_helper_ptlbe(tcg_env);
2352     } else {
2353         gen_helper_ptlb(tcg_env, addr);
2354     }
2355 
2356     /* Exit TB for TLB change if mmu is enabled.  */
2357     if (ctx->tb_flags & PSW_C) {
2358         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2359     }
2360     return nullify_end(ctx);
2361 #endif
2362 }
2363 
2364 /*
2365  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2366  * See
2367  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2368  *     page 13-9 (195/206)
2369  */
2370 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2371 {
2372     if (ctx->is_pa20) {
2373         return false;
2374     }
2375     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2376 #ifndef CONFIG_USER_ONLY
2377     TCGv_i64 addr, atl, stl;
2378     TCGv_i64 reg;
2379 
2380     nullify_over(ctx);
2381 
2382     /*
2383      * FIXME:
2384      *  if (not (pcxl or pcxl2))
2385      *    return gen_illegal(ctx);
2386      */
2387 
2388     atl = tcg_temp_new_i64();
2389     stl = tcg_temp_new_i64();
2390     addr = tcg_temp_new_i64();
2391 
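         /* Reassemble the 64-bit virtual address for the insert from the
            interruption registers: the space id (ISR or IIASQ) forms the
            high 32 bits and the offset (IOR or IIAOQ) the low 32 bits.  */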
2392     tcg_gen_ld32u_i64(stl, tcg_env,
2393                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2394                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2395     tcg_gen_ld32u_i64(atl, tcg_env,
2396                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2397                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2398     tcg_gen_shli_i64(stl, stl, 32);
2399     tcg_gen_or_tl(addr, atl, stl);
2400 
2401     reg = load_gpr(ctx, a->r);
2402     if (a->addr) {
2403         gen_helper_itlba_pa11(tcg_env, addr, reg);
2404     } else {
2405         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2406     }
2407 
2408     /* Exit TB for TLB change if mmu is enabled.  */
2409     if (ctx->tb_flags & PSW_C) {
2410         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2411     }
2412     return nullify_end(ctx);
2413 #endif
2414 }
2415 
2416 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2417 {
2418     if (!ctx->is_pa20) {
2419         return false;
2420     }
2421     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2422 #ifndef CONFIG_USER_ONLY
2423     nullify_over(ctx);
2424     {
2425         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2426         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2427 
2428         if (a->data) {
2429             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2430         } else {
2431             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2432         }
2433     }
2434     /* Exit TB for TLB change if mmu is enabled.  */
2435     if (ctx->tb_flags & PSW_C) {
2436         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2437     }
2438     return nullify_end(ctx);
2439 #endif
2440 }
2441 
2442 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2443 {
2444     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2445 #ifndef CONFIG_USER_ONLY
2446     TCGv_i64 vaddr;
2447     TCGv_i64 ofs, paddr;
2448 
2449     nullify_over(ctx);
2450 
2451     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2452 
2453     paddr = tcg_temp_new();
2454     gen_helper_lpa(paddr, tcg_env, vaddr);
2455 
2456     /* Note that physical address result overrides base modification.  */
2457     if (a->m) {
2458         save_gpr(ctx, a->b, ofs);
2459     }
2460     save_gpr(ctx, a->t, paddr);
2461 
2462     return nullify_end(ctx);
2463 #endif
2464 }
2465 
2466 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2467 {
2468     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2469 
2470     /* The Coherence Index is an implementation-defined function of the
2471        physical address.  Two addresses with the same CI have a coherent
2472        view of the cache.  Our implementation is to return 0 for all,
2473        view of the cache.  Our implementation returns 0 for all
2474        addresses, since the entire address space is coherent.  */
2475 
2476     cond_free(&ctx->null_cond);
2477     return true;
2478 }
2479 
2480 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2481 {
2482     return do_add_reg(ctx, a, false, false, false, false);
2483 }
2484 
2485 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2486 {
2487     return do_add_reg(ctx, a, true, false, false, false);
2488 }
2489 
2490 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2491 {
2492     return do_add_reg(ctx, a, false, true, false, false);
2493 }
2494 
2495 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2496 {
2497     return do_add_reg(ctx, a, false, false, false, true);
2498 }
2499 
2500 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2501 {
2502     return do_add_reg(ctx, a, false, true, false, true);
2503 }
2504 
2505 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2506 {
2507     return do_sub_reg(ctx, a, false, false, false);
2508 }
2509 
2510 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2511 {
2512     return do_sub_reg(ctx, a, true, false, false);
2513 }
2514 
2515 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2516 {
2517     return do_sub_reg(ctx, a, false, false, true);
2518 }
2519 
2520 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2521 {
2522     return do_sub_reg(ctx, a, true, false, true);
2523 }
2524 
2525 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2526 {
2527     return do_sub_reg(ctx, a, false, true, false);
2528 }
2529 
2530 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2531 {
2532     return do_sub_reg(ctx, a, true, true, false);
2533 }
2534 
2535 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2536 {
2537     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2538 }
2539 
2540 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2541 {
2542     return do_log_reg(ctx, a, tcg_gen_and_i64);
2543 }
2544 
2545 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2546 {
2547     if (a->cf == 0) {
2548         unsigned r2 = a->r2;
2549         unsigned r1 = a->r1;
2550         unsigned rt = a->t;
2551 
2552         if (rt == 0) { /* NOP */
2553             cond_free(&ctx->null_cond);
2554             return true;
2555         }
2556         if (r2 == 0) { /* COPY */
2557             if (r1 == 0) {
2558                 TCGv_i64 dest = dest_gpr(ctx, rt);
2559                 tcg_gen_movi_i64(dest, 0);
2560                 save_gpr(ctx, rt, dest);
2561             } else {
2562                 save_gpr(ctx, rt, cpu_gr[r1]);
2563             }
2564             cond_free(&ctx->null_cond);
2565             return true;
2566         }
2567 #ifndef CONFIG_USER_ONLY
2568         /* These are QEMU extensions and are nops in the real architecture:
2569          *
2570          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2571          * or %r31,%r31,%r31 -- death loop; offline cpu
2572          *                      currently implemented as idle.
2573          */
2574         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2575             /* No need to check for supervisor, as userland can only pause
2576                until the next timer interrupt.  */
2577             nullify_over(ctx);
2578 
2579             /* Advance the instruction queue.  */
2580             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2581             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2582             nullify_set(ctx, 0);
2583 
2584             /* Tell the qemu main loop to halt until this cpu has work.  */
2585             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2586                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2587             gen_excp_1(EXCP_HALTED);
2588             ctx->base.is_jmp = DISAS_NORETURN;
2589 
2590             return nullify_end(ctx);
2591         }
2592 #endif
2593     }
2594     return do_log_reg(ctx, a, tcg_gen_or_i64);
2595 }
2596 
2597 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2598 {
2599     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2600 }
2601 
2602 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2603 {
2604     TCGv_i64 tcg_r1, tcg_r2;
2605 
2606     if (a->cf) {
2607         nullify_over(ctx);
2608     }
2609     tcg_r1 = load_gpr(ctx, a->r1);
2610     tcg_r2 = load_gpr(ctx, a->r2);
2611     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2612     return nullify_end(ctx);
2613 }
2614 
2615 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2616 {
2617     TCGv_i64 tcg_r1, tcg_r2;
2618 
2619     if (a->cf) {
2620         nullify_over(ctx);
2621     }
2622     tcg_r1 = load_gpr(ctx, a->r1);
2623     tcg_r2 = load_gpr(ctx, a->r2);
2624     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2625     return nullify_end(ctx);
2626 }
2627 
2628 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2629 {
2630     TCGv_i64 tcg_r1, tcg_r2, tmp;
2631 
2632     if (a->cf) {
2633         nullify_over(ctx);
2634     }
2635     tcg_r1 = load_gpr(ctx, a->r1);
2636     tcg_r2 = load_gpr(ctx, a->r2);
2637     tmp = tcg_temp_new();
2638     tcg_gen_not_i64(tmp, tcg_r2);
2639     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2640     return nullify_end(ctx);
2641 }
2642 
2643 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2644 {
2645     return do_uaddcm(ctx, a, false);
2646 }
2647 
2648 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2649 {
2650     return do_uaddcm(ctx, a, true);
2651 }
2652 
2653 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2654 {
2655     TCGv_i64 tmp;
2656 
2657     nullify_over(ctx);
2658 
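         /* Align each digit's carry, saved in PSW[CB], with bit 0 of that
            digit, keep one bit per nibble, and scale by 6.  For DCOR the
            bits are first inverted, so digits that did not produce a carry
            have 6 subtracted; for DCOR,I digits that did carry get 6 added.  */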
2659     tmp = tcg_temp_new();
2660     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2661     if (!is_i) {
2662         tcg_gen_not_i64(tmp, tmp);
2663     }
2664     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2665     tcg_gen_muli_i64(tmp, tmp, 6);
2666     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2667             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2668     return nullify_end(ctx);
2669 }
2670 
2671 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2672 {
2673     return do_dcor(ctx, a, false);
2674 }
2675 
2676 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2677 {
2678     return do_dcor(ctx, a, true);
2679 }
2680 
2681 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2682 {
2683     TCGv_i64 dest, add1, add2, addc, zero, in1, in2;
2684     TCGv_i64 cout;
2685 
2686     nullify_over(ctx);
2687 
2688     in1 = load_gpr(ctx, a->r1);
2689     in2 = load_gpr(ctx, a->r2);
2690 
2691     add1 = tcg_temp_new();
2692     add2 = tcg_temp_new();
2693     addc = tcg_temp_new();
2694     dest = tcg_temp_new();
2695     zero = tcg_constant_i64(0);
2696 
2697     /* Form R1 << 1 | PSW[CB]{8}.  */
2698     tcg_gen_add_i64(add1, in1, in1);
2699     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2700 
2701     /*
2702      * Add or subtract R2, depending on PSW[V].  Proper computation of
2703      * carry requires that we subtract via + ~R2 + 1, as described in
2704      * the manual.  By extracting and masking V, we can produce the
2705      * proper inputs to the addition without movcond.
2706      */
2707     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2708     tcg_gen_xor_i64(add2, in2, addc);
2709     tcg_gen_andi_i64(addc, addc, 1);
2710 
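         /* Compute add1 + add2 + addc, accumulating the carry out in
            cpu_psw_cb_msb across the two add2 steps.  addc supplies the +1
            that completes the negation of R2 when subtracting.  */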
2711     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2712     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2713 
2714     /* Write back the result register.  */
2715     save_gpr(ctx, a->t, dest);
2716 
2717     /* Write back PSW[CB].  */
2718     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2719     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2720 
2721     /* Write back PSW[V] for the division step.  */
2722     cout = get_psw_carry(ctx, false);
2723     tcg_gen_neg_i64(cpu_psw_v, cout);
2724     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2725 
2726     /* Install the new nullification.  */
2727     if (a->cf) {
2728         TCGv_i64 sv = NULL;
2729         if (cond_need_sv(a->cf >> 1)) {
2730             /* ??? The lshift is supposed to contribute to overflow.  */
2731             sv = do_add_sv(ctx, dest, add1, add2);
2732         }
2733         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2734     }
2735 
2736     return nullify_end(ctx);
2737 }
2738 
2739 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2740 {
2741     return do_add_imm(ctx, a, false, false);
2742 }
2743 
2744 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2745 {
2746     return do_add_imm(ctx, a, true, false);
2747 }
2748 
2749 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2750 {
2751     return do_add_imm(ctx, a, false, true);
2752 }
2753 
2754 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2755 {
2756     return do_add_imm(ctx, a, true, true);
2757 }
2758 
2759 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2760 {
2761     return do_sub_imm(ctx, a, false);
2762 }
2763 
2764 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2765 {
2766     return do_sub_imm(ctx, a, true);
2767 }
2768 
2769 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2770 {
2771     TCGv_i64 tcg_im, tcg_r2;
2772 
2773     if (a->cf) {
2774         nullify_over(ctx);
2775     }
2776 
2777     tcg_im = tcg_constant_i64(a->i);
2778     tcg_r2 = load_gpr(ctx, a->r);
2779     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2780 
2781     return nullify_end(ctx);
2782 }
2783 
2784 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2785 {
2786     if (!ctx->is_pa20 && a->size > MO_32) {
2787         return gen_illegal(ctx);
2788     }
2789     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2790                    a->disp, a->sp, a->m, a->size | MO_TE);
2791 }
2792 
2793 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2794 {
2795     assert(a->x == 0 && a->scale == 0);
2796     if (!ctx->is_pa20 && a->size > MO_32) {
2797         return gen_illegal(ctx);
2798     }
2799     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2800 }
2801 
2802 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2803 {
2804     MemOp mop = MO_TE | MO_ALIGN | a->size;
2805     TCGv_i64 zero, dest, ofs;
2806     TCGv_i64 addr;
2807 
2808     if (!ctx->is_pa20 && a->size > MO_32) {
2809         return gen_illegal(ctx);
2810     }
2811 
2812     nullify_over(ctx);
2813 
2814     if (a->m) {
2815         /* Base register modification.  Make sure if RT == RB,
2816            we see the result of the load.  */
2817         dest = tcg_temp_new();
2818     } else {
2819         dest = dest_gpr(ctx, a->t);
2820     }
2821 
2822     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2823              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2824 
2825     /*
2826      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2827      * However actual hardware succeeds with aligned mod 4.
2828      * However, actual hardware succeeds when aligned mod 4.
2829      *
2830      * TODO: HPPA64 relaxes the over-alignment requirement
2831      * with the ,co completer.
2832      */
2833     gen_helper_ldc_check(addr);
2834 
2835     zero = tcg_constant_i64(0);
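         /* Load-and-clear: the atomic exchange with zero both returns the
            old memory value and stores zero, as LDCW/LDCD require.  */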
2836     tcg_gen_atomic_xchg_i64(dest, addr, zero, ctx->mmu_idx, mop);
2837 
2838     if (a->m) {
2839         save_gpr(ctx, a->b, ofs);
2840     }
2841     save_gpr(ctx, a->t, dest);
2842 
2843     return nullify_end(ctx);
2844 }
2845 
2846 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2847 {
2848     TCGv_i64 ofs, val;
2849     TCGv_i64 addr;
2850 
2851     nullify_over(ctx);
2852 
2853     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2854              ctx->mmu_idx == MMU_PHYS_IDX);
2855     val = load_gpr(ctx, a->r);
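         /* STBY stores only part of the word: the helpers select the byte
            lanes implied by the begin/end completer and the low address
            bits, with separate variants for parallel execution.  */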
2856     if (a->a) {
2857         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2858             gen_helper_stby_e_parallel(tcg_env, addr, val);
2859         } else {
2860             gen_helper_stby_e(tcg_env, addr, val);
2861         }
2862     } else {
2863         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2864             gen_helper_stby_b_parallel(tcg_env, addr, val);
2865         } else {
2866             gen_helper_stby_b(tcg_env, addr, val);
2867         }
2868     }
2869     if (a->m) {
2870         tcg_gen_andi_i64(ofs, ofs, ~3);
2871         save_gpr(ctx, a->b, ofs);
2872     }
2873 
2874     return nullify_end(ctx);
2875 }
2876 
2877 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
2878 {
2879     TCGv_i64 ofs, val;
2880     TCGv_i64 addr;
2881 
2882     if (!ctx->is_pa20) {
2883         return false;
2884     }
2885     nullify_over(ctx);
2886 
2887     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2888              ctx->mmu_idx == MMU_PHYS_IDX);
2889     val = load_gpr(ctx, a->r);
2890     if (a->a) {
2891         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2892             gen_helper_stdby_e_parallel(tcg_env, addr, val);
2893         } else {
2894             gen_helper_stdby_e(tcg_env, addr, val);
2895         }
2896     } else {
2897         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2898             gen_helper_stdby_b_parallel(tcg_env, addr, val);
2899         } else {
2900             gen_helper_stdby_b(tcg_env, addr, val);
2901         }
2902     }
2903     if (a->m) {
2904         tcg_gen_andi_i64(ofs, ofs, ~7);
2905         save_gpr(ctx, a->b, ofs);
2906     }
2907 
2908     return nullify_end(ctx);
2909 }
2910 
2911 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2912 {
2913     int hold_mmu_idx = ctx->mmu_idx;
2914 
2915     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
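         /* Absolute accesses bypass virtual translation; reuse the normal
            load path with the MMU index temporarily forced to physical.  */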
2916     ctx->mmu_idx = MMU_PHYS_IDX;
2917     trans_ld(ctx, a);
2918     ctx->mmu_idx = hold_mmu_idx;
2919     return true;
2920 }
2921 
2922 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2923 {
2924     int hold_mmu_idx = ctx->mmu_idx;
2925 
2926     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2927     ctx->mmu_idx = MMU_PHYS_IDX;
2928     trans_st(ctx, a);
2929     ctx->mmu_idx = hold_mmu_idx;
2930     return true;
2931 }
2932 
2933 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2934 {
2935     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2936 
2937     tcg_gen_movi_i64(tcg_rt, a->i);
2938     save_gpr(ctx, a->t, tcg_rt);
2939     cond_free(&ctx->null_cond);
2940     return true;
2941 }
2942 
2943 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2944 {
2945     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
2946     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
2947 
2948     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
2949     save_gpr(ctx, 1, tcg_r1);
2950     cond_free(&ctx->null_cond);
2951     return true;
2952 }
2953 
2954 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2955 {
2956     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2957 
2958     /* Special case rb == 0, for the LDI pseudo-op.
2959        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
2960     if (a->b == 0) {
2961         tcg_gen_movi_i64(tcg_rt, a->i);
2962     } else {
2963         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
2964     }
2965     save_gpr(ctx, a->t, tcg_rt);
2966     cond_free(&ctx->null_cond);
2967     return true;
2968 }
2969 
2970 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
2971                     unsigned c, unsigned f, bool d, unsigned n, int disp)
2972 {
2973     TCGv_i64 dest, in2, sv;
2974     DisasCond cond;
2975 
2976     in2 = load_gpr(ctx, r);
2977     dest = tcg_temp_new();
2978 
2979     tcg_gen_sub_i64(dest, in1, in2);
2980 
2981     sv = NULL;
2982     if (cond_need_sv(c)) {
2983         sv = do_sub_sv(ctx, dest, in1, in2);
2984     }
2985 
2986     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
2987     return do_cbranch(ctx, disp, n, &cond);
2988 }
2989 
2990 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2991 {
2992     if (!ctx->is_pa20 && a->d) {
2993         return false;
2994     }
2995     nullify_over(ctx);
2996     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
2997                    a->c, a->f, a->d, a->n, a->disp);
2998 }
2999 
3000 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3001 {
3002     if (!ctx->is_pa20 && a->d) {
3003         return false;
3004     }
3005     nullify_over(ctx);
3006     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3007                    a->c, a->f, a->d, a->n, a->disp);
3008 }
3009 
3010 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3011                     unsigned c, unsigned f, unsigned n, int disp)
3012 {
3013     TCGv_i64 dest, in2, sv, cb_cond;
3014     DisasCond cond;
3015     bool d = false;
3016 
3017     /*
3018      * For hppa64, the ADDB conditions change with PSW.W,
3019      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3020      */
3021     if (ctx->tb_flags & PSW_W) {
3022         d = c >= 5;
3023         if (d) {
3024             c &= 3;
3025         }
3026     }
3027 
3028     in2 = load_gpr(ctx, r);
3029     dest = tcg_temp_new();
3030     sv = NULL;
3031     cb_cond = NULL;
3032 
3033     if (cond_need_cb(c)) {
3034         TCGv_i64 cb = tcg_temp_new();
3035         TCGv_i64 cb_msb = tcg_temp_new();
3036 
3037         tcg_gen_movi_i64(cb_msb, 0);
3038         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
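             /* Recover the per-bit carry vector: in1 ^ in2 ^ dest yields
                the carry into each bit, from which get_carry extracts the
                bit needed for the condition.  */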
3039         tcg_gen_xor_i64(cb, in1, in2);
3040         tcg_gen_xor_i64(cb, cb, dest);
3041         cb_cond = get_carry(ctx, d, cb, cb_msb);
3042     } else {
3043         tcg_gen_add_i64(dest, in1, in2);
3044     }
3045     if (cond_need_sv(c)) {
3046         sv = do_add_sv(ctx, dest, in1, in2);
3047     }
3048 
3049     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3050     save_gpr(ctx, r, dest);
3051     return do_cbranch(ctx, disp, n, &cond);
3052 }
3053 
3054 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3055 {
3056     nullify_over(ctx);
3057     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3058 }
3059 
3060 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3061 {
3062     nullify_over(ctx);
3063     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3064 }
3065 
3066 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3067 {
3068     TCGv_i64 tmp, tcg_r;
3069     DisasCond cond;
3070 
3071     nullify_over(ctx);
3072 
3073     tmp = tcg_temp_new();
3074     tcg_r = load_gpr(ctx, a->r);
3075     if (cond_need_ext(ctx, a->d)) {
3076         /* Force shift into [32,63] */
3077         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3078         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3079     } else {
3080         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3081     }
3082 
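         /* The selected bit now sits in the sign position, so the
            branch-on-bit condition reduces to a signed test against zero.  */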
3083     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3084     return do_cbranch(ctx, a->disp, a->n, &cond);
3085 }
3086 
3087 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3088 {
3089     TCGv_i64 tmp, tcg_r;
3090     DisasCond cond;
3091     int p;
3092 
3093     nullify_over(ctx);
3094 
3095     tmp = tcg_temp_new();
3096     tcg_r = load_gpr(ctx, a->r);
3097     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3098     tcg_gen_shli_i64(tmp, tcg_r, p);
3099 
3100     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3101     return do_cbranch(ctx, a->disp, a->n, &cond);
3102 }
3103 
3104 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3105 {
3106     TCGv_i64 dest;
3107     DisasCond cond;
3108 
3109     nullify_over(ctx);
3110 
3111     dest = dest_gpr(ctx, a->r2);
3112     if (a->r1 == 0) {
3113         tcg_gen_movi_i64(dest, 0);
3114     } else {
3115         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3116     }
3117 
3118     /* All MOVB conditions are 32-bit. */
3119     cond = do_sed_cond(ctx, a->c, false, dest);
3120     return do_cbranch(ctx, a->disp, a->n, &cond);
3121 }
3122 
3123 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3124 {
3125     TCGv_i64 dest;
3126     DisasCond cond;
3127 
3128     nullify_over(ctx);
3129 
3130     dest = dest_gpr(ctx, a->r);
3131     tcg_gen_movi_i64(dest, a->i);
3132 
3133     /* All MOVBI conditions are 32-bit. */
3134     cond = do_sed_cond(ctx, a->c, false, dest);
3135     return do_cbranch(ctx, a->disp, a->n, &cond);
3136 }
3137 
3138 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3139 {
3140     TCGv_i64 dest, src2;
3141 
3142     if (!ctx->is_pa20 && a->d) {
3143         return false;
3144     }
3145     if (a->c) {
3146         nullify_over(ctx);
3147     }
3148 
3149     dest = dest_gpr(ctx, a->t);
3150     src2 = load_gpr(ctx, a->r2);
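         /* Shift-right-pair special cases: with r1 == 0 this is a plain
            variable right shift of r2; with r1 == r2 it is a rotate right;
            otherwise it is a true two-register funnel shift.  */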
3151     if (a->r1 == 0) {
3152         if (a->d) {
3153             tcg_gen_shr_i64(dest, src2, cpu_sar);
3154         } else {
3155             TCGv_i64 tmp = tcg_temp_new();
3156 
3157             tcg_gen_ext32u_i64(dest, src2);
3158             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3159             tcg_gen_shr_i64(dest, dest, tmp);
3160         }
3161     } else if (a->r1 == a->r2) {
3162         if (a->d) {
3163             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3164         } else {
3165             TCGv_i32 t32 = tcg_temp_new_i32();
3166             TCGv_i32 s32 = tcg_temp_new_i32();
3167 
3168             tcg_gen_extrl_i64_i32(t32, src2);
3169             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3170             tcg_gen_andi_i32(s32, s32, 31);
3171             tcg_gen_rotr_i32(t32, t32, s32);
3172             tcg_gen_extu_i32_i64(dest, t32);
3173         }
3174     } else {
3175         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3176 
3177         if (a->d) {
3178             TCGv_i64 t = tcg_temp_new();
3179             TCGv_i64 n = tcg_temp_new();
3180 
3181             tcg_gen_xori_i64(n, cpu_sar, 63);
3182             tcg_gen_shl_i64(t, src2, n);
3183             tcg_gen_shli_i64(t, t, 1);
3184             tcg_gen_shr_i64(dest, src1, cpu_sar);
3185             tcg_gen_or_i64(dest, dest, t);
3186         } else {
3187             TCGv_i64 t = tcg_temp_new_i64();
3188             TCGv_i64 s = tcg_temp_new_i64();
3189 
3190             tcg_gen_concat32_i64(t, src2, src1);
3191             tcg_gen_extu_reg_i64(s, cpu_sar);
3192             tcg_gen_andi_i64(s, s, 31);
3193             tcg_gen_shr_i64(t, t, s);
3194             tcg_gen_trunc_i64_reg(dest, t);
3195         }
3196     }
3197     save_gpr(ctx, a->t, dest);
3198 
3199     /* Install the new nullification.  */
3200     cond_free(&ctx->null_cond);
3201     if (a->c) {
3202         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3203     }
3204     return nullify_end(ctx);
3205 }
3206 
3207 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3208 {
3209     unsigned width, sa;
3210     TCGv_i64 dest, t2;
3211 
3212     if (!ctx->is_pa20 && a->d) {
3213         return false;
3214     }
3215     if (a->c) {
3216         nullify_over(ctx);
3217     }
3218 
3219     width = a->d ? 64 : 32;
3220     sa = width - 1 - a->cpos;
3221 
3222     dest = dest_gpr(ctx, a->t);
3223     t2 = load_gpr(ctx, a->r2);
3224     if (a->r1 == 0) {
3225         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3226     } else if (width == TARGET_LONG_BITS) {
3227         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3228     } else {
3229         assert(!a->d);
3230         if (a->r1 == a->r2) {
3231             TCGv_i32 t32 = tcg_temp_new_i32();
3232             tcg_gen_extrl_i64_i32(t32, t2);
3233             tcg_gen_rotri_i32(t32, t32, sa);
3234             tcg_gen_extu_i32_i64(dest, t32);
3235         } else {
3236             TCGv_i64 t64 = tcg_temp_new_i64();
3237             tcg_gen_concat32_i64(t64, t2, cpu_gr[a->r1]);
3238             tcg_gen_shri_i64(t64, t64, sa);
3239             tcg_gen_trunc_i64_reg(dest, t64);
3240         }
3241     }
3242     save_gpr(ctx, a->t, dest);
3243 
3244     /* Install the new nullification.  */
3245     cond_free(&ctx->null_cond);
3246     if (a->c) {
3247         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3248     }
3249     return nullify_end(ctx);
3250 }
3251 
3252 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3253 {
3254     unsigned widthm1 = a->d ? 63 : 31;
3255     TCGv_i64 dest, src, tmp;
3256 
3257     if (!ctx->is_pa20 && a->d) {
3258         return false;
3259     }
3260     if (a->c) {
3261         nullify_over(ctx);
3262     }
3263 
3264     dest = dest_gpr(ctx, a->t);
3265     src = load_gpr(ctx, a->r);
3266     tmp = tcg_temp_new();
3267 
3268     /* Recall that SAR is using big-endian bit numbering.  */
3269     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3270     tcg_gen_xori_i64(tmp, tmp, widthm1);
3271 
3272     if (a->se) {
3273         if (!a->d) {
3274             tcg_gen_ext32s_i64(dest, src);
3275             src = dest;
3276         }
3277         tcg_gen_sar_i64(dest, src, tmp);
3278         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3279     } else {
3280         if (!a->d) {
3281             tcg_gen_ext32u_i64(dest, src);
3282             src = dest;
3283         }
3284         tcg_gen_shr_i64(dest, src, tmp);
3285         tcg_gen_extract_i64(dest, dest, 0, a->len);
3286     }
3287     save_gpr(ctx, a->t, dest);
3288 
3289     /* Install the new nullification.  */
3290     cond_free(&ctx->null_cond);
3291     if (a->c) {
3292         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3293     }
3294     return nullify_end(ctx);
3295 }
3296 
3297 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3298 {
3299     unsigned len, cpos, width;
3300     TCGv_i64 dest, src;
3301 
3302     if (!ctx->is_pa20 && a->d) {
3303         return false;
3304     }
3305     if (a->c) {
3306         nullify_over(ctx);
3307     }
3308 
3309     len = a->len;
3310     width = a->d ? 64 : 32;
3311     cpos = width - 1 - a->pos;
3312     if (cpos + len > width) {
3313         len = width - cpos;
3314     }
3315 
3316     dest = dest_gpr(ctx, a->t);
3317     src = load_gpr(ctx, a->r);
3318     if (a->se) {
3319         tcg_gen_sextract_i64(dest, src, cpos, len);
3320     } else {
3321         tcg_gen_extract_i64(dest, src, cpos, len);
3322     }
3323     save_gpr(ctx, a->t, dest);
3324 
3325     /* Install the new nullification.  */
3326     cond_free(&ctx->null_cond);
3327     if (a->c) {
3328         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3329     }
3330     return nullify_end(ctx);
3331 }
3332 
3333 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3334 {
3335     unsigned len, width;
3336     uint64_t mask0, mask1;
3337     TCGv_i64 dest;
3338 
3339     if (!ctx->is_pa20 && a->d) {
3340         return false;
3341     }
3342     if (a->c) {
3343         nullify_over(ctx);
3344     }
3345 
3346     len = a->len;
3347     width = a->d ? 64 : 32;
3348     if (a->cpos + len > width) {
3349         len = width - a->cpos;
3350     }
3351 
3352     dest = dest_gpr(ctx, a->t);
3353     mask0 = deposit64(0, a->cpos, len, a->i);
3354     mask1 = deposit64(-1, a->cpos, len, a->i);
3355 
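         /* mask0 is the immediate deposited over zeros, mask1 over all
            ones; (src & mask1) | mask0 deposits the immediate into src.
            For the zeroing form the result is simply mask0.  */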
3356     if (a->nz) {
3357         TCGv_i64 src = load_gpr(ctx, a->t);
3358         tcg_gen_andi_i64(dest, src, mask1);
3359         tcg_gen_ori_i64(dest, dest, mask0);
3360     } else {
3361         tcg_gen_movi_i64(dest, mask0);
3362     }
3363     save_gpr(ctx, a->t, dest);
3364 
3365     /* Install the new nullification.  */
3366     cond_free(&ctx->null_cond);
3367     if (a->c) {
3368         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3369     }
3370     return nullify_end(ctx);
3371 }
3372 
3373 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3374 {
3375     unsigned rs = a->nz ? a->t : 0;
3376     unsigned len, width;
3377     TCGv_i64 dest, val;
3378 
3379     if (!ctx->is_pa20 && a->d) {
3380         return false;
3381     }
3382     if (a->c) {
3383         nullify_over(ctx);
3384     }
3385 
3386     len = a->len;
3387     width = a->d ? 64 : 32;
3388     if (a->cpos + len > width) {
3389         len = width - a->cpos;
3390     }
3391 
3392     dest = dest_gpr(ctx, a->t);
3393     val = load_gpr(ctx, a->r);
3394     if (rs == 0) {
3395         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3396     } else {
3397         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3398     }
3399     save_gpr(ctx, a->t, dest);
3400 
3401     /* Install the new nullification.  */
3402     cond_free(&ctx->null_cond);
3403     if (a->c) {
3404         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3405     }
3406     return nullify_end(ctx);
3407 }
3408 
3409 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3410                        bool d, bool nz, unsigned len, TCGv_i64 val)
3411 {
3412     unsigned rs = nz ? rt : 0;
3413     unsigned widthm1 = d ? 63 : 31;
3414     TCGv_i64 mask, tmp, shift, dest;
3415     uint64_t msb = 1ULL << (len - 1);
3416 
3417     dest = dest_gpr(ctx, rt);
3418     shift = tcg_temp_new();
3419     tmp = tcg_temp_new();
3420 
3421     /* Convert big-endian bit numbering in SAR to left-shift.  */
3422     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3423     tcg_gen_xori_i64(shift, shift, widthm1);
3424 
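         /* Deposit by variable shift: mask the low LEN bits of the value,
            shift both field and mask into position, then merge into the
            original register (or a zero background for the zeroing form).  */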
3425     mask = tcg_temp_new();
3426     tcg_gen_movi_i64(mask, msb + (msb - 1));
3427     tcg_gen_and_i64(tmp, val, mask);
3428     if (rs) {
3429         tcg_gen_shl_i64(mask, mask, shift);
3430         tcg_gen_shl_i64(tmp, tmp, shift);
3431         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3432         tcg_gen_or_i64(dest, dest, tmp);
3433     } else {
3434         tcg_gen_shl_i64(dest, tmp, shift);
3435     }
3436     save_gpr(ctx, rt, dest);
3437 
3438     /* Install the new nullification.  */
3439     cond_free(&ctx->null_cond);
3440     if (c) {
3441         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3442     }
3443     return nullify_end(ctx);
3444 }
3445 
3446 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3447 {
3448     if (!ctx->is_pa20 && a->d) {
3449         return false;
3450     }
3451     if (a->c) {
3452         nullify_over(ctx);
3453     }
3454     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3455                       load_gpr(ctx, a->r));
3456 }
3457 
3458 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3459 {
3460     if (!ctx->is_pa20 && a->d) {
3461         return false;
3462     }
3463     if (a->c) {
3464         nullify_over(ctx);
3465     }
3466     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3467                       tcg_constant_i64(a->i));
3468 }
3469 
3470 static bool trans_be(DisasContext *ctx, arg_be *a)
3471 {
3472     TCGv_i64 tmp;
3473 
3474 #ifdef CONFIG_USER_ONLY
3475     /* ??? It seems like there should be a good way of using
3476        "be disp(sr2, r0)", the canonical gateway entry mechanism
3477        to our advantage.  But that appears to be inconvenient to
3478        manage alongside branch delay slots.  Therefore we handle
3479        entry into the gateway page via absolute address.  */
3480     /* Since we don't implement spaces, just branch.  Do notice the special
3481        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3482        goto_tb to the TB containing the syscall.  */
3483     if (a->b == 0) {
3484         return do_dbranch(ctx, a->disp, a->l, a->n);
3485     }
3486 #else
3487     nullify_over(ctx);
3488 #endif
3489 
3490     tmp = tcg_temp_new();
3491     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3492     tmp = do_ibranch_priv(ctx, tmp);
3493 
3494 #ifdef CONFIG_USER_ONLY
3495     return do_ibranch(ctx, tmp, a->l, a->n);
3496 #else
3497     TCGv_i64 new_spc = tcg_temp_new_i64();
3498 
3499     load_spr(ctx, new_spc, a->sp);
3500     if (a->l) {
3501         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3502         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3503     }
3504     if (a->n && use_nullify_skip(ctx)) {
3505         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3506         tcg_gen_addi_i64(tmp, tmp, 4);
3507         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3508         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3509         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3510     } else {
3511         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3512         if (ctx->iaoq_b == -1) {
3513             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3514         }
3515         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3516         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3517         nullify_set(ctx, a->n);
3518     }
3519     tcg_gen_lookup_and_goto_ptr();
3520     ctx->base.is_jmp = DISAS_NORETURN;
3521     return nullify_end(ctx);
3522 #endif
3523 }
3524 
3525 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3526 {
3527     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3528 }
3529 
3530 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3531 {
3532     uint64_t dest = iaoq_dest(ctx, a->disp);
3533 
3534     nullify_over(ctx);
3535 
3536     /* Make sure the caller hasn't done something weird with the queue.
3537      * ??? This is not quite the same as the PSW[B] bit, which would be
3538      * expensive to track.  Real hardware will trap for
3539      *    b  gateway
3540      *    b  gateway+4  (in delay slot of first branch)
3541      * However, checking for a non-sequential instruction queue *will*
3542      * diagnose the security hole
3543      *    b  gateway
3544      *    b  evil
3545      * in which instructions at evil would run with increased privs.
3546      */
3547     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3548         return gen_illegal(ctx);
3549     }
3550 
3551 #ifndef CONFIG_USER_ONLY
3552     if (ctx->tb_flags & PSW_C) {
3553         CPUHPPAState *env = cpu_env(ctx->cs);
3554         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3555         /* If we could not find a TLB entry, then we need to generate an
3556            ITLB miss exception so the kernel will provide it.
3557            The resulting TLB fill operation will invalidate this TB and
3558            we will re-translate, at which point we *will* be able to find
3559            the TLB entry and determine if this is in fact a gateway page.  */
3560         if (type < 0) {
3561             gen_excp(ctx, EXCP_ITLB_MISS);
3562             return true;
3563         }
3564         /* No change for non-gateway pages or for priv decrease.  */
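             /* Access rights types 4..7 denote gateway pages; type - 4 is
                the privilege level to which execution is promoted.  */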
3565         if (type >= 4 && type - 4 < ctx->privilege) {
3566             dest = deposit32(dest, 0, 2, type - 4);
3567         }
3568     } else {
3569         dest &= -4;  /* priv = 0 */
3570     }
3571 #endif
3572 
3573     if (a->l) {
3574         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3575         if (ctx->privilege < 3) {
3576             tcg_gen_andi_i64(tmp, tmp, -4);
3577         }
3578         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3579         save_gpr(ctx, a->l, tmp);
3580     }
3581 
3582     return do_dbranch(ctx, dest, 0, a->n);
3583 }
3584 
3585 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3586 {
3587     if (a->x) {
3588         TCGv_i64 tmp = tcg_temp_new();
3589         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3590         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3591         /* The computation here never changes privilege level.  */
3592         return do_ibranch(ctx, tmp, a->l, a->n);
3593     } else {
3594         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3595         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3596     }
3597 }
3598 
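/* BV: vectored branch to GR[b] + GR[x] * 8, or to GR[b] when x is 0.  */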
3599 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3600 {
3601     TCGv_i64 dest;
3602 
3603     if (a->x == 0) {
3604         dest = load_gpr(ctx, a->b);
3605     } else {
3606         dest = tcg_temp_new();
3607         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3608         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3609     }
3610     dest = do_ibranch_priv(ctx, dest);
3611     return do_ibranch(ctx, dest, 0, a->n);
3612 }
3613 
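/* BVE: vectored branch external.  In system mode the new space is
   derived from the branch offset via space_select.  */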
3614 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3615 {
3616     TCGv_i64 dest;
3617 
3618 #ifdef CONFIG_USER_ONLY
3619     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3620     return do_ibranch(ctx, dest, a->l, a->n);
3621 #else
3622     nullify_over(ctx);
3623     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3624 
3625     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3626     if (ctx->iaoq_b == -1) {
3627         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3628     }
3629     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3630     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3631     if (a->l) {
3632         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3633     }
3634     nullify_set(ctx, a->n);
3635     tcg_gen_lookup_and_goto_ptr();
3636     ctx->base.is_jmp = DISAS_NORETURN;
3637     return nullify_end(ctx);
3638 #endif
3639 }
3640 
3641 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3642 {
3643     /* All branch target stack instructions are implemented as nops. */
3644     return ctx->is_pa20;
3645 }
3646 
3647 /*
3648  * Float class 0
3649  */
3650 
3651 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3652 {
3653     tcg_gen_mov_i32(dst, src);
3654 }
3655 
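/* FID: identify the FPU by writing a model/revision word to fr0.
   The constants below name the CPU models being emulated.  */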
3656 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3657 {
3658     uint64_t ret;
3659 
3660     if (ctx->is_pa20) {
3661         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3662     } else {
3663         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3664     }
3665 
3666     nullify_over(ctx);
3667     save_frd(0, tcg_constant_i64(ret));
3668     return nullify_end(ctx);
3669 }
3670 
3671 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3672 {
3673     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3674 }
3675 
3676 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3677 {
3678     tcg_gen_mov_i64(dst, src);
3679 }
3680 
3681 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3682 {
3683     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3684 }
3685 
3686 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3687 {
3688     tcg_gen_andi_i32(dst, src, INT32_MAX);
3689 }
3690 
3691 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3692 {
3693     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3694 }
3695 
3696 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3697 {
3698     tcg_gen_andi_i64(dst, src, INT64_MAX);
3699 }
3700 
3701 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3704 }
3705 
3706 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3709 }
3710 
3711 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3714 }
3715 
3716 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3719 }
3720 
3721 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3724 }
3725 
3726 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3727 {
3728     tcg_gen_xori_i32(dst, src, INT32_MIN);
3729 }
3730 
3731 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3734 }
3735 
3736 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3737 {
3738     tcg_gen_xori_i64(dst, src, INT64_MIN);
3739 }
3740 
3741 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3744 }
3745 
3746 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3747 {
3748     tcg_gen_ori_i32(dst, src, INT32_MIN);
3749 }
3750 
3751 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3754 }
3755 
3756 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3757 {
3758     tcg_gen_ori_i64(dst, src, INT64_MIN);
3759 }
3760 
3761 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3764 }
3765 
3766 /*
3767  * Float class 1
3768  */
3769 
3770 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3771 {
3772     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3773 }
3774 
3775 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3776 {
3777     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3778 }
3779 
3780 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3781 {
3782     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3783 }
3784 
3785 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3786 {
3787     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3788 }
3789 
3790 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3791 {
3792     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3793 }
3794 
3795 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3796 {
3797     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3798 }
3799 
3800 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3801 {
3802     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3803 }
3804 
3805 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3806 {
3807     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3808 }
3809 
3810 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3811 {
3812     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3813 }
3814 
3815 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3816 {
3817     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3818 }
3819 
3820 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3821 {
3822     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3823 }
3824 
3825 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3826 {
3827     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3828 }
3829 
3830 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3831 {
3832     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3833 }
3834 
3835 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3836 {
3837     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3838 }
3839 
3840 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3841 {
3842     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3843 }
3844 
3845 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3846 {
3847     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3848 }
3849 
3850 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3851 {
3852     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3853 }
3854 
3855 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3856 {
3857     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3858 }
3859 
3860 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3861 {
3862     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3863 }
3864 
3865 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3866 {
3867     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3868 }
3869 
3870 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3871 {
3872     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3873 }
3874 
3875 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3876 {
3877     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3878 }
3879 
3880 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3881 {
3882     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3883 }
3884 
3885 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3886 {
3887     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3888 }
3889 
3890 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3891 {
3892     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3893 }
3894 
3895 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3896 {
3897     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3898 }
3899 
3900 /*
3901  * Float class 2
3902  */
3903 
3904 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3905 {
3906     TCGv_i32 ta, tb, tc, ty;
3907 
3908     nullify_over(ctx);
3909 
3910     ta = load_frw0_i32(a->r1);
3911     tb = load_frw0_i32(a->r2);
3912     ty = tcg_constant_i32(a->y);
3913     tc = tcg_constant_i32(a->c);
3914 
3915     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3916 
3917     return nullify_end(ctx);
3918 }
3919 
3920 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3921 {
3922     TCGv_i64 ta, tb;
3923     TCGv_i32 tc, ty;
3924 
3925     nullify_over(ctx);
3926 
3927     ta = load_frd0(a->r1);
3928     tb = load_frd0(a->r2);
3929     ty = tcg_constant_i32(a->y);
3930     tc = tcg_constant_i32(a->c);
3931 
3932     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3933 
3934     return nullify_end(ctx);
3935 }
3936 
3937 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3938 {
3939     TCGv_i64 t;
3940 
3941     nullify_over(ctx);
3942 
3943     t = tcg_temp_new();
3944     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3945 
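    /* For y == 1 this is a multi-condition test: the bit tested by the
       "simple" case (0x4000000) is the FPSR compare (C) bit, and the wider
       masks below appear to add the compare-queue bits used by the acc/rej
       variants.  */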
3946     if (a->y == 1) {
3947         int mask;
3948         bool inv = false;
3949 
3950         switch (a->c) {
3951         case 0: /* simple */
3952             tcg_gen_andi_i64(t, t, 0x4000000);
3953             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3954             goto done;
3955         case 2: /* rej */
3956             inv = true;
3957             /* fallthru */
3958         case 1: /* acc */
3959             mask = 0x43ff800;
3960             break;
3961         case 6: /* rej8 */
3962             inv = true;
3963             /* fallthru */
3964         case 5: /* acc8 */
3965             mask = 0x43f8000;
3966             break;
3967         case 9: /* acc6 */
3968             mask = 0x43e0000;
3969             break;
3970         case 13: /* acc4 */
3971             mask = 0x4380000;
3972             break;
3973         case 17: /* acc2 */
3974             mask = 0x4200000;
3975             break;
3976         default:
3977             gen_illegal(ctx);
3978             return true;
3979         }
3980         if (inv) {
3981             TCGv_i64 c = tcg_constant_i64(mask);
3982             tcg_gen_or_i64(t, t, c);
3983             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3984         } else {
3985             tcg_gen_andi_i64(t, t, mask);
3986             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3987         }
3988     } else {
3989         unsigned cbit = (a->y ^ 1) - 1;
3990 
3991         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
3992         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3993     }
3994 
3995  done:
3996     return nullify_end(ctx);
3997 }
3998 
3999 /*
4000  * Float class 3
4001  */
4002 
4003 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4004 {
4005     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4006 }
4007 
4008 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4009 {
4010     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4011 }
4012 
4013 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4014 {
4015     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4016 }
4017 
4018 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4019 {
4020     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4021 }
4022 
4023 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4024 {
4025     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4026 }
4027 
4028 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4029 {
4030     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4031 }
4032 
4033 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4034 {
4035     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4036 }
4037 
4038 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4039 {
4040     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4041 }
4042 
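/* XMPYU: unsigned 32x32 -> 64-bit multiply between FP registers.  */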
4043 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4044 {
4045     TCGv_i64 x, y;
4046 
4047     nullify_over(ctx);
4048 
4049     x = load_frw0_i64(a->r1);
4050     y = load_frw0_i64(a->r2);
4051     tcg_gen_mul_i64(x, x, y);
4052     save_frd(a->t, x);
4053 
4054     return nullify_end(ctx);
4055 }
4056 
4057 /* Convert the fmpyadd single-precision register encodings to standard.  */
4058 static inline int fmpyadd_s_reg(unsigned r)
4059 {
4060     return (r & 16) * 2 + 16 + (r & 15);
4061 }
4062 
4063 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
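/* FMPYADD/FMPYSUB perform an independent multiply and an independent
   add/sub on disjoint register sets; this is not a fused operation.  */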
4064 {
4065     int tm = fmpyadd_s_reg(a->tm);
4066     int ra = fmpyadd_s_reg(a->ra);
4067     int ta = fmpyadd_s_reg(a->ta);
4068     int rm2 = fmpyadd_s_reg(a->rm2);
4069     int rm1 = fmpyadd_s_reg(a->rm1);
4070 
4071     nullify_over(ctx);
4072 
4073     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4074     do_fop_weww(ctx, ta, ta, ra,
4075                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4076 
4077     return nullify_end(ctx);
4078 }
4079 
4080 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4081 {
4082     return do_fmpyadd_s(ctx, a, false);
4083 }
4084 
4085 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4086 {
4087     return do_fmpyadd_s(ctx, a, true);
4088 }
4089 
4090 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4091 {
4092     nullify_over(ctx);
4093 
4094     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4095     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4096                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4097 
4098     return nullify_end(ctx);
4099 }
4100 
4101 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4102 {
4103     return do_fmpyadd_d(ctx, a, false);
4104 }
4105 
4106 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4107 {
4108     return do_fmpyadd_d(ctx, a, true);
4109 }
4110 
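/* FMPYFADD/FMPYNFADD are the PA2.0 fused multiply-add forms; the
   arithmetic is done entirely in helpers.  */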
4111 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4112 {
4113     TCGv_i32 x, y, z;
4114 
4115     nullify_over(ctx);
4116     x = load_frw0_i32(a->rm1);
4117     y = load_frw0_i32(a->rm2);
4118     z = load_frw0_i32(a->ra3);
4119 
4120     if (a->neg) {
4121         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4122     } else {
4123         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4124     }
4125 
4126     save_frw_i32(a->t, x);
4127     return nullify_end(ctx);
4128 }
4129 
4130 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4131 {
4132     TCGv_i64 x, y, z;
4133 
4134     nullify_over(ctx);
4135     x = load_frd0(a->rm1);
4136     y = load_frd0(a->rm2);
4137     z = load_frd0(a->ra3);
4138 
4139     if (a->neg) {
4140         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4141     } else {
4142         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4143     }
4144 
4145     save_frd(a->t, x);
4146     return nullify_end(ctx);
4147 }
4148 
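/* DIAG is implementation-dependent; only the PDC BTLB call used by
   SeaBIOS-hppa (0x100) is handled, all other codes are logged and
   ignored.  */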
4149 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4150 {
4151     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4152 #ifndef CONFIG_USER_ONLY
4153     if (a->i == 0x100) {
4154         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4155         nullify_over(ctx);
4156         gen_helper_diag_btlb(tcg_env);
4157         return nullify_end(ctx);
4158     }
4159 #endif
4160     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4161     return true;
4162 }
4163 
4164 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4165 {
4166     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4167     int bound;
4168 
4169     ctx->cs = cs;
4170     ctx->tb_flags = ctx->base.tb->flags;
4171     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4172 
4173 #ifdef CONFIG_USER_ONLY
4174     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4175     ctx->mmu_idx = MMU_USER_IDX;
4176     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4177     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4178     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4179 #else
4180     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4181     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4182                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4183                     : MMU_PHYS_IDX);
4184 
4185     /* Recover the IAOQ values from the GVA + PRIV.  */
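    /* cs_base packs the front space in its high bits and the signed
       IAOQ_Back - IAOQ_Front difference in its low 32 bits; a zero
       difference means the back of the queue is unknown.  */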
4186     uint64_t cs_base = ctx->base.tb->cs_base;
4187     uint64_t iasq_f = cs_base & ~0xffffffffull;
4188     int32_t diff = cs_base;
4189 
4190     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4191     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4192 #endif
4193     ctx->iaoq_n = -1;
4194     ctx->iaoq_n_var = NULL;
4195 
4196     /* Bound the number of instructions by those left on the page.  */
4197     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4198     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4199 }
4200 
4201 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4202 {
4203     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4204 
4205     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4206     ctx->null_cond = cond_make_f();
4207     ctx->psw_n_nonzero = false;
4208     if (ctx->tb_flags & PSW_N) {
4209         ctx->null_cond.c = TCG_COND_ALWAYS;
4210         ctx->psw_n_nonzero = true;
4211     }
4212     ctx->null_lab = NULL;
4213 }
4214 
4215 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4216 {
4217     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4218 
4219     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4220 }
4221 
4222 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4223 {
4224     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4225     CPUHPPAState *env = cpu_env(cs);
4226     DisasJumpType ret;
4227 
4228     /* Execute one insn.  */
4229 #ifdef CONFIG_USER_ONLY
4230     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4231         do_page_zero(ctx);
4232         ret = ctx->base.is_jmp;
4233         assert(ret != DISAS_NEXT);
4234     } else
4235 #endif
4236     {
4237         /* Always fetch the insn, even if nullified, so that we check
4238            the page permissions for execute.  */
4239         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4240 
4241         /* Set up the IA queue for the next insn.
4242            This will be overwritten by a branch.  */
4243         if (ctx->iaoq_b == -1) {
4244             ctx->iaoq_n = -1;
4245             ctx->iaoq_n_var = tcg_temp_new();
4246             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4247         } else {
4248             ctx->iaoq_n = ctx->iaoq_b + 4;
4249             ctx->iaoq_n_var = NULL;
4250         }
4251 
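        /* If the insn is statically known to be nullified, skip decoding
           it; only the nullify state for the next insn needs updating.  */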
4252         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4253             ctx->null_cond.c = TCG_COND_NEVER;
4254             ret = DISAS_NEXT;
4255         } else {
4256             ctx->insn = insn;
4257             if (!decode(ctx, insn)) {
4258                 gen_illegal(ctx);
4259             }
4260             ret = ctx->base.is_jmp;
4261             assert(ctx->null_lab == NULL);
4262         }
4263     }
4264 
4265     /* Advance the insn queue.  Note that this check also detects
4266        a privilege change within the instruction queue.  */
4267     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4268         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4269             && use_goto_tb(ctx, ctx->iaoq_b)
4270             && (ctx->null_cond.c == TCG_COND_NEVER
4271                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4272             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4273             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4274             ctx->base.is_jmp = ret = DISAS_NORETURN;
4275         } else {
4276             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4277         }
4278     }
4279     ctx->iaoq_f = ctx->iaoq_b;
4280     ctx->iaoq_b = ctx->iaoq_n;
4281     ctx->base.pc_next += 4;
4282 
4283     switch (ret) {
4284     case DISAS_NORETURN:
4285     case DISAS_IAQ_N_UPDATED:
4286         break;
4287 
4288     case DISAS_NEXT:
4289     case DISAS_IAQ_N_STALE:
4290     case DISAS_IAQ_N_STALE_EXIT:
4291         if (ctx->iaoq_f == -1) {
4292             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4293             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4294 #ifndef CONFIG_USER_ONLY
4295             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4296 #endif
4297             nullify_save(ctx);
4298             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4299                                 ? DISAS_EXIT
4300                                 : DISAS_IAQ_N_UPDATED);
4301         } else if (ctx->iaoq_b == -1) {
4302             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4303         }
4304         break;
4305 
4306     default:
4307         g_assert_not_reached();
4308     }
4309 }
4310 
4311 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4312 {
4313     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4314     DisasJumpType is_jmp = ctx->base.is_jmp;
4315 
4316     switch (is_jmp) {
4317     case DISAS_NORETURN:
4318         break;
4319     case DISAS_TOO_MANY:
4320     case DISAS_IAQ_N_STALE:
4321     case DISAS_IAQ_N_STALE_EXIT:
4322         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4323         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4324         nullify_save(ctx);
4325         /* FALLTHRU */
4326     case DISAS_IAQ_N_UPDATED:
4327         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4328             tcg_gen_lookup_and_goto_ptr();
4329             break;
4330         }
4331         /* FALLTHRU */
4332     case DISAS_EXIT:
4333         tcg_gen_exit_tb(NULL, 0);
4334         break;
4335     default:
4336         g_assert_not_reached();
4337     }
4338 }
4339 
4340 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4341                               CPUState *cs, FILE *logfile)
4342 {
4343     target_ulong pc = dcbase->pc_first;
4344 
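    /* These offsets are the page-zero entry points emulated by
       do_page_zero; there is no guest code at them to disassemble.  */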
4345 #ifdef CONFIG_USER_ONLY
4346     switch (pc) {
4347     case 0x00:
4348         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4349         return;
4350     case 0xb0:
4351         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4352         return;
4353     case 0xe0:
4354         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4355         return;
4356     case 0x100:
4357         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4358         return;
4359     }
4360 #endif
4361 
4362     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4363     target_disas(logfile, cs, pc, dcbase->tb->size);
4364 }
4365 
4366 static const TranslatorOps hppa_tr_ops = {
4367     .init_disas_context = hppa_tr_init_disas_context,
4368     .tb_start           = hppa_tr_tb_start,
4369     .insn_start         = hppa_tr_insn_start,
4370     .translate_insn     = hppa_tr_translate_insn,
4371     .tb_stop            = hppa_tr_tb_stop,
4372     .disas_log          = hppa_tr_disas_log,
4373 };
4374 
4375 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4376                            target_ulong pc, void *host_pc)
4377 {
4378     DisasContext ctx;
4379     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4380 }
4381