/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that an explicit sr0
   decodes to a nonzero value rather than to 0, which would mean "infer
   the space from the base register".  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
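
/* Concretely: with m=0 there is no base modification (0); with m=1,
   a=1 (the PA-RISC ",mb" modify-before completer) yields -1 (pre-modify)
   and a=0 (",ma", modify after) yields 1 (post-modify).  */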

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
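
/* For example: x=0, y=1 encodes val=1, and (1 ^ 31) + 1 = 31 = 32 - 1;
   x=1, y=0 encodes val=32, and (32 ^ 31) + 1 = 64 = 32 * 1 + 32 - 0,
   the carry out of bit 4 folding into x exactly as described above.  */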

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_i64 t = tcg_temp_new();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
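
/* The 64-bit fr[] registers double as pairs of 32-bit FP registers:
   bit 5 of a single-precision register number selects the right (low)
   word, otherwise the left (high) word, with HI_OFS/LO_OFS above
   absorbing host endianness.  */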

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

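/* Emit the transition to the next TB for the given front/back IAOQ
   pair: a direct goto_tb when both offsets are known constants and
   the target is usable, otherwise an indirect goto_ptr.  Both queue
   elements must be updated because PA-RISC branches have a delay
   slot tracked by the back element.  */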
static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Do conditions need to treat their operands as 32-bit values,
   requiring explicit extension within the TCGv_i64 temporaries? */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new();
        TCGv_i64 t2 = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
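        /* This is the per-bit carry-out identity:
           carry_out = (in1 & in2) | ((in1 | in2) & ~res).  */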
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new();
        tcg_gen_extract_i64(t, cb, 32, 1);
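        /* Bit 32 of the carry/borrow vector is the carry out of the
           low word (bit 31), which is the carry for a 32-bit op.  */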
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new();
    TCGv_i64 tmp = tcg_temp_new();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);
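    /* The MSB of SV is set iff the operands have the same sign but the
       result's sign differs: sv = (res ^ in1) & ~(in1 ^ in2).  */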

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new();
    TCGv_i64 tmp = tcg_temp_new();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);
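    /* Here overflow requires the operands to differ in sign, with the
       result's sign differing from in1: sv = (res ^ in1) & (in1 ^ in2).  */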

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_i64 zero = tcg_constant_i64(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_i64(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
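        /* in1 ^ in2 ^ dest recovers the carry-in of every bit column;
           together with cb_msb (the carry out of bit 63) this is the
           complete PSW carry/borrow state.  */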
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_i64(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, zero);
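        /* I.e. compute the 128-bit value (1:in1) - (0:in2); the high
           result is 1 - borrow, which is exactly the PA-RISC carry
           bit for subtraction.  */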
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
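    /* With the top two address bits b scaled to a byte offset (b * 8),
       this loads sr[4 + b], i.e. one of SR4-SR7.  */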

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_tl(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

1778         /* We do have to handle the non-local temporary, DEST, before
1779            branching.  Since IAOQ_F is not really live at this point, we
1780            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1781         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1782         next = tcg_temp_new();
1783         tcg_gen_addi_i64(next, dest, 4);
1784         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1785 
1786         nullify_over(ctx);
1787         if (link != 0) {
1788             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1789         }
1790         tcg_gen_lookup_and_goto_ptr();
1791         return nullify_end(ctx);
1792     } else {
1793         c = ctx->null_cond.c;
1794         a0 = ctx->null_cond.a0;
1795         a1 = ctx->null_cond.a1;
1796 
1797         tmp = tcg_temp_new();
1798         next = tcg_temp_new();
1799 
1800         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1801         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1802         ctx->iaoq_n = -1;
1803         ctx->iaoq_n_var = next;
1804 
1805         if (link != 0) {
1806             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1807         }
1808 
1809         if (is_n) {
1810             /* The branch nullifies the next insn, which means the state of N
1811                after the branch is the inverse of the state of N that applied
1812                to the branch.  */
1813             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1814             cond_free(&ctx->null_cond);
1815             ctx->null_cond = cond_make_n();
1816             ctx->psw_n_nonzero = true;
1817         } else {
1818             cond_free(&ctx->null_cond);
1819         }
1820     }
1821     return true;
1822 }
1823 
1824 /* Implement
1825  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1826  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1827  *    else
1828  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1829  * which keeps the privilege level from being increased.
1830  */
1831 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1832 {
1833     TCGv_i64 dest;
1834     switch (ctx->privilege) {
1835     case 0:
1836         /* Privilege 0 is maximum and is allowed to decrease.  */
1837         return offset;
1838     case 3:
1839         /* Privilege 3 is minimum and is never allowed to increase.  */
1840         dest = tcg_temp_new();
1841         tcg_gen_ori_i64(dest, offset, 3);
1842         break;
1843     default:
1844         dest = tcg_temp_new();
1845         tcg_gen_andi_i64(dest, offset, -4);
1846         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1847         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1848         break;
1849     }
1850     return dest;
1851 }
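
/*
 * An illustrative host-side sketch of the clamp above (hypothetical helper,
 * not in the source): the low two bits of a branch target encode the
 * privilege level, 0 highest and 3 lowest, and an indirect branch may only
 * keep or lower privilege.
 */
static uint64_t clamp_branch_priv(uint64_t offset, unsigned cur_priv)
{
    uint64_t dest;

    switch (cur_priv) {
    case 0:
        return offset;          /* maximum privilege; any target is fine */
    case 3:
        return offset | 3;      /* minimum privilege; force level 3 */
    default:
        dest = (offset & ~(uint64_t)3) | cur_priv;
        /* The numerically larger value encodes the lower privilege.  */
        return dest > offset ? dest : offset;
    }
}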
1852 
1853 #ifdef CONFIG_USER_ONLY
1854 /* On Linux, page zero is normally marked execute only + gateway.
1855    Therefore normal read or write is supposed to fail, but specific
1856    offsets have kernel code mapped to raise permissions to implement
1857    system calls.  Handling this via an explicit check here, rather
1858    than in the "be disp(sr2,r0)" instruction that probably sent us
1859    here, is the easiest way to handle the branch delay slot on the
1860    aforementioned BE.  */
1861 static void do_page_zero(DisasContext *ctx)
1862 {
1863     TCGv_i64 tmp;
1864 
1865     /* If by some means we get here with PSW[N]=1, that implies that
1866        the B,GATE instruction would be skipped, and we'd fault on the
1867        next insn within the privileged page.  */
1868     switch (ctx->null_cond.c) {
1869     case TCG_COND_NEVER:
1870         break;
1871     case TCG_COND_ALWAYS:
1872         tcg_gen_movi_i64(cpu_psw_n, 0);
1873         goto do_sigill;
1874     default:
1875         /* Since this is always the first (and only) insn within the
1876            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1877         g_assert_not_reached();
1878     }
1879 
1880     /* Check that we didn't arrive here via some means that allowed
1881        non-sequential instruction execution.  Normally the PSW[B] bit
1882        detects this by preventing the B,GATE instruction from executing
1883        under such conditions.  */
1884     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1885         goto do_sigill;
1886     }
1887 
1888     switch (ctx->iaoq_f & -4) {
1889     case 0x00: /* Null pointer call */
1890         gen_excp_1(EXCP_IMP);
1891         ctx->base.is_jmp = DISAS_NORETURN;
1892         break;
1893 
1894     case 0xb0: /* LWS */
1895         gen_excp_1(EXCP_SYSCALL_LWS);
1896         ctx->base.is_jmp = DISAS_NORETURN;
1897         break;
1898 
1899     case 0xe0: /* SET_THREAD_POINTER */
1900         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1901         tmp = tcg_temp_new();
1902         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1903         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1904         tcg_gen_addi_i64(tmp, tmp, 4);
1905         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1906         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1907         break;
1908 
1909     case 0x100: /* SYSCALL */
1910         gen_excp_1(EXCP_SYSCALL);
1911         ctx->base.is_jmp = DISAS_NORETURN;
1912         break;
1913 
1914     default:
1915     do_sigill:
1916         gen_excp_1(EXCP_ILL);
1917         ctx->base.is_jmp = DISAS_NORETURN;
1918         break;
1919     }
1920 }
1921 #endif
1922 
1923 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1924 {
1925     cond_free(&ctx->null_cond);
1926     return true;
1927 }
1928 
1929 static bool trans_break(DisasContext *ctx, arg_break *a)
1930 {
1931     return gen_excp_iir(ctx, EXCP_BREAK);
1932 }
1933 
1934 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1935 {
1936     /* No point in nullifying the memory barrier.  */
1937     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1938 
1939     cond_free(&ctx->null_cond);
1940     return true;
1941 }
1942 
1943 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1944 {
1945     unsigned rt = a->t;
1946     TCGv_i64 tmp = dest_gpr(ctx, rt);
1947     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1948     save_gpr(ctx, rt, tmp);
1949 
1950     cond_free(&ctx->null_cond);
1951     return true;
1952 }
1953 
1954 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1955 {
1956     unsigned rt = a->t;
1957     unsigned rs = a->sp;
1958     TCGv_i64 t0 = tcg_temp_new_i64();
1959 
1960     load_spr(ctx, t0, rs);
1961     tcg_gen_shri_i64(t0, t0, 32);
1962 
1963     save_gpr(ctx, rt, t0);
1964 
1965     cond_free(&ctx->null_cond);
1966     return true;
1967 }
1968 
1969 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1970 {
1971     unsigned rt = a->t;
1972     unsigned ctl = a->r;
1973     TCGv_i64 tmp;
1974 
1975     switch (ctl) {
1976     case CR_SAR:
1977         if (a->e == 0) {
1978             /* MFSAR without ,W masks low 5 bits.  */
1979             tmp = dest_gpr(ctx, rt);
1980             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1981             save_gpr(ctx, rt, tmp);
1982             goto done;
1983         }
1984         save_gpr(ctx, rt, cpu_sar);
1985         goto done;
1986     case CR_IT: /* Interval Timer */
1987         /* FIXME: Respect PSW_S bit.  */
1988         nullify_over(ctx);
1989         tmp = dest_gpr(ctx, rt);
1990         if (translator_io_start(&ctx->base)) {
1991             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1992         }
1993         gen_helper_read_interval_timer(tmp);
1996         save_gpr(ctx, rt, tmp);
1997         return nullify_end(ctx);
1998     case 26:
1999     case 27:
2000         break;
2001     default:
2002         /* All other control registers are privileged.  */
2003         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2004         break;
2005     }
2006 
2007     tmp = tcg_temp_new();
2008     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2009     save_gpr(ctx, rt, tmp);
2010 
2011  done:
2012     cond_free(&ctx->null_cond);
2013     return true;
2014 }
2015 
2016 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2017 {
2018     unsigned rr = a->r;
2019     unsigned rs = a->sp;
2020     TCGv_i64 tmp;
2021 
2022     if (rs >= 5) {
2023         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2024     }
2025     nullify_over(ctx);
2026 
2027     tmp = tcg_temp_new_i64();
2028     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2029 
2030     if (rs >= 4) {
2031         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2032         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2033     } else {
2034         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2035     }
2036 
2037     return nullify_end(ctx);
2038 }
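
/*
 * An illustrative sketch (hypothetical helpers): the 32-bit space
 * identifier lives in the high half of the 64-bit sr[] slot, so MTSP
 * and MFSP above are just a 32-bit shift in each direction.
 */
static uint64_t spr_slot_from_gr(uint64_t gr)  { return gr << 32; }
static uint64_t gr_from_spr_slot(uint64_t spr) { return spr >> 32; }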
2039 
2040 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2041 {
2042     unsigned ctl = a->t;
2043     TCGv_i64 reg;
2044     TCGv_i64 tmp;
2045 
2046     if (ctl == CR_SAR) {
2047         reg = load_gpr(ctx, a->r);
2048         tmp = tcg_temp_new();
2049         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2050         save_or_nullify(ctx, cpu_sar, tmp);
2051 
2052         cond_free(&ctx->null_cond);
2053         return true;
2054     }
2055 
2056     /* All other control registers are privileged or read-only.  */
2057     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2058 
2059 #ifndef CONFIG_USER_ONLY
2060     nullify_over(ctx);
2061     reg = load_gpr(ctx, a->r);
2062 
2063     switch (ctl) {
2064     case CR_IT:
2065         gen_helper_write_interval_timer(tcg_env, reg);
2066         break;
2067     case CR_EIRR:
2068         gen_helper_write_eirr(tcg_env, reg);
2069         break;
2070     case CR_EIEM:
2071         gen_helper_write_eiem(tcg_env, reg);
2072         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2073         break;
2074 
2075     case CR_IIASQ:
2076     case CR_IIAOQ:
2077         /* FIXME: Respect PSW_Q bit */
2078         /* The write advances the queue and stores to the back element.  */
2079         tmp = tcg_temp_new();
2080         tcg_gen_ld_i64(tmp, tcg_env,
2081                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2082         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2083         tcg_gen_st_i64(reg, tcg_env,
2084                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2085         break;
2086 
2087     case CR_PID1:
2088     case CR_PID2:
2089     case CR_PID3:
2090     case CR_PID4:
2091         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2093         gen_helper_change_prot_id(tcg_env);
2095         break;
2096 
2097     default:
2098         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2099         break;
2100     }
2101     return nullify_end(ctx);
2102 #endif
2103 }
2104 
2105 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2106 {
2107     TCGv_i64 tmp = tcg_temp_new();
2108 
2109     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2110     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2111     save_or_nullify(ctx, cpu_sar, tmp);
2112 
2113     cond_free(&ctx->null_cond);
2114     return true;
2115 }
2116 
2117 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2118 {
2119     TCGv_i64 dest = dest_gpr(ctx, a->t);
2120 
2121 #ifdef CONFIG_USER_ONLY
2122     /* We don't implement space registers in user mode. */
2123     tcg_gen_movi_i64(dest, 0);
2124 #else
2125     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2126     tcg_gen_shri_i64(dest, dest, 32);
2127 #endif
2128     save_gpr(ctx, a->t, dest);
2129 
2130     cond_free(&ctx->null_cond);
2131     return true;
2132 }
2133 
2134 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2135 {
2136     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2137 #ifndef CONFIG_USER_ONLY
2138     TCGv_i64 tmp;
2139 
2140     nullify_over(ctx);
2141 
2142     tmp = tcg_temp_new();
2143     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2144     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2145     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2146     save_gpr(ctx, a->t, tmp);
2147 
2148     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2149     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2150     return nullify_end(ctx);
2151 #endif
2152 }
2153 
2154 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2155 {
2156     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2157 #ifndef CONFIG_USER_ONLY
2158     TCGv_i64 tmp;
2159 
2160     nullify_over(ctx);
2161 
2162     tmp = tcg_temp_new();
2163     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2164     tcg_gen_ori_i64(tmp, tmp, a->i);
2165     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2166     save_gpr(ctx, a->t, tmp);
2167 
2168     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2169     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2170     return nullify_end(ctx);
2171 #endif
2172 }
2173 
2174 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2175 {
2176     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2177 #ifndef CONFIG_USER_ONLY
2178     TCGv_i64 tmp, reg;
2179     nullify_over(ctx);
2180 
2181     reg = load_gpr(ctx, a->r);
2182     tmp = tcg_temp_new();
2183     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2184 
2185     /* Exit the TB to recognize new interrupts.  */
2186     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2187     return nullify_end(ctx);
2188 #endif
2189 }
2190 
2191 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2192 {
2193     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2194 #ifndef CONFIG_USER_ONLY
2195     nullify_over(ctx);
2196 
2197     if (rfi_r) {
2198         gen_helper_rfi_r(tcg_env);
2199     } else {
2200         gen_helper_rfi(tcg_env);
2201     }
2202     /* Exit the TB to recognize new interrupts.  */
2203     tcg_gen_exit_tb(NULL, 0);
2204     ctx->base.is_jmp = DISAS_NORETURN;
2205 
2206     return nullify_end(ctx);
2207 #endif
2208 }
2209 
2210 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2211 {
2212     return do_rfi(ctx, false);
2213 }
2214 
2215 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2216 {
2217     return do_rfi(ctx, true);
2218 }
2219 
2220 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2221 {
2222     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2223 #ifndef CONFIG_USER_ONLY
2224     nullify_over(ctx);
2225     gen_helper_halt(tcg_env);
2226     ctx->base.is_jmp = DISAS_NORETURN;
2227     return nullify_end(ctx);
2228 #endif
2229 }
2230 
2231 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2232 {
2233     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2234 #ifndef CONFIG_USER_ONLY
2235     nullify_over(ctx);
2236     gen_helper_reset(tcg_env);
2237     ctx->base.is_jmp = DISAS_NORETURN;
2238     return nullify_end(ctx);
2239 #endif
2240 }
2241 
2242 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2243 {
2244     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2245 #ifndef CONFIG_USER_ONLY
2246     nullify_over(ctx);
2247     gen_helper_getshadowregs(tcg_env);
2248     return nullify_end(ctx);
2249 #endif
2250 }
2251 
2252 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2253 {
2254     if (a->m) {
2255         TCGv_i64 dest = dest_gpr(ctx, a->b);
2256         TCGv_i64 src1 = load_gpr(ctx, a->b);
2257         TCGv_i64 src2 = load_gpr(ctx, a->x);
2258 
2259         /* The only thing we need to do is the base register modification.  */
2260         tcg_gen_add_i64(dest, src1, src2);
2261         save_gpr(ctx, a->b, dest);
2262     }
2263     cond_free(&ctx->null_cond);
2264     return true;
2265 }
2266 
2267 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2268 {
2269     TCGv_i64 dest, ofs;
2270     TCGv_i32 level, want;
2271     TCGv_i64 addr;
2272 
2273     nullify_over(ctx);
2274 
2275     dest = dest_gpr(ctx, a->t);
2276     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2277 
2278     if (a->imm) {
2279         level = tcg_constant_i32(a->ri);
2280     } else {
2281         level = tcg_temp_new_i32();
2282         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2283         tcg_gen_andi_i32(level, level, 3);
2284     }
2285     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2286 
2287     gen_helper_probe(dest, tcg_env, addr, level, want);
2288 
2289     save_gpr(ctx, a->t, dest);
2290     return nullify_end(ctx);
2291 }
2292 
2293 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2294 {
2295     if (ctx->is_pa20) {
2296         return false;
2297     }
2298     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2299 #ifndef CONFIG_USER_ONLY
2300     TCGv_i64 addr;
2301     TCGv_i64 ofs, reg;
2302 
2303     nullify_over(ctx);
2304 
2305     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2306     reg = load_gpr(ctx, a->r);
2307     if (a->addr) {
2308         gen_helper_itlba_pa11(tcg_env, addr, reg);
2309     } else {
2310         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2311     }
2312 
2313     /* Exit TB for TLB change if mmu is enabled.  */
2314     if (ctx->tb_flags & PSW_C) {
2315         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2316     }
2317     return nullify_end(ctx);
2318 #endif
2319 }
2320 
2321 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2322 {
2323     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2324 #ifndef CONFIG_USER_ONLY
2325     TCGv_i64 addr;
2326     TCGv_i64 ofs;
2327 
2328     nullify_over(ctx);
2329 
2330     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2331     if (a->m) {
2332         save_gpr(ctx, a->b, ofs);
2333     }
2334     if (a->local) {
2335         gen_helper_ptlbe(tcg_env);
2336     } else {
2337         gen_helper_ptlb(tcg_env, addr);
2338     }
2339 
2340     /* Exit TB for TLB change if mmu is enabled.  */
2341     if (ctx->tb_flags & PSW_C) {
2342         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2343     }
2344     return nullify_end(ctx);
2345 #endif
2346 }
2347 
2348 /*
2349  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2350  * See
2351  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2352  *     page 13-9 (195/206)
2353  */
2354 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2355 {
2356     if (ctx->is_pa20) {
2357         return false;
2358     }
2359     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2360 #ifndef CONFIG_USER_ONLY
2361     TCGv_i64 addr, atl, stl;
2362     TCGv_i64 reg;
2363 
2364     nullify_over(ctx);
2365 
2366     /*
2367      * FIXME:
2368      *  if (not (pcxl or pcxl2))
2369      *    return gen_illegal(ctx);
2370      */
2371 
2372     atl = tcg_temp_new_i64();
2373     stl = tcg_temp_new_i64();
2374     addr = tcg_temp_new_i64();
2375 
2376     tcg_gen_ld32u_i64(stl, tcg_env,
2377                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2378                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2379     tcg_gen_ld32u_i64(atl, tcg_env,
2380                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2381                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2382     tcg_gen_shli_i64(stl, stl, 32);
2383     tcg_gen_or_i64(addr, atl, stl);
2384 
2385     reg = load_gpr(ctx, a->r);
2386     if (a->addr) {
2387         gen_helper_itlba_pa11(tcg_env, addr, reg);
2388     } else {
2389         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2390     }
2391 
2392     /* Exit TB for TLB change if mmu is enabled.  */
2393     if (ctx->tb_flags & PSW_C) {
2394         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2395     }
2396     return nullify_end(ctx);
2397 #endif
2398 }
2399 
2400 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2401 {
2402     if (!ctx->is_pa20) {
2403         return false;
2404     }
2405     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2406 #ifndef CONFIG_USER_ONLY
2407     nullify_over(ctx);
2408     {
2409         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2410         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2411 
2412         if (a->data) {
2413             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2414         } else {
2415             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2416         }
2417     }
2418     /* Exit TB for TLB change if mmu is enabled.  */
2419     if (ctx->tb_flags & PSW_C) {
2420         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2421     }
2422     return nullify_end(ctx);
2423 #endif
2424 }
2425 
2426 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2427 {
2428     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2429 #ifndef CONFIG_USER_ONLY
2430     TCGv_i64 vaddr;
2431     TCGv_i64 ofs, paddr;
2432 
2433     nullify_over(ctx);
2434 
2435     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2436 
2437     paddr = tcg_temp_new();
2438     gen_helper_lpa(paddr, tcg_env, vaddr);
2439 
2440     /* Note that physical address result overrides base modification.  */
2441     if (a->m) {
2442         save_gpr(ctx, a->b, ofs);
2443     }
2444     save_gpr(ctx, a->t, paddr);
2445 
2446     return nullify_end(ctx);
2447 #endif
2448 }
2449 
2450 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2451 {
2452     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2453 
2454     /* The Coherence Index is an implementation-defined function of the
2455        physical address.  Two addresses with the same CI have a coherent
2456        view of the cache.  Our implementation is to return 0 for all,
2457        since the entire address space is coherent.  */
2458     save_gpr(ctx, a->t, tcg_constant_i64(0));
2459 
2460     cond_free(&ctx->null_cond);
2461     return true;
2462 }
2463 
2464 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2465 {
2466     return do_add_reg(ctx, a, false, false, false, false);
2467 }
2468 
2469 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2470 {
2471     return do_add_reg(ctx, a, true, false, false, false);
2472 }
2473 
2474 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2475 {
2476     return do_add_reg(ctx, a, false, true, false, false);
2477 }
2478 
2479 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2480 {
2481     return do_add_reg(ctx, a, false, false, false, true);
2482 }
2483 
2484 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2485 {
2486     return do_add_reg(ctx, a, false, true, false, true);
2487 }
2488 
2489 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2490 {
2491     return do_sub_reg(ctx, a, false, false, false);
2492 }
2493 
2494 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2495 {
2496     return do_sub_reg(ctx, a, true, false, false);
2497 }
2498 
2499 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2500 {
2501     return do_sub_reg(ctx, a, false, false, true);
2502 }
2503 
2504 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2505 {
2506     return do_sub_reg(ctx, a, true, false, true);
2507 }
2508 
2509 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2510 {
2511     return do_sub_reg(ctx, a, false, true, false);
2512 }
2513 
2514 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2515 {
2516     return do_sub_reg(ctx, a, true, true, false);
2517 }
2518 
2519 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2520 {
2521     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2522 }
2523 
2524 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2525 {
2526     return do_log_reg(ctx, a, tcg_gen_and_i64);
2527 }
2528 
2529 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2530 {
2531     if (a->cf == 0) {
2532         unsigned r2 = a->r2;
2533         unsigned r1 = a->r1;
2534         unsigned rt = a->t;
2535 
2536         if (rt == 0) { /* NOP */
2537             cond_free(&ctx->null_cond);
2538             return true;
2539         }
2540         if (r2 == 0) { /* COPY */
2541             if (r1 == 0) {
2542                 TCGv_i64 dest = dest_gpr(ctx, rt);
2543                 tcg_gen_movi_i64(dest, 0);
2544                 save_gpr(ctx, rt, dest);
2545             } else {
2546                 save_gpr(ctx, rt, cpu_gr[r1]);
2547             }
2548             cond_free(&ctx->null_cond);
2549             return true;
2550         }
2551 #ifndef CONFIG_USER_ONLY
2552         /* These are QEMU extensions and are nops in the real architecture:
2553          *
2554          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2555          * or %r31,%r31,%r31 -- death loop; offline cpu
2556          *                      currently implemented as idle.
2557          */
2558         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2559             /* No need to check for supervisor, as userland can only pause
2560                until the next timer interrupt.  */
2561             nullify_over(ctx);
2562 
2563             /* Advance the instruction queue.  */
2564             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2565             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2566             nullify_set(ctx, 0);
2567 
2568             /* Tell the qemu main loop to halt until this cpu has work.  */
2569             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2570                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2571             gen_excp_1(EXCP_HALTED);
2572             ctx->base.is_jmp = DISAS_NORETURN;
2573 
2574             return nullify_end(ctx);
2575         }
2576 #endif
2577     }
2578     return do_log_reg(ctx, a, tcg_gen_or_i64);
2579 }
2580 
2581 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2582 {
2583     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2584 }
2585 
2586 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2587 {
2588     TCGv_i64 tcg_r1, tcg_r2;
2589 
2590     if (a->cf) {
2591         nullify_over(ctx);
2592     }
2593     tcg_r1 = load_gpr(ctx, a->r1);
2594     tcg_r2 = load_gpr(ctx, a->r2);
2595     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2596     return nullify_end(ctx);
2597 }
2598 
2599 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2600 {
2601     TCGv_i64 tcg_r1, tcg_r2;
2602 
2603     if (a->cf) {
2604         nullify_over(ctx);
2605     }
2606     tcg_r1 = load_gpr(ctx, a->r1);
2607     tcg_r2 = load_gpr(ctx, a->r2);
2608     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2609     return nullify_end(ctx);
2610 }
2611 
2612 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2613 {
2614     TCGv_i64 tcg_r1, tcg_r2, tmp;
2615 
2616     if (a->cf) {
2617         nullify_over(ctx);
2618     }
2619     tcg_r1 = load_gpr(ctx, a->r1);
2620     tcg_r2 = load_gpr(ctx, a->r2);
2621     tmp = tcg_temp_new();
2622     tcg_gen_not_i64(tmp, tcg_r2);
2623     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2624     return nullify_end(ctx);
2625 }
2626 
2627 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2628 {
2629     return do_uaddcm(ctx, a, false);
2630 }
2631 
2632 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2633 {
2634     return do_uaddcm(ctx, a, true);
2635 }
2636 
2637 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2638 {
2639     TCGv_i64 tmp;
2640 
2641     nullify_over(ctx);
2642 
2643     tmp = tcg_temp_new();
2644     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2645     if (!is_i) {
2646         tcg_gen_not_i64(tmp, tmp);
2647     }
2648     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2649     tcg_gen_muli_i64(tmp, tmp, 6);
2650     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2651             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2652     return nullify_end(ctx);
2653 }
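
/*
 * An illustrative host-side sketch of what DCOR enables (hypothetical
 * helper, not in the source): packed-BCD addition by biasing each digit
 * with +6 and then, as DCOR does from PSW[CB], subtracting 6 from every
 * digit that produced no carry.  E.g. bcd_add64(0x0123, 0x0456) == 0x0579.
 * Uses the GCC/Clang __int128 extension to keep the top digit's carry.
 */
static uint64_t bcd_add64(uint64_t a, uint64_t b)
{
    uint64_t biased = a + 0x6666666666666666ull;
    unsigned __int128 wide = (unsigned __int128)biased + b;
    uint64_t sum = (uint64_t)wide;
    /* Carry-in vector: bit k is the carry into bit k of the sum, so
       bit 4i+4 is the carry out of digit i.  */
    unsigned __int128 cin = (unsigned __int128)biased ^ b ^ wide;
    uint64_t carries = (uint64_t)(cin >> 4) & 0x1111111111111111ull;

    /* Subtract 6 from each digit whose carry bit is clear.  */
    return sum - (carries ^ 0x1111111111111111ull) * 6;
}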
2654 
2655 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2656 {
2657     return do_dcor(ctx, a, false);
2658 }
2659 
2660 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2661 {
2662     return do_dcor(ctx, a, true);
2663 }
2664 
2665 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2666 {
2667     TCGv_i64 dest, add1, add2, addc, zero, in1, in2;
2668     TCGv_i64 cout;
2669 
2670     nullify_over(ctx);
2671 
2672     in1 = load_gpr(ctx, a->r1);
2673     in2 = load_gpr(ctx, a->r2);
2674 
2675     add1 = tcg_temp_new();
2676     add2 = tcg_temp_new();
2677     addc = tcg_temp_new();
2678     dest = tcg_temp_new();
2679     zero = tcg_constant_i64(0);
2680 
2681     /* Form R1 << 1 | PSW[CB]{8}.  */
2682     tcg_gen_add_i64(add1, in1, in1);
2683     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2684 
2685     /*
2686      * Add or subtract R2, depending on PSW[V].  Proper computation of
2687      * carry requires that we subtract via + ~R2 + 1, as described in
2688      * the manual.  By extracting and masking V, we can produce the
2689      * proper inputs to the addition without movcond.
2690      */
2691     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2692     tcg_gen_xor_i64(add2, in2, addc);
2693     tcg_gen_andi_i64(addc, addc, 1);
2694 
2695     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2696     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2697 
2698     /* Write back the result register.  */
2699     save_gpr(ctx, a->t, dest);
2700 
2701     /* Write back PSW[CB].  */
2702     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2703     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2704 
2705     /* Write back PSW[V] for the division step.  */
2706     cout = get_psw_carry(ctx, false);
2707     tcg_gen_neg_i64(cpu_psw_v, cout);
2708     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2709 
2710     /* Install the new nullification.  */
2711     if (a->cf) {
2712         TCGv_i64 sv = NULL;
2713         if (cond_need_sv(a->cf >> 1)) {
2714             /* ??? The lshift is supposed to contribute to overflow.  */
2715             sv = do_add_sv(ctx, dest, add1, add2);
2716         }
2717         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2718     }
2719 
2720     return nullify_end(ctx);
2721 }
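
/*
 * An illustrative sketch of the movcond-free trick above: with a mask of
 * 0 or all-ones derived from PSW[V], x + (y ^ mask) + (mask & 1) computes
 * x + y or x - y, since -y == ~y + 1.
 */
static uint64_t cond_add_or_sub(uint64_t x, uint64_t y, bool subtract)
{
    uint64_t mask = -(uint64_t)subtract;    /* 0 or all-ones */

    return x + (y ^ mask) + (mask & 1);
}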
2722 
2723 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2724 {
2725     return do_add_imm(ctx, a, false, false);
2726 }
2727 
2728 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2729 {
2730     return do_add_imm(ctx, a, true, false);
2731 }
2732 
2733 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2734 {
2735     return do_add_imm(ctx, a, false, true);
2736 }
2737 
2738 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2739 {
2740     return do_add_imm(ctx, a, true, true);
2741 }
2742 
2743 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2744 {
2745     return do_sub_imm(ctx, a, false);
2746 }
2747 
2748 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2749 {
2750     return do_sub_imm(ctx, a, true);
2751 }
2752 
2753 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2754 {
2755     TCGv_i64 tcg_im, tcg_r2;
2756 
2757     if (a->cf) {
2758         nullify_over(ctx);
2759     }
2760 
2761     tcg_im = tcg_constant_i64(a->i);
2762     tcg_r2 = load_gpr(ctx, a->r);
2763     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2764 
2765     return nullify_end(ctx);
2766 }
2767 
2768 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2769 {
2770     if (!ctx->is_pa20 && a->size > MO_32) {
2771         return gen_illegal(ctx);
2772     }
2773     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2774                    a->disp, a->sp, a->m, a->size | MO_TE);
2775 }
2776 
2777 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2778 {
2779     assert(a->x == 0 && a->scale == 0);
2780     if (!ctx->is_pa20 && a->size > MO_32) {
2781         return gen_illegal(ctx);
2782     }
2783     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2784 }
2785 
2786 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2787 {
2788     MemOp mop = MO_TE | MO_ALIGN | a->size;
2789     TCGv_i64 zero, dest, ofs;
2790     TCGv_i64 addr;
2791 
2792     if (!ctx->is_pa20 && a->size > MO_32) {
2793         return gen_illegal(ctx);
2794     }
2795 
2796     nullify_over(ctx);
2797 
2798     if (a->m) {
2799         /* Base register modification.  Make sure that if RT == RB,
2800            we see the result of the load.  */
2801         dest = tcg_temp_new();
2802     } else {
2803         dest = dest_gpr(ctx, a->t);
2804     }
2805 
2806     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2807              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2808 
2809     /*
2810      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2811      * However, actual hardware succeeds when aligned mod 4.
2812      * Detect this case and log a GUEST_ERROR.
2813      *
2814      * TODO: HPPA64 relaxes the over-alignment requirement
2815      * with the ,co completer.
2816      */
2817     gen_helper_ldc_check(addr);
2818 
2819     zero = tcg_constant_i64(0);
2820     tcg_gen_atomic_xchg_i64(dest, addr, zero, ctx->mmu_idx, mop);
2821 
2822     if (a->m) {
2823         save_gpr(ctx, a->b, ofs);
2824     }
2825     save_gpr(ctx, a->t, dest);
2826 
2827     return nullify_end(ctx);
2828 }
2829 
2830 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2831 {
2832     TCGv_i64 ofs, val;
2833     TCGv_i64 addr;
2834 
2835     nullify_over(ctx);
2836 
2837     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2838              ctx->mmu_idx == MMU_PHYS_IDX);
2839     val = load_gpr(ctx, a->r);
2840     if (a->a) {
2841         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2842             gen_helper_stby_e_parallel(tcg_env, addr, val);
2843         } else {
2844             gen_helper_stby_e(tcg_env, addr, val);
2845         }
2846     } else {
2847         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2848             gen_helper_stby_b_parallel(tcg_env, addr, val);
2849         } else {
2850             gen_helper_stby_b(tcg_env, addr, val);
2851         }
2852     }
2853     if (a->m) {
2854         tcg_gen_andi_i64(ofs, ofs, ~3);
2855         save_gpr(ctx, a->b, ofs);
2856     }
2857 
2858     return nullify_end(ctx);
2859 }
2860 
2861 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
2862 {
2863     TCGv_i64 ofs, val;
2864     TCGv_i64 addr;
2865 
2866     if (!ctx->is_pa20) {
2867         return false;
2868     }
2869     nullify_over(ctx);
2870 
2871     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2872              ctx->mmu_idx == MMU_PHYS_IDX);
2873     val = load_gpr(ctx, a->r);
2874     if (a->a) {
2875         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2876             gen_helper_stdby_e_parallel(tcg_env, addr, val);
2877         } else {
2878             gen_helper_stdby_e(tcg_env, addr, val);
2879         }
2880     } else {
2881         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2882             gen_helper_stdby_b_parallel(tcg_env, addr, val);
2883         } else {
2884             gen_helper_stdby_b(tcg_env, addr, val);
2885         }
2886     }
2887     if (a->m) {
2888         tcg_gen_andi_i64(ofs, ofs, ~7);
2889         save_gpr(ctx, a->b, ofs);
2890     }
2891 
2892     return nullify_end(ctx);
2893 }
2894 
2895 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2896 {
2897     int hold_mmu_idx = ctx->mmu_idx;
2898 
2899     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2900     ctx->mmu_idx = MMU_PHYS_IDX;
2901     trans_ld(ctx, a);
2902     ctx->mmu_idx = hold_mmu_idx;
2903     return true;
2904 }
2905 
2906 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2907 {
2908     int hold_mmu_idx = ctx->mmu_idx;
2909 
2910     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2911     ctx->mmu_idx = MMU_PHYS_IDX;
2912     trans_st(ctx, a);
2913     ctx->mmu_idx = hold_mmu_idx;
2914     return true;
2915 }
2916 
2917 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2918 {
2919     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2920 
2921     tcg_gen_movi_i64(tcg_rt, a->i);
2922     save_gpr(ctx, a->t, tcg_rt);
2923     cond_free(&ctx->null_cond);
2924     return true;
2925 }
2926 
2927 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2928 {
2929     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
2930     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
2931 
2932     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
2933     save_gpr(ctx, 1, tcg_r1);
2934     cond_free(&ctx->null_cond);
2935     return true;
2936 }
2937 
2938 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2939 {
2940     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2941 
2942     /* Special case rb == 0 for the LDI pseudo-op.
2943        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
2944     if (a->b == 0) {
2945         tcg_gen_movi_i64(tcg_rt, a->i);
2946     } else {
2947         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
2948     }
2949     save_gpr(ctx, a->t, tcg_rt);
2950     cond_free(&ctx->null_cond);
2951     return true;
2952 }
2953 
2954 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
2955                     unsigned c, unsigned f, bool d, unsigned n, int disp)
2956 {
2957     TCGv_i64 dest, in2, sv;
2958     DisasCond cond;
2959 
2960     in2 = load_gpr(ctx, r);
2961     dest = tcg_temp_new();
2962 
2963     tcg_gen_sub_i64(dest, in1, in2);
2964 
2965     sv = NULL;
2966     if (cond_need_sv(c)) {
2967         sv = do_sub_sv(ctx, dest, in1, in2);
2968     }
2969 
2970     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
2971     return do_cbranch(ctx, disp, n, &cond);
2972 }
2973 
2974 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2975 {
2976     if (!ctx->is_pa20 && a->d) {
2977         return false;
2978     }
2979     nullify_over(ctx);
2980     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
2981                    a->c, a->f, a->d, a->n, a->disp);
2982 }
2983 
2984 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
2985 {
2986     if (!ctx->is_pa20 && a->d) {
2987         return false;
2988     }
2989     nullify_over(ctx);
2990     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
2991                    a->c, a->f, a->d, a->n, a->disp);
2992 }
2993 
2994 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
2995                     unsigned c, unsigned f, unsigned n, int disp)
2996 {
2997     TCGv_i64 dest, in2, sv, cb_cond;
2998     DisasCond cond;
2999     bool d = false;
3000 
3001     /*
3002      * For hppa64, the ADDB conditions change with PSW.W,
3003      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3004      */
3005     if (ctx->tb_flags & PSW_W) {
3006         d = c >= 5;
3007         if (d) {
3008             c &= 3;
3009         }
3010     }
3011 
3012     in2 = load_gpr(ctx, r);
3013     dest = tcg_temp_new();
3014     sv = NULL;
3015     cb_cond = NULL;
3016 
3017     if (cond_need_cb(c)) {
3018         TCGv_i64 cb = tcg_temp_new();
3019         TCGv_i64 cb_msb = tcg_temp_new();
3020 
3021         tcg_gen_movi_i64(cb_msb, 0);
3022         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3023         tcg_gen_xor_i64(cb, in1, in2);
3024         tcg_gen_xor_i64(cb, cb, dest);
3025         cb_cond = get_carry(ctx, d, cb, cb_msb);
3026     } else {
3027         tcg_gen_add_i64(dest, in1, in2);
3028     }
3029     if (cond_need_sv(c)) {
3030         sv = do_add_sv(ctx, dest, in1, in2);
3031     }
3032 
3033     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3034     save_gpr(ctx, r, dest);
3035     return do_cbranch(ctx, disp, n, &cond);
3036 }
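
/*
 * An illustrative sketch of the condition remapping above (hypothetical
 * helper): with PSW[W] set, ADDB condition values 5..7 select the
 * double-word variants of conditions 1..3 instead of ZNV/SV/OD.
 */
static void addb_remap_cond(unsigned c, bool psw_w,
                            unsigned *c_out, bool *d_out)
{
    bool d = psw_w && c >= 5;

    *c_out = d ? c & 3 : c;
    *d_out = d;
}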
3037 
3038 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3039 {
3040     nullify_over(ctx);
3041     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3042 }
3043 
3044 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3045 {
3046     nullify_over(ctx);
3047     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3048 }
3049 
3050 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3051 {
3052     TCGv_i64 tmp, tcg_r;
3053     DisasCond cond;
3054 
3055     nullify_over(ctx);
3056 
3057     tmp = tcg_temp_new();
3058     tcg_r = load_gpr(ctx, a->r);
3059     if (cond_need_ext(ctx, a->d)) {
3060         /* Force shift into [32,63] */
3061         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3062         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3063     } else {
3064         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3065     }
3066 
3067     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3068     return do_cbranch(ctx, a->disp, a->n, &cond);
3069 }
3070 
3071 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3072 {
3073     TCGv_i64 tmp, tcg_r;
3074     DisasCond cond;
3075     int p;
3076 
3077     nullify_over(ctx);
3078 
3079     tmp = tcg_temp_new();
3080     tcg_r = load_gpr(ctx, a->r);
3081     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3082     tcg_gen_shli_i64(tmp, tcg_r, p);
3083 
3084     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3085     return do_cbranch(ctx, a->disp, a->n, &cond);
3086 }
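
/*
 * An illustrative sketch: BB tests a bit given in big-endian numbering by
 * shifting it into the sign bit, exactly as the shli/shl above; 32-bit
 * tests add 32 to the shift count so that register bit 31 - p lands in
 * the sign position.
 */
static bool bb_bit_is_set(uint64_t r, unsigned p, bool wide)
{
    unsigned sh = p + (wide ? 0 : 32);

    return (int64_t)(r << sh) < 0;
}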
3087 
3088 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3089 {
3090     TCGv_i64 dest;
3091     DisasCond cond;
3092 
3093     nullify_over(ctx);
3094 
3095     dest = dest_gpr(ctx, a->r2);
3096     if (a->r1 == 0) {
3097         tcg_gen_movi_i64(dest, 0);
3098     } else {
3099         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3100     }
3101 
3102     /* All MOVB conditions are 32-bit. */
3103     cond = do_sed_cond(ctx, a->c, false, dest);
3104     return do_cbranch(ctx, a->disp, a->n, &cond);
3105 }
3106 
3107 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3108 {
3109     TCGv_i64 dest;
3110     DisasCond cond;
3111 
3112     nullify_over(ctx);
3113 
3114     dest = dest_gpr(ctx, a->r);
3115     tcg_gen_movi_i64(dest, a->i);
3116 
3117     /* All MOVBI conditions are 32-bit. */
3118     cond = do_sed_cond(ctx, a->c, false, dest);
3119     return do_cbranch(ctx, a->disp, a->n, &cond);
3120 }
3121 
3122 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3123 {
3124     TCGv_i64 dest, src2;
3125 
3126     if (!ctx->is_pa20 && a->d) {
3127         return false;
3128     }
3129     if (a->c) {
3130         nullify_over(ctx);
3131     }
3132 
3133     dest = dest_gpr(ctx, a->t);
3134     src2 = load_gpr(ctx, a->r2);
3135     if (a->r1 == 0) {
3136         if (a->d) {
3137             tcg_gen_shr_i64(dest, src2, cpu_sar);
3138         } else {
3139             TCGv_i64 tmp = tcg_temp_new();
3140 
3141             tcg_gen_ext32u_i64(dest, src2);
3142             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3143             tcg_gen_shr_i64(dest, dest, tmp);
3144         }
3145     } else if (a->r1 == a->r2) {
3146         if (a->d) {
3147             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3148         } else {
3149             TCGv_i32 t32 = tcg_temp_new_i32();
3150             TCGv_i32 s32 = tcg_temp_new_i32();
3151 
3152             tcg_gen_extrl_i64_i32(t32, src2);
3153             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3154             tcg_gen_andi_i32(s32, s32, 31);
3155             tcg_gen_rotr_i32(t32, t32, s32);
3156             tcg_gen_extu_i32_i64(dest, t32);
3157         }
3158     } else {
3159         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3160 
3161         if (a->d) {
3162             TCGv_i64 t = tcg_temp_new();
3163             TCGv_i64 n = tcg_temp_new();
3164 
3165             tcg_gen_xori_i64(n, cpu_sar, 63);
3166             tcg_gen_shl_i64(t, src2, n);
3167             tcg_gen_shli_i64(t, t, 1);
3168             tcg_gen_shr_i64(dest, src1, cpu_sar);
3169             tcg_gen_or_i64(dest, dest, t);
3170         } else {
3171             TCGv_i64 t = tcg_temp_new_i64();
3172             TCGv_i64 s = tcg_temp_new_i64();
3173 
3174             tcg_gen_concat32_i64(t, src2, src1);
3175             tcg_gen_andi_i64(s, cpu_sar, 31);
3176             tcg_gen_shr_i64(dest, t, s);
3177         }
3178     }
3179     save_gpr(ctx, a->t, dest);
3180 
3181     /* Install the new nullification.  */
3182     cond_free(&ctx->null_cond);
3183     if (a->c) {
3184         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3185     }
3186     return nullify_end(ctx);
3187 }
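
/*
 * An illustrative sketch: SHRPW is a funnel shift, extracting 32 bits
 * from the concatenation r1:r2 (r1 high) shifted right by the shift
 * amount; r1 == 0 degenerates to a plain shift and r1 == r2 to a rotate,
 * the special cases handled above.
 */
static uint32_t shrpw(uint32_t r1, uint32_t r2, unsigned sa)
{
    uint64_t pair = ((uint64_t)r1 << 32) | r2;

    return (uint32_t)(pair >> (sa & 31));
}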
3188 
3189 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3190 {
3191     unsigned width, sa;
3192     TCGv_i64 dest, t2;
3193 
3194     if (!ctx->is_pa20 && a->d) {
3195         return false;
3196     }
3197     if (a->c) {
3198         nullify_over(ctx);
3199     }
3200 
3201     width = a->d ? 64 : 32;
3202     sa = width - 1 - a->cpos;
3203 
3204     dest = dest_gpr(ctx, a->t);
3205     t2 = load_gpr(ctx, a->r2);
3206     if (a->r1 == 0) {
3207         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3208     } else if (width == TARGET_LONG_BITS) {
3209         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3210     } else {
3211         assert(!a->d);
3212         if (a->r1 == a->r2) {
3213             TCGv_i32 t32 = tcg_temp_new_i32();
3214             tcg_gen_extrl_i64_i32(t32, t2);
3215             tcg_gen_rotri_i32(t32, t32, sa);
3216             tcg_gen_extu_i32_i64(dest, t32);
3217         } else {
3218             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3219             tcg_gen_extract_i64(dest, dest, sa, 32);
3220         }
3221     }
3222     save_gpr(ctx, a->t, dest);
3223 
3224     /* Install the new nullification.  */
3225     cond_free(&ctx->null_cond);
3226     if (a->c) {
3227         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3228     }
3229     return nullify_end(ctx);
3230 }
3231 
3232 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3233 {
3234     unsigned widthm1 = a->d ? 63 : 31;
3235     TCGv_i64 dest, src, tmp;
3236 
3237     if (!ctx->is_pa20 && a->d) {
3238         return false;
3239     }
3240     if (a->c) {
3241         nullify_over(ctx);
3242     }
3243 
3244     dest = dest_gpr(ctx, a->t);
3245     src = load_gpr(ctx, a->r);
3246     tmp = tcg_temp_new();
3247 
3248     /* Recall that SAR uses big-endian bit numbering.  */
3249     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3250     tcg_gen_xori_i64(tmp, tmp, widthm1);
3251 
3252     if (a->se) {
3253         if (!a->d) {
3254             tcg_gen_ext32s_i64(dest, src);
3255             src = dest;
3256         }
3257         tcg_gen_sar_i64(dest, src, tmp);
3258         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3259     } else {
3260         if (!a->d) {
3261             tcg_gen_ext32u_i64(dest, src);
3262             src = dest;
3263         }
3264         tcg_gen_shr_i64(dest, src, tmp);
3265         tcg_gen_extract_i64(dest, dest, 0, a->len);
3266     }
3267     save_gpr(ctx, a->t, dest);
3268 
3269     /* Install the new nullification.  */
3270     cond_free(&ctx->null_cond);
3271     if (a->c) {
3272         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3273     }
3274     return nullify_end(ctx);
3275 }
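
/*
 * An illustrative sketch: since widthm1 is 2**k - 1, the subtraction
 * widthm1 - x never borrows and equals x ^ widthm1, which is how the
 * andi/xori pair above converts SAR's big-endian bit number into an
 * ordinary right-shift count.
 */
static unsigned sar_to_shift_count(uint64_t sar, bool wide)
{
    unsigned widthm1 = wide ? 63 : 31;

    return ((unsigned)sar & widthm1) ^ widthm1;
}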
3276 
3277 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3278 {
3279     unsigned len, cpos, width;
3280     TCGv_i64 dest, src;
3281 
3282     if (!ctx->is_pa20 && a->d) {
3283         return false;
3284     }
3285     if (a->c) {
3286         nullify_over(ctx);
3287     }
3288 
3289     len = a->len;
3290     width = a->d ? 64 : 32;
3291     cpos = width - 1 - a->pos;
3292     if (cpos + len > width) {
3293         len = width - cpos;
3294     }
3295 
3296     dest = dest_gpr(ctx, a->t);
3297     src = load_gpr(ctx, a->r);
3298     if (a->se) {
3299         tcg_gen_sextract_i64(dest, src, cpos, len);
3300     } else {
3301         tcg_gen_extract_i64(dest, src, cpos, len);
3302     }
3303     save_gpr(ctx, a->t, dest);
3304 
3305     /* Install the new nullification.  */
3306     cond_free(&ctx->null_cond);
3307     if (a->c) {
3308         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3309     }
3310     return nullify_end(ctx);
3311 }
3312 
3313 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3314 {
3315     unsigned len, width;
3316     uint64_t mask0, mask1;
3317     TCGv_i64 dest;
3318 
3319     if (!ctx->is_pa20 && a->d) {
3320         return false;
3321     }
3322     if (a->c) {
3323         nullify_over(ctx);
3324     }
3325 
3326     len = a->len;
3327     width = a->d ? 64 : 32;
3328     if (a->cpos + len > width) {
3329         len = width - a->cpos;
3330     }
3331 
3332     dest = dest_gpr(ctx, a->t);
3333     mask0 = deposit64(0, a->cpos, len, a->i);
3334     mask1 = deposit64(-1, a->cpos, len, a->i);
3335 
3336     if (a->nz) {
3337         TCGv_i64 src = load_gpr(ctx, a->t);
3338         tcg_gen_andi_i64(dest, src, mask1);
3339         tcg_gen_ori_i64(dest, dest, mask0);
3340     } else {
3341         tcg_gen_movi_i64(dest, mask0);
3342     }
3343     save_gpr(ctx, a->t, dest);
3344 
3345     /* Install the new nullification.  */
3346     cond_free(&ctx->null_cond);
3347     if (a->c) {
3348         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3349     }
3350     return nullify_end(ctx);
3351 }
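
/*
 * An illustrative sketch (hypothetical helper): with an immediate value
 * and a fixed field position, the two masks above are translation-time
 * constants; mask0 carries the deposited bits and mask1 additionally
 * preserves everything outside the field.
 */
static uint64_t depi(uint64_t old, unsigned cpos, unsigned len,
                     uint64_t imm, bool nz)
{
    uint64_t field = (len < 64 ? (1ull << len) - 1 : ~0ull) << cpos;
    uint64_t mask0 = (imm << cpos) & field;     /* deposit64(0, ...)  */
    uint64_t mask1 = mask0 | ~field;            /* deposit64(-1, ...) */

    return nz ? (old & mask1) | mask0 : mask0;
}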
3352 
3353 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3354 {
3355     unsigned rs = a->nz ? a->t : 0;
3356     unsigned len, width;
3357     TCGv_i64 dest, val;
3358 
3359     if (!ctx->is_pa20 && a->d) {
3360         return false;
3361     }
3362     if (a->c) {
3363         nullify_over(ctx);
3364     }
3365 
3366     len = a->len;
3367     width = a->d ? 64 : 32;
3368     if (a->cpos + len > width) {
3369         len = width - a->cpos;
3370     }
3371 
3372     dest = dest_gpr(ctx, a->t);
3373     val = load_gpr(ctx, a->r);
3374     if (rs == 0) {
3375         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3376     } else {
3377         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3378     }
3379     save_gpr(ctx, a->t, dest);
3380 
3381     /* Install the new nullification.  */
3382     cond_free(&ctx->null_cond);
3383     if (a->c) {
3384         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3385     }
3386     return nullify_end(ctx);
3387 }
3388 
3389 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3390                        bool d, bool nz, unsigned len, TCGv_i64 val)
3391 {
3392     unsigned rs = nz ? rt : 0;
3393     unsigned widthm1 = d ? 63 : 31;
3394     TCGv_i64 mask, tmp, shift, dest;
3395     uint64_t msb = 1ULL << (len - 1);
3396 
3397     dest = dest_gpr(ctx, rt);
3398     shift = tcg_temp_new();
3399     tmp = tcg_temp_new();
3400 
3401     /* Convert big-endian bit numbering in SAR to left-shift.  */
3402     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3403     tcg_gen_xori_i64(shift, shift, widthm1);
3404 
3405     mask = tcg_temp_new();
3406     tcg_gen_movi_i64(mask, msb + (msb - 1));
3407     tcg_gen_and_i64(tmp, val, mask);
3408     if (rs) {
3409         tcg_gen_shl_i64(mask, mask, shift);
3410         tcg_gen_shl_i64(tmp, tmp, shift);
3411         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3412         tcg_gen_or_i64(dest, dest, tmp);
3413     } else {
3414         tcg_gen_shl_i64(dest, tmp, shift);
3415     }
3416     save_gpr(ctx, rt, dest);
3417 
3418     /* Install the new nullification.  */
3419     cond_free(&ctx->null_cond);
3420     if (c) {
3421         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3422     }
3423     return nullify_end(ctx);
3424 }
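
/*
 * An illustrative sketch of the variable-position deposit above: shift
 * both the field mask and the masked value into place, clear the field
 * with and-not, then or in the new bits.
 */
static uint64_t dep_at(uint64_t old, uint64_t val,
                       unsigned len, unsigned shift)
{
    uint64_t mask = (len < 64 ? (1ull << len) - 1 : ~0ull);

    return (old & ~(mask << shift)) | ((val & mask) << shift);
}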
3425 
3426 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3427 {
3428     if (!ctx->is_pa20 && a->d) {
3429         return false;
3430     }
3431     if (a->c) {
3432         nullify_over(ctx);
3433     }
3434     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3435                       load_gpr(ctx, a->r));
3436 }
3437 
3438 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3439 {
3440     if (!ctx->is_pa20 && a->d) {
3441         return false;
3442     }
3443     if (a->c) {
3444         nullify_over(ctx);
3445     }
3446     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3447                       tcg_constant_i64(a->i));
3448 }
3449 
3450 static bool trans_be(DisasContext *ctx, arg_be *a)
3451 {
3452     TCGv_i64 tmp;
3453 
3454 #ifdef CONFIG_USER_ONLY
3455     /* ??? It seems like there should be a good way of using
3456        "be disp(sr2, r0)", the canonical gateway entry mechanism
3457        to our advantage.  But that appears to be inconvenient to
3458        manage alongside branch delay slots.  Therefore we handle
3459        entry into the gateway page via absolute address.  */
3460     /* Since we don't implement spaces, just branch.  Do notice the special
3461        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3462        goto_tb to the TB containing the syscall.  */
3463     if (a->b == 0) {
3464         return do_dbranch(ctx, a->disp, a->l, a->n);
3465     }
3466 #else
3467     nullify_over(ctx);
3468 #endif
3469 
3470     tmp = tcg_temp_new();
3471     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3472     tmp = do_ibranch_priv(ctx, tmp);
3473 
3474 #ifdef CONFIG_USER_ONLY
3475     return do_ibranch(ctx, tmp, a->l, a->n);
3476 #else
3477     TCGv_i64 new_spc = tcg_temp_new_i64();
3478 
3479     load_spr(ctx, new_spc, a->sp);
3480     if (a->l) {
3481         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3482         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3483     }
3484     if (a->n && use_nullify_skip(ctx)) {
3485         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3486         tcg_gen_addi_i64(tmp, tmp, 4);
3487         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3488         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3489         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3490     } else {
3491         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3492         if (ctx->iaoq_b == -1) {
3493             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3494         }
3495         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3496         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3497         nullify_set(ctx, a->n);
3498     }
3499     tcg_gen_lookup_and_goto_ptr();
3500     ctx->base.is_jmp = DISAS_NORETURN;
3501     return nullify_end(ctx);
3502 #endif
3503 }
3504 
3505 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3506 {
3507     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3508 }
3509 
3510 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3511 {
3512     uint64_t dest = iaoq_dest(ctx, a->disp);
3513 
3514     nullify_over(ctx);
3515 
3516     /* Make sure the caller hasn't done something weird with the queue.
3517      * ??? This is not quite the same as the PSW[B] bit, which would be
3518      * expensive to track.  Real hardware will trap for
3519      *    b  gateway
3520      *    b  gateway+4  (in delay slot of first branch)
3521      * However, checking for a non-sequential instruction queue *will*
3522      * diagnose the security hole
3523      *    b  gateway
3524      *    b  evil
3525      * in which instructions at evil would run with increased privs.
3526      */
3527     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3528         return gen_illegal(ctx);
3529     }
3530 
3531 #ifndef CONFIG_USER_ONLY
3532     if (ctx->tb_flags & PSW_C) {
3533         CPUHPPAState *env = cpu_env(ctx->cs);
3534         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3535         /* If we could not find a TLB entry, then we need to generate an
3536            ITLB miss exception so the kernel will provide it.
3537            The resulting TLB fill operation will invalidate this TB and
3538            we will re-translate, at which point we *will* be able to find
3539            the TLB entry and determine if this is in fact a gateway page.  */
3540         if (type < 0) {
3541             gen_excp(ctx, EXCP_ITLB_MISS);
3542             return true;
3543         }
3544         /* No change for non-gateway pages or for priv decrease.  */
3545         if (type >= 4 && type - 4 < ctx->privilege) {
3546             dest = deposit32(dest, 0, 2, type - 4);
3547         }
3548     } else {
3549         dest &= -4;  /* priv = 0 */
3550     }
3551 #endif
3552 
3553     if (a->l) {
3554         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3555         if (ctx->privilege < 3) {
3556             tcg_gen_andi_i64(tmp, tmp, -4);
3557         }
3558         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3559         save_gpr(ctx, a->l, tmp);
3560     }
3561 
3562     return do_dbranch(ctx, dest, 0, a->n);
3563 }
3564 
3565 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3566 {
3567     if (a->x) {
3568         TCGv_i64 tmp = tcg_temp_new();
3569         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3570         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3571         /* The computation here never changes privilege level.  */
3572         return do_ibranch(ctx, tmp, a->l, a->n);
3573     } else {
3574         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3575         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3576     }
3577 }
3578 
3579 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3580 {
3581     TCGv_i64 dest;
3582 
3583     if (a->x == 0) {
3584         dest = load_gpr(ctx, a->b);
3585     } else {
3586         dest = tcg_temp_new();
3587         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3588         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3589     }
3590     dest = do_ibranch_priv(ctx, dest);
3591     return do_ibranch(ctx, dest, 0, a->n);
3592 }
3593 
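/*
 * BVE: in system mode the space queue must be updated as well, with
 * the new front space derived from the branch target via
 * space_select.  The user-only build has a flat address space and
 * can take the common do_ibranch path.
 */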
3594 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3595 {
3596     TCGv_i64 dest;
3597 
3598 #ifdef CONFIG_USER_ONLY
3599     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3600     return do_ibranch(ctx, dest, a->l, a->n);
3601 #else
3602     nullify_over(ctx);
3603     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3604 
3605     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3606     if (ctx->iaoq_b == -1) {
3607         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3608     }
3609     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3610     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3611     if (a->l) {
3612         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3613     }
3614     nullify_set(ctx, a->n);
3615     tcg_gen_lookup_and_goto_ptr();
3616     ctx->base.is_jmp = DISAS_NORETURN;
3617     return nullify_end(ctx);
3618 #endif
3619 }
3620 
3621 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3622 {
3623     /* All branch target stack instructions are implemented as nops. */
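    /* Returning false on pre-2.0 cpus rejects the decode, so that
       these insns fault as illegal there.  */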
3624     return ctx->is_pa20;
3625 }
3626 
3627 /*
3628  * Float class 0
3629  */
3630 
3631 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3632 {
3633     tcg_gen_mov_i32(dst, src);
3634 }
3635 
3636 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3637 {
3638     uint64_t ret;
3639 
3640     if (ctx->is_pa20) {
3641         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3642     } else {
3643         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3644     }
3645 
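    /*
     * FID places the model and revision of the FPU into the status
     * register (fr0); the constants above are the identities reported
     * for the two emulated cpu families.
     */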
3646     nullify_over(ctx);
3647     save_frd(0, tcg_constant_i64(ret));
3648     return nullify_end(ctx);
3649 }
3650 
3651 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3652 {
3653     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3654 }
3655 
3656 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3657 {
3658     tcg_gen_mov_i64(dst, src);
3659 }
3660 
3661 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3662 {
3663     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3664 }
3665 
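/*
 * FABS, FNEG and FNEGABS below manipulate only the sign bit, so they
 * are implemented with plain integer and/xor/or and cannot raise
 * floating-point exceptions.
 */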
3666 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3667 {
3668     tcg_gen_andi_i32(dst, src, INT32_MAX);
3669 }
3670 
3671 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3672 {
3673     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3674 }
3675 
3676 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3677 {
3678     tcg_gen_andi_i64(dst, src, INT64_MAX);
3679 }
3680 
3681 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3682 {
3683     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3684 }
3685 
3686 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3687 {
3688     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3689 }
3690 
3691 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3692 {
3693     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3694 }
3695 
3696 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3699 }
3700 
3701 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3704 }
3705 
3706 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3707 {
3708     tcg_gen_xori_i32(dst, src, INT32_MIN);
3709 }
3710 
3711 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3714 }
3715 
3716 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3717 {
3718     tcg_gen_xori_i64(dst, src, INT64_MIN);
3719 }
3720 
3721 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3724 }
3725 
3726 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3727 {
3728     tcg_gen_ori_i32(dst, src, INT32_MIN);
3729 }
3730 
3731 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3734 }
3735 
3736 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3737 {
3738     tcg_gen_ori_i64(dst, src, INT64_MIN);
3739 }
3740 
3741 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3744 }
3745 
3746 /*
3747  * Float class 1
3748  */
3749 
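/*
 * Conversion helper naming: s/d are single/double floats, w/dw are
 * signed 32/64-bit integers and uw/udw their unsigned forms; the
 * fcnv_t_* helpers implement the ,t completer, which rounds toward
 * zero regardless of the current rounding mode.
 */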
3750 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3751 {
3752     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3753 }
3754 
3755 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3756 {
3757     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3758 }
3759 
3760 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3761 {
3762     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3763 }
3764 
3765 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3766 {
3767     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3768 }
3769 
3770 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3771 {
3772     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3773 }
3774 
3775 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3776 {
3777     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3778 }
3779 
3780 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3781 {
3782     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3783 }
3784 
3785 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3786 {
3787     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3788 }
3789 
3790 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3791 {
3792     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3793 }
3794 
3795 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3796 {
3797     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3798 }
3799 
3800 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3801 {
3802     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3803 }
3804 
3805 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3806 {
3807     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3808 }
3809 
3810 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3811 {
3812     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3813 }
3814 
3815 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3816 {
3817     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3818 }
3819 
3820 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3821 {
3822     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3823 }
3824 
3825 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3826 {
3827     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3828 }
3829 
3830 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3831 {
3832     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3833 }
3834 
3835 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3836 {
3837     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3838 }
3839 
3840 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3841 {
3842     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3843 }
3844 
3845 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3846 {
3847     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3848 }
3849 
3850 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3851 {
3852     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3853 }
3854 
3855 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3856 {
3857     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3858 }
3859 
3860 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3861 {
3862     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3863 }
3864 
3865 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3866 {
3867     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3868 }
3869 
3870 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3871 {
3872     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3873 }
3874 
3875 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3876 {
3877     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3878 }
3879 
3880 /*
3881  * Float class 2
3882  */
3883 
3884 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3885 {
3886     TCGv_i32 ta, tb, tc, ty;
3887 
3888     nullify_over(ctx);
3889 
3890     ta = load_frw0_i32(a->r1);
3891     tb = load_frw0_i32(a->r2);
3892     ty = tcg_constant_i32(a->y);
3893     tc = tcg_constant_i32(a->c);
3894 
3895     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3896 
3897     return nullify_end(ctx);
3898 }
3899 
3900 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3901 {
3902     TCGv_i64 ta, tb;
3903     TCGv_i32 tc, ty;
3904 
3905     nullify_over(ctx);
3906 
3907     ta = load_frd0(a->r1);
3908     tb = load_frd0(a->r2);
3909     ty = tcg_constant_i32(a->y);
3910     tc = tcg_constant_i32(a->c);
3911 
3912     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3913 
3914     return nullify_end(ctx);
3915 }
3916 
3917 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3918 {
3919     TCGv_i64 t;
3920 
3921     nullify_over(ctx);
3922 
3923     t = tcg_temp_new();
3924     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3925 
3926     if (a->y == 1) {
3927         int mask;
3928         bool inv = false;
3929 
3930         switch (a->c) {
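        /*
         * Within fr0_shadow, bit 26 is the FPSR C bit and the bits
         * below it hold the compare queue.  Each mask selects C plus
         * some number of the most recently queued results: 11 for
         * acc/rej, 7 for acc8/rej8, then 5, 3 and 1.
         */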
3931         case 0: /* simple */
3932             tcg_gen_andi_i64(t, t, 0x4000000);
3933             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3934             goto done;
3935         case 2: /* rej */
3936             inv = true;
3937             /* fallthru */
3938         case 1: /* acc */
3939             mask = 0x43ff800;
3940             break;
3941         case 6: /* rej8 */
3942             inv = true;
3943             /* fallthru */
3944         case 5: /* acc8 */
3945             mask = 0x43f8000;
3946             break;
3947         case 9: /* acc6 */
3948             mask = 0x43e0000;
3949             break;
3950         case 13: /* acc4 */
3951             mask = 0x4380000;
3952             break;
3953         case 17: /* acc2 */
3954             mask = 0x4200000;
3955             break;
3956         default:
3957             gen_illegal(ctx);
3958             return true;
3959         }
3960         if (inv) {
3961             TCGv_i64 c = tcg_constant_i64(mask);
3962             tcg_gen_or_i64(t, t, c);
3963             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3964         } else {
3965             tcg_gen_andi_i64(t, t, mask);
3966             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3967         }
3968     } else {
3969         unsigned cbit = (a->y ^ 1) - 1;
3970 
3971         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
3972         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3973     }
3974 
3975  done:
3976     return nullify_end(ctx);
3977 }
3978 
3979 /*
3980  * Float class 3
3981  */
3982 
3983 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3984 {
3985     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3986 }
3987 
3988 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3989 {
3990     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3991 }
3992 
3993 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3994 {
3995     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3996 }
3997 
3998 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3999 {
4000     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4001 }
4002 
4003 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4004 {
4005     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4006 }
4007 
4008 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4009 {
4010     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4011 }
4012 
4013 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4014 {
4015     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4016 }
4017 
4018 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4019 {
4020     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4021 }
4022 
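/*
 * XMPYU is an integer operation on the FP register file: the 32-bit
 * operands are loaded zero-extended and multiplied into a full
 * 64-bit unsigned product.
 */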
4023 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4024 {
4025     TCGv_i64 x, y;
4026 
4027     nullify_over(ctx);
4028 
4029     x = load_frw0_i64(a->r1);
4030     y = load_frw0_i64(a->r2);
4031     tcg_gen_mul_i64(x, x, y);
4032     save_frd(a->t, x);
4033 
4034     return nullify_end(ctx);
4035 }
4036 
4037 /* Convert the fmpyadd single-precision register encodings to standard.  */
4038 static inline int fmpyadd_s_reg(unsigned r)
4039 {
4040     return (r & 16) * 2 + 16 + (r & 15);
4041 }
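/* E.g. field value 3 maps to single register 19 (the left half of
   fr19) and field value 19 to single register 51 (the right half of
   fr19); the 5-bit fields can only name halves of fr16-fr31.  */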
4042 
4043 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4044 {
4045     int tm = fmpyadd_s_reg(a->tm);
4046     int ra = fmpyadd_s_reg(a->ra);
4047     int ta = fmpyadd_s_reg(a->ta);
4048     int rm2 = fmpyadd_s_reg(a->rm2);
4049     int rm1 = fmpyadd_s_reg(a->rm1);
4050 
4051     nullify_over(ctx);
4052 
4053     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4054     do_fop_weww(ctx, ta, ta, ra,
4055                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4056 
4057     return nullify_end(ctx);
4058 }
4059 
4060 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4061 {
4062     return do_fmpyadd_s(ctx, a, false);
4063 }
4064 
4065 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4066 {
4067     return do_fmpyadd_s(ctx, a, true);
4068 }
4069 
4070 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4071 {
4072     nullify_over(ctx);
4073 
4074     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4075     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4076                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4077 
4078     return nullify_end(ctx);
4079 }
4080 
4081 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4082 {
4083     return do_fmpyadd_d(ctx, a, false);
4084 }
4085 
4086 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4087 {
4088     return do_fmpyadd_d(ctx, a, true);
4089 }
4090 
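/*
 * Unlike FMPYADD above, which issues an independently rounded
 * multiply and add, FMPYFADD is the PA2.0 fused multiply-add with a
 * single rounding, so it goes through dedicated helpers.
 */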
4091 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4092 {
4093     TCGv_i32 x, y, z;
4094 
4095     nullify_over(ctx);
4096     x = load_frw0_i32(a->rm1);
4097     y = load_frw0_i32(a->rm2);
4098     z = load_frw0_i32(a->ra3);
4099 
4100     if (a->neg) {
4101         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4102     } else {
4103         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4104     }
4105 
4106     save_frw_i32(a->t, x);
4107     return nullify_end(ctx);
4108 }
4109 
4110 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4111 {
4112     TCGv_i64 x, y, z;
4113 
4114     nullify_over(ctx);
4115     x = load_frd0(a->rm1);
4116     y = load_frd0(a->rm2);
4117     z = load_frd0(a->ra3);
4118 
4119     if (a->neg) {
4120         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4121     } else {
4122         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4123     }
4124 
4125     save_frd(a->t, x);
4126     return nullify_end(ctx);
4127 }
4128 
4129 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4130 {
4131     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4132 #ifndef CONFIG_USER_ONLY
4133     if (a->i == 0x100) {
4134         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4135         nullify_over(ctx);
4136         gen_helper_diag_btlb(tcg_env);
4137         return nullify_end(ctx);
4138     }
4139 #endif
4140     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4141     return true;
4142 }
4143 
4144 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4145 {
4146     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4147     int bound;
4148 
4149     ctx->cs = cs;
4150     ctx->tb_flags = ctx->base.tb->flags;
4151     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4152 
4153 #ifdef CONFIG_USER_ONLY
4154     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4155     ctx->mmu_idx = MMU_USER_IDX;
4156     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4157     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4158     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4159 #else
4160     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4161     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4162                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4163                     : MMU_PHYS_IDX);
4164 
4165     /* Recover the IAOQ values from the GVA + PRIV.  */
4166     uint64_t cs_base = ctx->base.tb->cs_base;
4167     uint64_t iasq_f = cs_base & ~0xffffffffull;
4168     int32_t diff = cs_base;
4169 
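    /*
     * The low 32 bits of cs_base hold the signed distance from iaoq_f
     * to iaoq_b; zero is reserved to mean unknown, in which case the
     * back of the queue is tracked only at runtime (sentinel -1 here).
     */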
4170     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4171     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4172 #endif
4173     ctx->iaoq_n = -1;
4174     ctx->iaoq_n_var = NULL;
4175 
4176     /* Bound the number of instructions by those left on the page.  */
4177     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4178     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4179 }
4180 
4181 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4182 {
4183     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4184 
4185     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4186     ctx->null_cond = cond_make_f();
4187     ctx->psw_n_nonzero = false;
4188     if (ctx->tb_flags & PSW_N) {
4189         ctx->null_cond.c = TCG_COND_ALWAYS;
4190         ctx->psw_n_nonzero = true;
4191     }
4192     ctx->null_lab = NULL;
4193 }
4194 
4195 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4196 {
4197     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4198 
4199     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4200 }
4201 
4202 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4203 {
4204     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4205     CPUHPPAState *env = cpu_env(cs);
4206     DisasJumpType ret;
4207 
4208     /* Execute one insn.  */
4209 #ifdef CONFIG_USER_ONLY
4210     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4211         do_page_zero(ctx);
4212         ret = ctx->base.is_jmp;
4213         assert(ret != DISAS_NEXT);
4214     } else
4215 #endif
4216     {
4217         /* Always fetch the insn, even if nullified, so that we check
4218            the page permissions for execute.  */
4219         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4220 
4221         /* Set up the IA queue for the next insn.
4222            This will be overwritten by a branch.  */
4223         if (ctx->iaoq_b == -1) {
4224             ctx->iaoq_n = -1;
4225             ctx->iaoq_n_var = tcg_temp_new();
4226             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4227         } else {
4228             ctx->iaoq_n = ctx->iaoq_b + 4;
4229             ctx->iaoq_n_var = NULL;
4230         }
4231 
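        /*
         * If the previous insn unconditionally nullified this one,
         * consume PSW[N] without decoding: the insn was fetched above
         * only for its execute-permission check.
         */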
4232         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4233             ctx->null_cond.c = TCG_COND_NEVER;
4234             ret = DISAS_NEXT;
4235         } else {
4236             ctx->insn = insn;
4237             if (!decode(ctx, insn)) {
4238                 gen_illegal(ctx);
4239             }
4240             ret = ctx->base.is_jmp;
4241             assert(ctx->null_lab == NULL);
4242         }
4243     }
4244 
4245     /* Advance the insn queue.  Note that this check also detects
4246        a privilege change within the instruction queue.  */
4247     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4248         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4249             && use_goto_tb(ctx, ctx->iaoq_b)
4250             && (ctx->null_cond.c == TCG_COND_NEVER
4251                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4252             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4253             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4254             ctx->base.is_jmp = ret = DISAS_NORETURN;
4255         } else {
4256             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4257         }
4258     }
4259     ctx->iaoq_f = ctx->iaoq_b;
4260     ctx->iaoq_b = ctx->iaoq_n;
4261     ctx->base.pc_next += 4;
4262 
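    /*
     * Emit any queue updates required before leaving the insn: if the
     * front of the queue has become variable (iaoq_f == -1), the
     * compile-time values no longer describe it, so flush the queue
     * state into the TCG globals.
     */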
4263     switch (ret) {
4264     case DISAS_NORETURN:
4265     case DISAS_IAQ_N_UPDATED:
4266         break;
4267 
4268     case DISAS_NEXT:
4269     case DISAS_IAQ_N_STALE:
4270     case DISAS_IAQ_N_STALE_EXIT:
4271         if (ctx->iaoq_f == -1) {
4272             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4273             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4274 #ifndef CONFIG_USER_ONLY
4275             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4276 #endif
4277             nullify_save(ctx);
4278             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4279                                 ? DISAS_EXIT
4280                                 : DISAS_IAQ_N_UPDATED);
4281         } else if (ctx->iaoq_b == -1) {
4282             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4283         }
4284         break;
4285 
4286     default:
4287         g_assert_not_reached();
4288     }
4289 }
4290 
4291 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4292 {
4293     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4294     DisasJumpType is_jmp = ctx->base.is_jmp;
4295 
4296     switch (is_jmp) {
4297     case DISAS_NORETURN:
4298         break;
4299     case DISAS_TOO_MANY:
4300     case DISAS_IAQ_N_STALE:
4301     case DISAS_IAQ_N_STALE_EXIT:
4302         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4303         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4304         nullify_save(ctx);
4305         /* FALLTHRU */
4306     case DISAS_IAQ_N_UPDATED:
4307         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4308             tcg_gen_lookup_and_goto_ptr();
4309             break;
4310         }
4311         /* FALLTHRU */
4312     case DISAS_EXIT:
4313         tcg_gen_exit_tb(NULL, 0);
4314         break;
4315     default:
4316         g_assert_not_reached();
4317     }
4318 }
4319 
4320 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4321                               CPUState *cs, FILE *logfile)
4322 {
4323     target_ulong pc = dcbase->pc_first;
4324 
4325 #ifdef CONFIG_USER_ONLY
4326     switch (pc) {
4327     case 0x00:
4328         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4329         return;
4330     case 0xb0:
4331         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4332         return;
4333     case 0xe0:
4334         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4335         return;
4336     case 0x100:
4337         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4338         return;
4339     }
4340 #endif
4341 
4342     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4343     target_disas(logfile, cs, pc, dcbase->tb->size);
4344 }
4345 
4346 static const TranslatorOps hppa_tr_ops = {
4347     .init_disas_context = hppa_tr_init_disas_context,
4348     .tb_start           = hppa_tr_tb_start,
4349     .insn_start         = hppa_tr_insn_start,
4350     .translate_insn     = hppa_tr_translate_insn,
4351     .tb_stop            = hppa_tr_tb_stop,
4352     .disas_log          = hppa_tr_disas_log,
4353 };
4354 
4355 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4356                            target_ulong pc, void *host_pc)
4357 {
4358     DisasContext ctx;
4359     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4360 }
4361