/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

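/*
 * A DisasCond describes the run-time test "a0 <c> a1".  For
 * TCG_COND_NEVER and TCG_COND_ALWAYS the operands are unused;
 * cond_make_n() below builds the common case of testing PSW[N] != 0.
 */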
typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that an explicitly
   encoded sr0 does not collide with 0 meaning "infer the space from
   the base register"; see space_select below.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
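
/*
 * Thus for ma_to_m above: m=0 gives 0 (no base update), m=1,a=0
 * gives 1 (post-modify), and m=1,a=1 gives -1 (pre-modify).
 */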

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
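
/*
 * Worked examples for assemble_6 above: val == 0 (x=0, y=0) yields 32,
 * val == 1 (x=0, y=1) yields 31, and val == 32 (x=1, y=0) yields 64.
 */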

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}
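
/*
 * Note that dest_gpr returns a scratch temp when the insn may yet be
 * nullified, so that the global register is not clobbered early; the
 * result is committed by save_gpr via the movcond in save_or_nullify.
 */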

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
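
/*
 * I.e. a gva offset is 62 bits in wide (PSW_W) mode and 32 bits in
 * narrow mode; the space identifier, OR'd in by form_gva below,
 * occupies the bits above it.
 */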

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}
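
/*
 * The +8 in iaoq_dest accounts for the instruction queue: branch
 * displacements are relative to the word after the delay slot.
 */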

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}
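
/*
 * When cond_need_ext returns true (narrow operation, or not PA2.0),
 * a condition is evaluated on 32 bits, so its inputs must first be
 * sign- or zero-extended from bit 31.
 */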

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
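
/*
 * For example, cf == 5 decodes as c=2 with f=1: the signed "<" test
 * inverted by the final tcg_invert_cond, i.e. signed ">=".
 */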

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}
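
/*
 * In narrow mode the carry out of bit 31 sits at bit 32 of the carry
 * vector CB; in wide mode it is the bit already computed into CB_MSB.
 */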

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
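
/*
 * A worked example of the identity above, sv = (res ^ in1) & ~(in1 ^ in2):
 * for 0x7fffffff + 1 = 0x80000000, res ^ in1 == 0xffffffff and
 * in1 ^ in2 == 0x7ffffffe, so sv == 0x80000001 and the sign bit (V) is set.
 */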

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}
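
/*
 * Likewise for subtraction, sv = (res ^ in1) & (in1 ^ in2): for
 * 0x80000000 - 1 = 0x7fffffff, res ^ in1 == 0xffffffff and
 * in1 ^ in2 == 0x80000001, so again bit 31 (V) is set in sv.
 */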

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_i64 zero = tcg_constant_i64(0);
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    zero = tcg_constant_i64(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
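
/*
 * Note which value forms the access address in form_gva: post-modify
 * (modify > 0) uses the unmodified base, while pre-modify and no-modify
 * use the displaced offset.  The offset returned via *pofs is the value
 * to write back to the base register when modification is requested.
 */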

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
1776         /* The use_nullify_skip test implies we have a known control path.  */
1777         tcg_debug_assert(ctx->iaoq_b != -1);
1778         tcg_debug_assert(ctx->iaoq_n != -1);
1779 
1780         /* We do have to handle the non-local temporary, DEST, before
1781            branching.  Since IAOQ_F is not really live at this point, we
1782            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1783         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1784         next = tcg_temp_new_i64();
1785         tcg_gen_addi_i64(next, dest, 4);
1786         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1787 
1788         nullify_over(ctx);
1789         if (link != 0) {
1790             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1791         }
1792         tcg_gen_lookup_and_goto_ptr();
1793         return nullify_end(ctx);
1794     } else {
1795         c = ctx->null_cond.c;
1796         a0 = ctx->null_cond.a0;
1797         a1 = ctx->null_cond.a1;
1798 
1799         tmp = tcg_temp_new_i64();
1800         next = tcg_temp_new_i64();
1801 
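             /* Select the next IAOQ entry: the sequential successor
                (TMP) if the branch itself is nullified, otherwise the
                branch target (DEST).  */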
1802         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1803         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1804         ctx->iaoq_n = -1;
1805         ctx->iaoq_n_var = next;
1806 
1807         if (link != 0) {
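                 /* Update the link register only when the branch
                    is not nullified.  */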
1808             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1809         }
1810 
1811         if (is_n) {
1812             /* The branch nullifies the next insn, which means the state of N
1813                after the branch is the inverse of the state of N that applied
1814                to the branch.  */
1815             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1816             cond_free(&ctx->null_cond);
1817             ctx->null_cond = cond_make_n();
1818             ctx->psw_n_nonzero = true;
1819         } else {
1820             cond_free(&ctx->null_cond);
1821         }
1822     }
1823     return true;
1824 }
1825 
1826 /* Implement
1827  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1828  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1829  *    else
1830  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1831  * which keeps the privilege level from being increased.
1832  */
1833 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1834 {
1835     TCGv_i64 dest;
1836     switch (ctx->privilege) {
1837     case 0:
1838         /* Privilege 0 is maximum and is allowed to decrease.  */
1839         return offset;
1840     case 3:
1841         /* Privilege 3 is minimum and is never allowed to increase.  */
1842         dest = tcg_temp_new_i64();
1843         tcg_gen_ori_i64(dest, offset, 3);
1844         break;
1845     default:
1846         dest = tcg_temp_new_i64();
1847         tcg_gen_andi_i64(dest, offset, -4);
1848         tcg_gen_ori_i64(dest, dest, ctx->privilege);
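             /* Take the numerically larger (less privileged) of the
                two values, so the privilege level can never increase.  */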
1849         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1850         break;
1851     }
1852     return dest;
1853 }
1854 
1855 #ifdef CONFIG_USER_ONLY
1856 /* On Linux, page zero is normally marked execute only + gateway.
1857    Therefore normal read or write is supposed to fail, but specific
1858    offsets have kernel code mapped to raise permissions to implement
1859    system calls.  Handling this via an explicit check here, rather
1860    than in the "be disp(sr2,r0)" instruction that probably sent us
1861    here, is the easiest way to handle the branch delay slot on the
1862    aforementioned BE.  */
1863 static void do_page_zero(DisasContext *ctx)
1864 {
1865     TCGv_i64 tmp;
1866 
1867     /* If by some means we get here with PSW[N]=1, that implies that
1868        the B,GATE instruction would be skipped, and we'd fault on the
1869        next insn within the privileged page.  */
1870     switch (ctx->null_cond.c) {
1871     case TCG_COND_NEVER:
1872         break;
1873     case TCG_COND_ALWAYS:
1874         tcg_gen_movi_i64(cpu_psw_n, 0);
1875         goto do_sigill;
1876     default:
1877         /* Since this is always the first (and only) insn within the
1878            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1879         g_assert_not_reached();
1880     }
1881 
1882     /* Check that we didn't arrive here via some means that allowed
1883        non-sequential instruction execution.  Normally the PSW[B] bit
1884        detects this by disallowing the B,GATE instruction to execute
1885        under such conditions.  */
1886     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1887         goto do_sigill;
1888     }
1889 
1890     switch (ctx->iaoq_f & -4) {
1891     case 0x00: /* Null pointer call */
1892         gen_excp_1(EXCP_IMP);
1893         ctx->base.is_jmp = DISAS_NORETURN;
1894         break;
1895 
1896     case 0xb0: /* LWS */
1897         gen_excp_1(EXCP_SYSCALL_LWS);
1898         ctx->base.is_jmp = DISAS_NORETURN;
1899         break;
1900 
1901     case 0xe0: /* SET_THREAD_POINTER */
1902         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1903         tmp = tcg_temp_new_i64();
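             /* Return to the address in %r31, forced to privilege 3.  */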
1904         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1905         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1906         tcg_gen_addi_i64(tmp, tmp, 4);
1907         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1908         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1909         break;
1910 
1911     case 0x100: /* SYSCALL */
1912         gen_excp_1(EXCP_SYSCALL);
1913         ctx->base.is_jmp = DISAS_NORETURN;
1914         break;
1915 
1916     default:
1917     do_sigill:
1918         gen_excp_1(EXCP_ILL);
1919         ctx->base.is_jmp = DISAS_NORETURN;
1920         break;
1921     }
1922 }
1923 #endif
1924 
1925 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1926 {
1927     cond_free(&ctx->null_cond);
1928     return true;
1929 }
1930 
1931 static bool trans_break(DisasContext *ctx, arg_break *a)
1932 {
1933     return gen_excp_iir(ctx, EXCP_BREAK);
1934 }
1935 
1936 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1937 {
1938     /* No point in nullifying the memory barrier.  */
1939     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1940 
1941     cond_free(&ctx->null_cond);
1942     return true;
1943 }
1944 
1945 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1946 {
1947     unsigned rt = a->t;
1948     TCGv_i64 tmp = dest_gpr(ctx, rt);
1949     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1950     save_gpr(ctx, rt, tmp);
1951 
1952     cond_free(&ctx->null_cond);
1953     return true;
1954 }
1955 
1956 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1957 {
1958     unsigned rt = a->t;
1959     unsigned rs = a->sp;
1960     TCGv_i64 t0 = tcg_temp_new_i64();
1961 
1962     load_spr(ctx, t0, rs);
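         /* The 32-bit space id lives in the high half of the SR.  */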
1963     tcg_gen_shri_i64(t0, t0, 32);
1964 
1965     save_gpr(ctx, rt, t0);
1966 
1967     cond_free(&ctx->null_cond);
1968     return true;
1969 }
1970 
1971 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1972 {
1973     unsigned rt = a->t;
1974     unsigned ctl = a->r;
1975     TCGv_i64 tmp;
1976 
1977     switch (ctl) {
1978     case CR_SAR:
1979         if (a->e == 0) {
1980             /* MFSAR without ,W masks low 5 bits.  */
1981             tmp = dest_gpr(ctx, rt);
1982             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1983             save_gpr(ctx, rt, tmp);
1984             goto done;
1985         }
1986         save_gpr(ctx, rt, cpu_sar);
1987         goto done;
1988     case CR_IT: /* Interval Timer */
1989         /* FIXME: Respect PSW_S bit.  */
1990         nullify_over(ctx);
1991         tmp = dest_gpr(ctx, rt);
1992         if (translator_io_start(&ctx->base)) {
1993             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1994         }
1995         gen_helper_read_interval_timer(tmp);
1998         save_gpr(ctx, rt, tmp);
1999         return nullify_end(ctx);
2000     case 26:
2001     case 27:
2002         break;
2003     default:
2004         /* All other control registers are privileged.  */
2005         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2006         break;
2007     }
2008 
2009     tmp = tcg_temp_new_i64();
2010     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2011     save_gpr(ctx, rt, tmp);
2012 
2013  done:
2014     cond_free(&ctx->null_cond);
2015     return true;
2016 }
2017 
2018 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2019 {
2020     unsigned rr = a->r;
2021     unsigned rs = a->sp;
2022     TCGv_i64 tmp;
2023 
2024     if (rs >= 5) {
2025         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2026     }
2027     nullify_over(ctx);
2028 
2029     tmp = tcg_temp_new_i64();
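         /* Space registers store the 32-bit space id in the high half.  */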
2030     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2031 
2032     if (rs >= 4) {
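             /* SR4-SR7 are not TCG globals; the write also means the
                "all space registers equal" condition tracked by
                TB_FLAG_SR_SAME may no longer hold.  */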
2033         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2034         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2035     } else {
2036         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2037     }
2038 
2039     return nullify_end(ctx);
2040 }
2041 
2042 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2043 {
2044     unsigned ctl = a->t;
2045     TCGv_i64 reg;
2046     TCGv_i64 tmp;
2047 
2048     if (ctl == CR_SAR) {
2049         reg = load_gpr(ctx, a->r);
2050         tmp = tcg_temp_new_i64();
2051         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2052         save_or_nullify(ctx, cpu_sar, tmp);
2053 
2054         cond_free(&ctx->null_cond);
2055         return true;
2056     }
2057 
2058     /* All other control registers are privileged or read-only.  */
2059     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2060 
2061 #ifndef CONFIG_USER_ONLY
2062     nullify_over(ctx);
2063     reg = load_gpr(ctx, a->r);
2064 
2065     switch (ctl) {
2066     case CR_IT:
2067         gen_helper_write_interval_timer(tcg_env, reg);
2068         break;
2069     case CR_EIRR:
2070         gen_helper_write_eirr(tcg_env, reg);
2071         break;
2072     case CR_EIEM:
2073         gen_helper_write_eiem(tcg_env, reg);
2074         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2075         break;
2076 
2077     case CR_IIASQ:
2078     case CR_IIAOQ:
2079         /* FIXME: Respect PSW_Q bit */
2080         /* The write advances the queue and stores to the back element.  */
2081         tmp = tcg_temp_new_i64();
2082         tcg_gen_ld_i64(tmp, tcg_env,
2083                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2084         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2085         tcg_gen_st_i64(reg, tcg_env,
2086                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2087         break;
2088 
2089     case CR_PID1:
2090     case CR_PID2:
2091     case CR_PID3:
2092     case CR_PID4:
2093         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2095         gen_helper_change_prot_id(tcg_env);
2097         break;
2098 
2099     default:
2100         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101         break;
2102     }
2103     return nullify_end(ctx);
2104 #endif
2105 }
2106 
2107 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2108 {
2109     TCGv_i64 tmp = tcg_temp_new_i64();
2110 
2111     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2112     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2113     save_or_nullify(ctx, cpu_sar, tmp);
2114 
2115     cond_free(&ctx->null_cond);
2116     return true;
2117 }
2118 
2119 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2120 {
2121     TCGv_i64 dest = dest_gpr(ctx, a->t);
2122 
2123 #ifdef CONFIG_USER_ONLY
2124     /* We don't implement space registers in user mode. */
2125     tcg_gen_movi_i64(dest, 0);
2126 #else
2127     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2128     tcg_gen_shri_i64(dest, dest, 32);
2129 #endif
2130     save_gpr(ctx, a->t, dest);
2131 
2132     cond_free(&ctx->null_cond);
2133     return true;
2134 }
2135 
2136 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2137 {
2138     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2139 #ifndef CONFIG_USER_ONLY
2140     TCGv_i64 tmp;
2141 
2142     nullify_over(ctx);
2143 
2144     tmp = tcg_temp_new_i64();
2145     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2146     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2147     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2148     save_gpr(ctx, a->t, tmp);
2149 
2150     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2151     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2152     return nullify_end(ctx);
2153 #endif
2154 }
2155 
2156 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2157 {
2158     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2159 #ifndef CONFIG_USER_ONLY
2160     TCGv_i64 tmp;
2161 
2162     nullify_over(ctx);
2163 
2164     tmp = tcg_temp_new_i64();
2165     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2166     tcg_gen_ori_i64(tmp, tmp, a->i);
2167     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2168     save_gpr(ctx, a->t, tmp);
2169 
2170     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2171     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2172     return nullify_end(ctx);
2173 #endif
2174 }
2175 
2176 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2177 {
2178     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2179 #ifndef CONFIG_USER_ONLY
2180     TCGv_i64 tmp, reg;
2181     nullify_over(ctx);
2182 
2183     reg = load_gpr(ctx, a->r);
2184     tmp = tcg_temp_new_i64();
2185     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2186 
2187     /* Exit the TB to recognize new interrupts.  */
2188     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2189     return nullify_end(ctx);
2190 #endif
2191 }
2192 
2193 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2194 {
2195     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2196 #ifndef CONFIG_USER_ONLY
2197     nullify_over(ctx);
2198 
2199     if (rfi_r) {
2200         gen_helper_rfi_r(tcg_env);
2201     } else {
2202         gen_helper_rfi(tcg_env);
2203     }
2204     /* Exit the TB to recognize new interrupts.  */
2205     tcg_gen_exit_tb(NULL, 0);
2206     ctx->base.is_jmp = DISAS_NORETURN;
2207 
2208     return nullify_end(ctx);
2209 #endif
2210 }
2211 
2212 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2213 {
2214     return do_rfi(ctx, false);
2215 }
2216 
2217 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2218 {
2219     return do_rfi(ctx, true);
2220 }
2221 
2222 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2223 {
2224     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2225 #ifndef CONFIG_USER_ONLY
2226     nullify_over(ctx);
2227     gen_helper_halt(tcg_env);
2228     ctx->base.is_jmp = DISAS_NORETURN;
2229     return nullify_end(ctx);
2230 #endif
2231 }
2232 
2233 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2234 {
2235     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237     nullify_over(ctx);
2238     gen_helper_reset(tcg_env);
2239     ctx->base.is_jmp = DISAS_NORETURN;
2240     return nullify_end(ctx);
2241 #endif
2242 }
2243 
2244 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2245 {
2246     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2247 #ifndef CONFIG_USER_ONLY
2248     nullify_over(ctx);
2249     gen_helper_getshadowregs(tcg_env);
2250     return nullify_end(ctx);
2251 #endif
2252 }
2253 
2254 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2255 {
2256     if (a->m) {
2257         TCGv_i64 dest = dest_gpr(ctx, a->b);
2258         TCGv_i64 src1 = load_gpr(ctx, a->b);
2259         TCGv_i64 src2 = load_gpr(ctx, a->x);
2260 
2261         /* The only thing we need to do is the base register modification.  */
2262         tcg_gen_add_i64(dest, src1, src2);
2263         save_gpr(ctx, a->b, dest);
2264     }
2265     cond_free(&ctx->null_cond);
2266     return true;
2267 }
2268 
2269 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2270 {
2271     TCGv_i64 dest, ofs;
2272     TCGv_i32 level, want;
2273     TCGv_i64 addr;
2274 
2275     nullify_over(ctx);
2276 
2277     dest = dest_gpr(ctx, a->t);
2278     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2279 
2280     if (a->imm) {
2281         level = tcg_constant_i32(a->ri);
2282     } else {
2283         level = tcg_temp_new_i32();
2284         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2285         tcg_gen_andi_i32(level, level, 3);
2286     }
2287     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2288 
2289     gen_helper_probe(dest, tcg_env, addr, level, want);
2290 
2291     save_gpr(ctx, a->t, dest);
2292     return nullify_end(ctx);
2293 }
2294 
2295 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2296 {
2297     if (ctx->is_pa20) {
2298         return false;
2299     }
2300     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302     TCGv_i64 addr;
2303     TCGv_i64 ofs, reg;
2304 
2305     nullify_over(ctx);
2306 
2307     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2308     reg = load_gpr(ctx, a->r);
2309     if (a->addr) {
2310         gen_helper_itlba_pa11(tcg_env, addr, reg);
2311     } else {
2312         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2313     }
2314 
2315     /* Exit TB for TLB change if mmu is enabled.  */
2316     if (ctx->tb_flags & PSW_C) {
2317         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2318     }
2319     return nullify_end(ctx);
2320 #endif
2321 }
2322 
2323 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2324 {
2325     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2326 #ifndef CONFIG_USER_ONLY
2327     TCGv_i64 addr;
2328     TCGv_i64 ofs;
2329 
2330     nullify_over(ctx);
2331 
2332     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2333     if (a->m) {
2334         save_gpr(ctx, a->b, ofs);
2335     }
2336     if (a->local) {
2337         gen_helper_ptlbe(tcg_env);
2338     } else {
2339         gen_helper_ptlb(tcg_env, addr);
2340     }
2341 
2342     /* Exit TB for TLB change if mmu is enabled.  */
2343     if (ctx->tb_flags & PSW_C) {
2344         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2345     }
2346     return nullify_end(ctx);
2347 #endif
2348 }
2349 
2350 /*
2351  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2352  * See
2353  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2354  *     page 13-9 (195/206)
2355  */
2356 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2357 {
2358     if (ctx->is_pa20) {
2359         return false;
2360     }
2361     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2362 #ifndef CONFIG_USER_ONLY
2363     TCGv_i64 addr, atl, stl;
2364     TCGv_i64 reg;
2365 
2366     nullify_over(ctx);
2367 
2368     /*
2369      * FIXME:
2370      *  if (not (pcxl or pcxl2))
2371      *    return gen_illegal(ctx);
2372      */
2373 
2374     atl = tcg_temp_new_i64();
2375     stl = tcg_temp_new_i64();
2376     addr = tcg_temp_new_i64();
2377 
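         /* Build the address from the space id (ISR or IIASQ) in the
            high 32 bits and the offset (IOR or IIAOQ) in the low 32.  */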
2378     tcg_gen_ld32u_i64(stl, tcg_env,
2379                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2380                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2381     tcg_gen_ld32u_i64(atl, tcg_env,
2382                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2383                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2384     tcg_gen_shli_i64(stl, stl, 32);
2385     tcg_gen_or_i64(addr, atl, stl);
2386 
2387     reg = load_gpr(ctx, a->r);
2388     if (a->addr) {
2389         gen_helper_itlba_pa11(tcg_env, addr, reg);
2390     } else {
2391         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2392     }
2393 
2394     /* Exit TB for TLB change if mmu is enabled.  */
2395     if (ctx->tb_flags & PSW_C) {
2396         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2397     }
2398     return nullify_end(ctx);
2399 #endif
2400 }
2401 
2402 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2403 {
2404     if (!ctx->is_pa20) {
2405         return false;
2406     }
2407     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2408 #ifndef CONFIG_USER_ONLY
2409     nullify_over(ctx);
2410     {
2411         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2412         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2413 
2414         if (a->data) {
2415             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2416         } else {
2417             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2418         }
2419     }
2420     /* Exit TB for TLB change if mmu is enabled.  */
2421     if (ctx->tb_flags & PSW_C) {
2422         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2423     }
2424     return nullify_end(ctx);
2425 #endif
2426 }
2427 
2428 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2429 {
2430     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2431 #ifndef CONFIG_USER_ONLY
2432     TCGv_i64 vaddr;
2433     TCGv_i64 ofs, paddr;
2434 
2435     nullify_over(ctx);
2436 
2437     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2438 
2439     paddr = tcg_temp_new_i64();
2440     gen_helper_lpa(paddr, tcg_env, vaddr);
2441 
2442     /* Note that physical address result overrides base modification.  */
2443     if (a->m) {
2444         save_gpr(ctx, a->b, ofs);
2445     }
2446     save_gpr(ctx, a->t, paddr);
2447 
2448     return nullify_end(ctx);
2449 #endif
2450 }
2451 
2452 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2453 {
2454     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2455 
2456     /* The Coherence Index is an implementation-defined function of the
2457        physical address.  Two addresses with the same CI have a coherent
2458        view of the cache.  Our implementation is to return 0 for all,
2459        since the entire address space is coherent.  */
2460     save_gpr(ctx, a->t, tcg_constant_i64(0));
2461 
2462     cond_free(&ctx->null_cond);
2463     return true;
2464 }
2465 
2466 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2467 {
2468     return do_add_reg(ctx, a, false, false, false, false);
2469 }
2470 
2471 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2472 {
2473     return do_add_reg(ctx, a, true, false, false, false);
2474 }
2475 
2476 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2477 {
2478     return do_add_reg(ctx, a, false, true, false, false);
2479 }
2480 
2481 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2482 {
2483     return do_add_reg(ctx, a, false, false, false, true);
2484 }
2485 
2486 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2487 {
2488     return do_add_reg(ctx, a, false, true, false, true);
2489 }
2490 
2491 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2492 {
2493     return do_sub_reg(ctx, a, false, false, false);
2494 }
2495 
2496 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2497 {
2498     return do_sub_reg(ctx, a, true, false, false);
2499 }
2500 
2501 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2502 {
2503     return do_sub_reg(ctx, a, false, false, true);
2504 }
2505 
2506 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2507 {
2508     return do_sub_reg(ctx, a, true, false, true);
2509 }
2510 
2511 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2512 {
2513     return do_sub_reg(ctx, a, false, true, false);
2514 }
2515 
2516 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2517 {
2518     return do_sub_reg(ctx, a, true, true, false);
2519 }
2520 
2521 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2522 {
2523     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2524 }
2525 
2526 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2527 {
2528     return do_log_reg(ctx, a, tcg_gen_and_i64);
2529 }
2530 
2531 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2532 {
2533     if (a->cf == 0) {
2534         unsigned r2 = a->r2;
2535         unsigned r1 = a->r1;
2536         unsigned rt = a->t;
2537 
2538         if (rt == 0) { /* NOP */
2539             cond_free(&ctx->null_cond);
2540             return true;
2541         }
2542         if (r2 == 0) { /* COPY */
2543             if (r1 == 0) {
2544                 TCGv_i64 dest = dest_gpr(ctx, rt);
2545                 tcg_gen_movi_i64(dest, 0);
2546                 save_gpr(ctx, rt, dest);
2547             } else {
2548                 save_gpr(ctx, rt, cpu_gr[r1]);
2549             }
2550             cond_free(&ctx->null_cond);
2551             return true;
2552         }
2553 #ifndef CONFIG_USER_ONLY
2554         /* These are QEMU extensions and are nops in the real architecture:
2555          *
2556          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2557          * or %r31,%r31,%r31 -- death loop; offline cpu
2558          *                      currently implemented as idle.
2559          */
2560         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2561             /* No need to check for supervisor, as userland can only pause
2562                until the next timer interrupt.  */
2563             nullify_over(ctx);
2564 
2565             /* Advance the instruction queue.  */
2566             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2567             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2568             nullify_set(ctx, 0);
2569 
2570             /* Tell the qemu main loop to halt until this cpu has work.  */
2571             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2572                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2573             gen_excp_1(EXCP_HALTED);
2574             ctx->base.is_jmp = DISAS_NORETURN;
2575 
2576             return nullify_end(ctx);
2577         }
2578 #endif
2579     }
2580     return do_log_reg(ctx, a, tcg_gen_or_i64);
2581 }
2582 
2583 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2584 {
2585     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2586 }
2587 
2588 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2589 {
2590     TCGv_i64 tcg_r1, tcg_r2;
2591 
2592     if (a->cf) {
2593         nullify_over(ctx);
2594     }
2595     tcg_r1 = load_gpr(ctx, a->r1);
2596     tcg_r2 = load_gpr(ctx, a->r2);
2597     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2598     return nullify_end(ctx);
2599 }
2600 
2601 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2602 {
2603     TCGv_i64 tcg_r1, tcg_r2;
2604 
2605     if (a->cf) {
2606         nullify_over(ctx);
2607     }
2608     tcg_r1 = load_gpr(ctx, a->r1);
2609     tcg_r2 = load_gpr(ctx, a->r2);
2610     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2611     return nullify_end(ctx);
2612 }
2613 
2614 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2615 {
2616     TCGv_i64 tcg_r1, tcg_r2, tmp;
2617 
2618     if (a->cf) {
2619         nullify_over(ctx);
2620     }
2621     tcg_r1 = load_gpr(ctx, a->r1);
2622     tcg_r2 = load_gpr(ctx, a->r2);
2623     tmp = tcg_temp_new_i64();
2624     tcg_gen_not_i64(tmp, tcg_r2);
2625     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2626     return nullify_end(ctx);
2627 }
2628 
2629 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2630 {
2631     return do_uaddcm(ctx, a, false);
2632 }
2633 
2634 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2635 {
2636     return do_uaddcm(ctx, a, true);
2637 }
2638 
2639 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2640 {
2641     TCGv_i64 tmp;
2642 
2643     nullify_over(ctx);
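         /* PSW[CB] supplies one carry bit per nibble.  Form a constant
            with 6 in each digit position that needs correction: the
            no-carry digits for DCOR, the carry digits for DCOR,I.  */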
2644 
2645     tmp = tcg_temp_new_i64();
2646     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2647     if (!is_i) {
2648         tcg_gen_not_i64(tmp, tmp);
2649     }
2650     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2651     tcg_gen_muli_i64(tmp, tmp, 6);
2652     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2653             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2654     return nullify_end(ctx);
2655 }
2656 
2657 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2658 {
2659     return do_dcor(ctx, a, false);
2660 }
2661 
2662 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2663 {
2664     return do_dcor(ctx, a, true);
2665 }
2666 
2667 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2668 {
2669     TCGv_i64 dest, add1, add2, addc, zero, in1, in2;
2670     TCGv_i64 cout;
2671 
2672     nullify_over(ctx);
2673 
2674     in1 = load_gpr(ctx, a->r1);
2675     in2 = load_gpr(ctx, a->r2);
2676 
2677     add1 = tcg_temp_new_i64();
2678     add2 = tcg_temp_new_i64();
2679     addc = tcg_temp_new_i64();
2680     dest = tcg_temp_new_i64();
2681     zero = tcg_constant_i64(0);
2682 
2683     /* Form R1 << 1 | PSW[CB]{8}.  */
2684     tcg_gen_add_i64(add1, in1, in1);
2685     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2686 
2687     /*
2688      * Add or subtract R2, depending on PSW[V].  Proper computation of
2689      * carry requires that we subtract via + ~R2 + 1, as described in
2690      * the manual.  By extracting and masking V, we can produce the
2691      * proper inputs to the addition without movcond.
2692      */
2693     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2694     tcg_gen_xor_i64(add2, in2, addc);
2695     tcg_gen_andi_i64(addc, addc, 1);
2696 
2697     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2698     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2699 
2700     /* Write back the result register.  */
2701     save_gpr(ctx, a->t, dest);
2702 
2703     /* Write back PSW[CB].  */
2704     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2705     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2706 
2707     /* Write back PSW[V] for the division step.  */
2708     cout = get_psw_carry(ctx, false);
2709     tcg_gen_neg_i64(cpu_psw_v, cout);
2710     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2711 
2712     /* Install the new nullification.  */
2713     if (a->cf) {
2714         TCGv_i64 sv = NULL;
2715         if (cond_need_sv(a->cf >> 1)) {
2716             /* ??? The lshift is supposed to contribute to overflow.  */
2717             sv = do_add_sv(ctx, dest, add1, add2);
2718         }
2719         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2720     }
2721 
2722     return nullify_end(ctx);
2723 }
2724 
2725 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2726 {
2727     return do_add_imm(ctx, a, false, false);
2728 }
2729 
2730 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2731 {
2732     return do_add_imm(ctx, a, true, false);
2733 }
2734 
2735 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2736 {
2737     return do_add_imm(ctx, a, false, true);
2738 }
2739 
2740 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2741 {
2742     return do_add_imm(ctx, a, true, true);
2743 }
2744 
2745 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2746 {
2747     return do_sub_imm(ctx, a, false);
2748 }
2749 
2750 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2751 {
2752     return do_sub_imm(ctx, a, true);
2753 }
2754 
2755 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2756 {
2757     TCGv_i64 tcg_im, tcg_r2;
2758 
2759     if (a->cf) {
2760         nullify_over(ctx);
2761     }
2762 
2763     tcg_im = tcg_constant_i64(a->i);
2764     tcg_r2 = load_gpr(ctx, a->r);
2765     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2766 
2767     return nullify_end(ctx);
2768 }
2769 
2770 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2771 {
2772     if (!ctx->is_pa20 && a->size > MO_32) {
2773         return gen_illegal(ctx);
2774     }
2775     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2776                    a->disp, a->sp, a->m, a->size | MO_TE);
2777 }
2778 
2779 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2780 {
2781     assert(a->x == 0 && a->scale == 0);
2782     if (!ctx->is_pa20 && a->size > MO_32) {
2783         return gen_illegal(ctx);
2784     }
2785     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2786 }
2787 
2788 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2789 {
2790     MemOp mop = MO_TE | MO_ALIGN | a->size;
2791     TCGv_i64 zero, dest, ofs;
2792     TCGv_i64 addr;
2793 
2794     if (!ctx->is_pa20 && a->size > MO_32) {
2795         return gen_illegal(ctx);
2796     }
2797 
2798     nullify_over(ctx);
2799 
2800     if (a->m) {
2801         /* Base register modification.  Make sure that if RT == RB,
2802            we see the result of the load.  */
2803         dest = tcg_temp_new_i64();
2804     } else {
2805         dest = dest_gpr(ctx, a->t);
2806     }
2807 
2808     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2809              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2810 
2811     /*
2812      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2813      * However, actual hardware succeeds with aligned mod 4.
2814      * Detect this case and log a GUEST_ERROR.
2815      *
2816      * TODO: HPPA64 relaxes the over-alignment requirement
2817      * with the ,co completer.
2818      */
2819     gen_helper_ldc_check(addr);
2820 
2821     zero = tcg_constant_i64(0);
2822     tcg_gen_atomic_xchg_i64(dest, addr, zero, ctx->mmu_idx, mop);
2823 
2824     if (a->m) {
2825         save_gpr(ctx, a->b, ofs);
2826     }
2827     save_gpr(ctx, a->t, dest);
2828 
2829     return nullify_end(ctx);
2830 }
2831 
2832 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2833 {
2834     TCGv_i64 ofs, val;
2835     TCGv_i64 addr;
2836 
2837     nullify_over(ctx);
2838 
2839     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2840              ctx->mmu_idx == MMU_PHYS_IDX);
2841     val = load_gpr(ctx, a->r);
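         /* a->a selects the STBY,E ("ends") variant, else STBY,B.  */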
2842     if (a->a) {
2843         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2844             gen_helper_stby_e_parallel(tcg_env, addr, val);
2845         } else {
2846             gen_helper_stby_e(tcg_env, addr, val);
2847         }
2848     } else {
2849         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2850             gen_helper_stby_b_parallel(tcg_env, addr, val);
2851         } else {
2852             gen_helper_stby_b(tcg_env, addr, val);
2853         }
2854     }
2855     if (a->m) {
2856         tcg_gen_andi_i64(ofs, ofs, ~3);
2857         save_gpr(ctx, a->b, ofs);
2858     }
2859 
2860     return nullify_end(ctx);
2861 }
2862 
2863 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
2864 {
2865     TCGv_i64 ofs, val;
2866     TCGv_i64 addr;
2867 
2868     if (!ctx->is_pa20) {
2869         return false;
2870     }
2871     nullify_over(ctx);
2872 
2873     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2874              ctx->mmu_idx == MMU_PHYS_IDX);
2875     val = load_gpr(ctx, a->r);
2876     if (a->a) {
2877         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2878             gen_helper_stdby_e_parallel(tcg_env, addr, val);
2879         } else {
2880             gen_helper_stdby_e(tcg_env, addr, val);
2881         }
2882     } else {
2883         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2884             gen_helper_stdby_b_parallel(tcg_env, addr, val);
2885         } else {
2886             gen_helper_stdby_b(tcg_env, addr, val);
2887         }
2888     }
2889     if (a->m) {
2890         tcg_gen_andi_i64(ofs, ofs, ~7);
2891         save_gpr(ctx, a->b, ofs);
2892     }
2893 
2894     return nullify_end(ctx);
2895 }
2896 
2897 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2898 {
2899     int hold_mmu_idx = ctx->mmu_idx;
2900 
2901     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
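         /* Perform the access with address translation disabled.  */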
2902     ctx->mmu_idx = MMU_PHYS_IDX;
2903     trans_ld(ctx, a);
2904     ctx->mmu_idx = hold_mmu_idx;
2905     return true;
2906 }
2907 
2908 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2909 {
2910     int hold_mmu_idx = ctx->mmu_idx;
2911 
2912     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2913     ctx->mmu_idx = MMU_PHYS_IDX;
2914     trans_st(ctx, a);
2915     ctx->mmu_idx = hold_mmu_idx;
2916     return true;
2917 }
2918 
2919 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2920 {
2921     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2922 
2923     tcg_gen_movi_i64(tcg_rt, a->i);
2924     save_gpr(ctx, a->t, tcg_rt);
2925     cond_free(&ctx->null_cond);
2926     return true;
2927 }
2928 
2929 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2930 {
2931     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
2932     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
2933 
2934     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
2935     save_gpr(ctx, 1, tcg_r1);
2936     cond_free(&ctx->null_cond);
2937     return true;
2938 }
2939 
2940 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2941 {
2942     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
2943 
2944     /* Special case rb == 0, for the LDI pseudo-op.
2945        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
2946     if (a->b == 0) {
2947         tcg_gen_movi_i64(tcg_rt, a->i);
2948     } else {
2949         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
2950     }
2951     save_gpr(ctx, a->t, tcg_rt);
2952     cond_free(&ctx->null_cond);
2953     return true;
2954 }
2955 
2956 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
2957                     unsigned c, unsigned f, bool d, unsigned n, int disp)
2958 {
2959     TCGv_i64 dest, in2, sv;
2960     DisasCond cond;
2961 
2962     in2 = load_gpr(ctx, r);
2963     dest = tcg_temp_new_i64();
2964 
2965     tcg_gen_sub_i64(dest, in1, in2);
2966 
2967     sv = NULL;
2968     if (cond_need_sv(c)) {
2969         sv = do_sub_sv(ctx, dest, in1, in2);
2970     }
2971 
2972     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
2973     return do_cbranch(ctx, disp, n, &cond);
2974 }
2975 
2976 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2977 {
2978     if (!ctx->is_pa20 && a->d) {
2979         return false;
2980     }
2981     nullify_over(ctx);
2982     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
2983                    a->c, a->f, a->d, a->n, a->disp);
2984 }
2985 
2986 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
2987 {
2988     if (!ctx->is_pa20 && a->d) {
2989         return false;
2990     }
2991     nullify_over(ctx);
2992     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
2993                    a->c, a->f, a->d, a->n, a->disp);
2994 }
2995 
2996 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
2997                     unsigned c, unsigned f, unsigned n, int disp)
2998 {
2999     TCGv_i64 dest, in2, sv, cb_cond;
3000     DisasCond cond;
3001     bool d = false;
3002 
3003     /*
3004      * For hppa64, the ADDB conditions change with PSW.W,
3005      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3006      */
3007     if (ctx->tb_flags & PSW_W) {
3008         d = c >= 5;
3009         if (d) {
3010             c &= 3;
3011         }
3012     }
3013 
3014     in2 = load_gpr(ctx, r);
3015     dest = tcg_temp_new_i64();
3016     sv = NULL;
3017     cb_cond = NULL;
3018 
3019     if (cond_need_cb(c)) {
3020         TCGv_i64 cb = tcg_temp_new_i64();
3021         TCGv_i64 cb_msb = tcg_temp_new_i64();
3022 
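             /* Compute the sum; IN1 ^ IN2 ^ DEST recovers the carry-in
                to each bit, and CB_MSB receives the carry-out.  */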
3023         tcg_gen_movi_i64(cb_msb, 0);
3024         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3025         tcg_gen_xor_i64(cb, in1, in2);
3026         tcg_gen_xor_i64(cb, cb, dest);
3027         cb_cond = get_carry(ctx, d, cb, cb_msb);
3028     } else {
3029         tcg_gen_add_i64(dest, in1, in2);
3030     }
3031     if (cond_need_sv(c)) {
3032         sv = do_add_sv(ctx, dest, in1, in2);
3033     }
3034 
3035     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3036     save_gpr(ctx, r, dest);
3037     return do_cbranch(ctx, disp, n, &cond);
3038 }
3039 
3040 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3041 {
3042     nullify_over(ctx);
3043     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3044 }
3045 
3046 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3047 {
3048     nullify_over(ctx);
3049     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3050 }
3051 
3052 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3053 {
3054     TCGv_i64 tmp, tcg_r;
3055     DisasCond cond;
3056 
3057     nullify_over(ctx);
3058 
3059     tmp = tcg_temp_new_i64();
3060     tcg_r = load_gpr(ctx, a->r);
3061     if (cond_need_ext(ctx, a->d)) {
3062         /* Force shift into [32,63] */
3063         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3064         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3065     } else {
3066         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3067     }
3068 
3069     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3070     return do_cbranch(ctx, a->disp, a->n, &cond);
3071 }
3072 
3073 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3074 {
3075     TCGv_i64 tmp, tcg_r;
3076     DisasCond cond;
3077     int p;
3078 
3079     nullify_over(ctx);
3080 
3081     tmp = tcg_temp_new_i64();
3082     tcg_r = load_gpr(ctx, a->r);
3083     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3084     tcg_gen_shli_i64(tmp, tcg_r, p);
3085 
3086     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3087     return do_cbranch(ctx, a->disp, a->n, &cond);
3088 }
3089 
3090 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3091 {
3092     TCGv_i64 dest;
3093     DisasCond cond;
3094 
3095     nullify_over(ctx);
3096 
3097     dest = dest_gpr(ctx, a->r2);
3098     if (a->r1 == 0) {
3099         tcg_gen_movi_i64(dest, 0);
3100     } else {
3101         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3102     }
3103 
3104     /* All MOVB conditions are 32-bit. */
3105     cond = do_sed_cond(ctx, a->c, false, dest);
3106     return do_cbranch(ctx, a->disp, a->n, &cond);
3107 }
3108 
3109 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3110 {
3111     TCGv_i64 dest;
3112     DisasCond cond;
3113 
3114     nullify_over(ctx);
3115 
3116     dest = dest_gpr(ctx, a->r);
3117     tcg_gen_movi_i64(dest, a->i);
3118 
3119     /* All MOVBI conditions are 32-bit. */
3120     cond = do_sed_cond(ctx, a->c, false, dest);
3121     return do_cbranch(ctx, a->disp, a->n, &cond);
3122 }
3123 
3124 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3125 {
3126     TCGv_i64 dest, src2;
3127 
3128     if (!ctx->is_pa20 && a->d) {
3129         return false;
3130     }
3131     if (a->c) {
3132         nullify_over(ctx);
3133     }
3134 
3135     dest = dest_gpr(ctx, a->t);
3136     src2 = load_gpr(ctx, a->r2);
3137     if (a->r1 == 0) {
3138         if (a->d) {
3139             tcg_gen_shr_i64(dest, src2, cpu_sar);
3140         } else {
3141             TCGv_i64 tmp = tcg_temp_new_i64();
3142 
3143             tcg_gen_ext32u_i64(dest, src2);
3144             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3145             tcg_gen_shr_i64(dest, dest, tmp);
3146         }
3147     } else if (a->r1 == a->r2) {
3148         if (a->d) {
3149             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3150         } else {
3151             TCGv_i32 t32 = tcg_temp_new_i32();
3152             TCGv_i32 s32 = tcg_temp_new_i32();
3153 
3154             tcg_gen_extrl_i64_i32(t32, src2);
3155             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3156             tcg_gen_andi_i32(s32, s32, 31);
3157             tcg_gen_rotr_i32(t32, t32, s32);
3158             tcg_gen_extu_i32_i64(dest, t32);
3159         }
3160     } else {
3161         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3162 
3163         if (a->d) {
3164             TCGv_i64 t = tcg_temp_new_i64();
3165             TCGv_i64 n = tcg_temp_new_i64();
3166 
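                 /* Shift left by 64 - SAR in two steps (63 - SAR, then
                    one more) to avoid an out-of-range shift of 64.  */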
3167             tcg_gen_xori_i64(n, cpu_sar, 63);
3168             tcg_gen_shl_i64(t, src2, n);
3169             tcg_gen_shli_i64(t, t, 1);
3170             tcg_gen_shr_i64(dest, src1, cpu_sar);
3171             tcg_gen_or_i64(dest, dest, t);
3172         } else {
3173             TCGv_i64 t = tcg_temp_new_i64();
3174             TCGv_i64 s = tcg_temp_new_i64();
3175 
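                 /* Form the 64-bit pair R1:R2 and shift right by
                    SAR mod 32; the low 32 bits are the result.  */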
3176             tcg_gen_concat32_i64(t, src2, src1);
3177             tcg_gen_andi_i64(s, cpu_sar, 31);
3178             tcg_gen_shr_i64(dest, t, s);
3179         }
3180     }
3181     save_gpr(ctx, a->t, dest);
3182 
3183     /* Install the new nullification.  */
3184     cond_free(&ctx->null_cond);
3185     if (a->c) {
3186         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3187     }
3188     return nullify_end(ctx);
3189 }
3190 
3191 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3192 {
3193     unsigned width, sa;
3194     TCGv_i64 dest, t2;
3195 
3196     if (!ctx->is_pa20 && a->d) {
3197         return false;
3198     }
3199     if (a->c) {
3200         nullify_over(ctx);
3201     }
3202 
3203     width = a->d ? 64 : 32;
3204     sa = width - 1 - a->cpos;
3205 
3206     dest = dest_gpr(ctx, a->t);
3207     t2 = load_gpr(ctx, a->r2);
3208     if (a->r1 == 0) {
3209         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3210     } else if (width == TARGET_LONG_BITS) {
3211         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3212     } else {
3213         assert(!a->d);
3214         if (a->r1 == a->r2) {
3215             TCGv_i32 t32 = tcg_temp_new_i32();
3216             tcg_gen_extrl_i64_i32(t32, t2);
3217             tcg_gen_rotri_i32(t32, t32, sa);
3218             tcg_gen_extu_i32_i64(dest, t32);
3219         } else {
3220             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3221             tcg_gen_extract_i64(dest, dest, sa, 32);
3222         }
3223     }
3224     save_gpr(ctx, a->t, dest);
3225 
3226     /* Install the new nullification.  */
3227     cond_free(&ctx->null_cond);
3228     if (a->c) {
3229         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3230     }
3231     return nullify_end(ctx);
3232 }
3233 
3234 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3235 {
3236     unsigned widthm1 = a->d ? 63 : 31;
3237     TCGv_i64 dest, src, tmp;
3238 
3239     if (!ctx->is_pa20 && a->d) {
3240         return false;
3241     }
3242     if (a->c) {
3243         nullify_over(ctx);
3244     }
3245 
3246     dest = dest_gpr(ctx, a->t);
3247     src = load_gpr(ctx, a->r);
3248     tmp = tcg_temp_new_i64();
3249 
3250     /* Recall that SAR uses big-endian bit numbering.  */
3251     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3252     tcg_gen_xori_i64(tmp, tmp, widthm1);
3253 
3254     if (a->se) {
3255         if (!a->d) {
3256             tcg_gen_ext32s_i64(dest, src);
3257             src = dest;
3258         }
3259         tcg_gen_sar_i64(dest, src, tmp);
3260         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3261     } else {
3262         if (!a->d) {
3263             tcg_gen_ext32u_i64(dest, src);
3264             src = dest;
3265         }
3266         tcg_gen_shr_i64(dest, src, tmp);
3267         tcg_gen_extract_i64(dest, dest, 0, a->len);
3268     }
3269     save_gpr(ctx, a->t, dest);
3270 
3271     /* Install the new nullification.  */
3272     cond_free(&ctx->null_cond);
3273     if (a->c) {
3274         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3275     }
3276     return nullify_end(ctx);
3277 }
3278 
3279 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3280 {
3281     unsigned len, cpos, width;
3282     TCGv_i64 dest, src;
3283 
3284     if (!ctx->is_pa20 && a->d) {
3285         return false;
3286     }
3287     if (a->c) {
3288         nullify_over(ctx);
3289     }
3290 
3291     len = a->len;
3292     width = a->d ? 64 : 32;
3293     cpos = width - 1 - a->pos;
3294     if (cpos + len > width) {
3295         len = width - cpos;
3296     }
3297 
3298     dest = dest_gpr(ctx, a->t);
3299     src = load_gpr(ctx, a->r);
3300     if (a->se) {
3301         tcg_gen_sextract_i64(dest, src, cpos, len);
3302     } else {
3303         tcg_gen_extract_i64(dest, src, cpos, len);
3304     }
3305     save_gpr(ctx, a->t, dest);
3306 
3307     /* Install the new nullification.  */
3308     cond_free(&ctx->null_cond);
3309     if (a->c) {
3310         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3311     }
3312     return nullify_end(ctx);
3313 }
3314 
3315 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3316 {
3317     unsigned len, width;
3318     uint64_t mask0, mask1;
3319     TCGv_i64 dest;
3320 
3321     if (!ctx->is_pa20 && a->d) {
3322         return false;
3323     }
3324     if (a->c) {
3325         nullify_over(ctx);
3326     }
3327 
3328     len = a->len;
3329     width = a->d ? 64 : 32;
3330     if (a->cpos + len > width) {
3331         len = width - a->cpos;
3332     }
3333 
3334     dest = dest_gpr(ctx, a->t);
3335     mask0 = deposit64(0, a->cpos, len, a->i);
3336     mask1 = deposit64(-1, a->cpos, len, a->i);
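         /* For the nz variant, (SRC & MASK1) | MASK0 keeps the bits
            outside the field and forces the field to the immediate.  */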
3337 
3338     if (a->nz) {
3339         TCGv_i64 src = load_gpr(ctx, a->t);
3340         tcg_gen_andi_i64(dest, src, mask1);
3341         tcg_gen_ori_i64(dest, dest, mask0);
3342     } else {
3343         tcg_gen_movi_i64(dest, mask0);
3344     }
3345     save_gpr(ctx, a->t, dest);
3346 
3347     /* Install the new nullification.  */
3348     cond_free(&ctx->null_cond);
3349     if (a->c) {
3350         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3351     }
3352     return nullify_end(ctx);
3353 }
3354 
3355 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3356 {
3357     unsigned rs = a->nz ? a->t : 0;
3358     unsigned len, width;
3359     TCGv_i64 dest, val;
3360 
3361     if (!ctx->is_pa20 && a->d) {
3362         return false;
3363     }
3364     if (a->c) {
3365         nullify_over(ctx);
3366     }
3367 
3368     len = a->len;
3369     width = a->d ? 64 : 32;
3370     if (a->cpos + len > width) {
3371         len = width - a->cpos;
3372     }
3373 
3374     dest = dest_gpr(ctx, a->t);
3375     val = load_gpr(ctx, a->r);
3376     if (rs == 0) {
3377         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3378     } else {
3379         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3380     }
3381     save_gpr(ctx, a->t, dest);
3382 
3383     /* Install the new nullification.  */
3384     cond_free(&ctx->null_cond);
3385     if (a->c) {
3386         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3387     }
3388     return nullify_end(ctx);
3389 }
3390 
3391 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3392                        bool d, bool nz, unsigned len, TCGv_i64 val)
3393 {
3394     unsigned rs = nz ? rt : 0;
3395     unsigned widthm1 = d ? 63 : 31;
3396     TCGv_i64 mask, tmp, shift, dest;
3397     uint64_t msb = 1ULL << (len - 1);
3398 
3399     dest = dest_gpr(ctx, rt);
3400     shift = tcg_temp_new_i64();
3401     tmp = tcg_temp_new_i64();
3402 
3403     /* Convert big-endian bit numbering in SAR to left-shift.  */
3404     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3405     tcg_gen_xori_i64(shift, shift, widthm1);
3406 
3407     mask = tcg_temp_new_i64();
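         /* MASK = 2**len - 1, written as msb + (msb - 1) so that
            len == 64 does not require an out-of-range shift.  */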
3408     tcg_gen_movi_i64(mask, msb + (msb - 1));
3409     tcg_gen_and_i64(tmp, val, mask);
3410     if (rs) {
3411         tcg_gen_shl_i64(mask, mask, shift);
3412         tcg_gen_shl_i64(tmp, tmp, shift);
3413         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3414         tcg_gen_or_i64(dest, dest, tmp);
3415     } else {
3416         tcg_gen_shl_i64(dest, tmp, shift);
3417     }
3418     save_gpr(ctx, rt, dest);
3419 
3420     /* Install the new nullification.  */
3421     cond_free(&ctx->null_cond);
3422     if (c) {
3423         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3424     }
3425     return nullify_end(ctx);
3426 }
3427 
3428 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3429 {
3430     if (!ctx->is_pa20 && a->d) {
3431         return false;
3432     }
3433     if (a->c) {
3434         nullify_over(ctx);
3435     }
3436     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3437                       load_gpr(ctx, a->r));
3438 }
3439 
3440 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3441 {
3442     if (!ctx->is_pa20 && a->d) {
3443         return false;
3444     }
3445     if (a->c) {
3446         nullify_over(ctx);
3447     }
3448     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3449                       tcg_constant_i64(a->i));
3450 }
3451 
3452 static bool trans_be(DisasContext *ctx, arg_be *a)
3453 {
3454     TCGv_i64 tmp;
3455 
3456 #ifdef CONFIG_USER_ONLY
3457     /* ??? It seems like there should be a good way of using
3458        "be disp(sr2, r0)", the canonical gateway entry mechanism
3459        to our advantage.  But that appears to be inconvenient to
3460        manage alongside branch delay slots.  Therefore we handle
3461        entry into the gateway page via absolute address.  */
3462     /* Since we don't implement spaces, just branch.  Do notice the special
3463        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3464        goto_tb to the TB containing the syscall.  */
3465     if (a->b == 0) {
3466         return do_dbranch(ctx, a->disp, a->l, a->n);
3467     }
3468 #else
3469     nullify_over(ctx);
3470 #endif
3471 
3472     tmp = tcg_temp_new_i64();
3473     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3474     tmp = do_ibranch_priv(ctx, tmp);
3475 
3476 #ifdef CONFIG_USER_ONLY
3477     return do_ibranch(ctx, tmp, a->l, a->n);
3478 #else
3479     TCGv_i64 new_spc = tcg_temp_new_i64();
3480 
3481     load_spr(ctx, new_spc, a->sp);
3482     if (a->l) {
3483         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3484         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3485     }
3486     if (a->n && use_nullify_skip(ctx)) {
3487         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3488         tcg_gen_addi_i64(tmp, tmp, 4);
3489         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3490         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3491         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3492     } else {
3493         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3494         if (ctx->iaoq_b == -1) {
3495             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3496         }
3497         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3498         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3499         nullify_set(ctx, a->n);
3500     }
3501     tcg_gen_lookup_and_goto_ptr();
3502     ctx->base.is_jmp = DISAS_NORETURN;
3503     return nullify_end(ctx);
3504 #endif
3505 }
3506 
3507 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3508 {
3509     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3510 }
3511 
3512 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3513 {
3514     uint64_t dest = iaoq_dest(ctx, a->disp);
3515 
3516     nullify_over(ctx);
3517 
3518     /* Make sure the caller hasn't done something weird with the queue.
3519      * ??? This is not quite the same as the PSW[B] bit, which would be
3520      * expensive to track.  Real hardware will trap for
3521      *    b  gateway
3522      *    b  gateway+4  (in delay slot of first branch)
3523      * However, checking for a non-sequential instruction queue *will*
3524      * diagnose the security hole
3525      *    b  gateway
3526      *    b  evil
3527      * in which instructions at evil would run with increased privs.
3528      */
3529     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3530         return gen_illegal(ctx);
3531     }
3532 
3533 #ifndef CONFIG_USER_ONLY
3534     if (ctx->tb_flags & PSW_C) {
3535         CPUHPPAState *env = cpu_env(ctx->cs);
3536         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3537         /* If we could not find a TLB entry, then we need to generate an
3538            ITLB miss exception so the kernel will provide it.
3539            The resulting TLB fill operation will invalidate this TB and
3540            we will re-translate, at which point we *will* be able to find
3541            the TLB entry and determine if this is in fact a gateway page.  */
3542         if (type < 0) {
3543             gen_excp(ctx, EXCP_ITLB_MISS);
3544             return true;
3545         }
3546         /* No change for non-gateway pages or for priv decrease.  */
3547         if (type >= 4 && type - 4 < ctx->privilege) {
3548             dest = deposit32(dest, 0, 2, type - 4);
3549         }
3550     } else {
3551         dest &= -4;  /* priv = 0 */
3552     }
3553 #endif
3554 
3555     if (a->l) {
3556         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3557         if (ctx->privilege < 3) {
3558             tcg_gen_andi_i64(tmp, tmp, -4);
3559         }
3560         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3561         save_gpr(ctx, a->l, tmp);
3562     }
3563 
3564     return do_dbranch(ctx, dest, 0, a->n);
3565 }
3566 
3567 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3568 {
3569     if (a->x) {
3570         TCGv_i64 tmp = tcg_temp_new_i64();
3571         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3572         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3573         /* The computation here never changes privilege level.  */
3574         return do_ibranch(ctx, tmp, a->l, a->n);
3575     } else {
3576         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3577         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3578     }
3579 }
3580 
3581 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3582 {
3583     TCGv_i64 dest;
3584 
3585     if (a->x == 0) {
3586         dest = load_gpr(ctx, a->b);
3587     } else {
3588         dest = tcg_temp_new_i64();
3589         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3590         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3591     }
3592     dest = do_ibranch_priv(ctx, dest);
3593     return do_ibranch(ctx, dest, 0, a->n);
3594 }
3595 
3596 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3597 {
3598     TCGv_i64 dest;
3599 
3600 #ifdef CONFIG_USER_ONLY
3601     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3602     return do_ibranch(ctx, dest, a->l, a->n);
3603 #else
3604     nullify_over(ctx);
3605     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3606 
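         /* Advance the IA queue: the old back entry becomes the new
            front, and the computed target together with its space
            becomes the new back.  */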
3607     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3608     if (ctx->iaoq_b == -1) {
3609         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3610     }
3611     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3612     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3613     if (a->l) {
3614         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3615     }
3616     nullify_set(ctx, a->n);
3617     tcg_gen_lookup_and_goto_ptr();
3618     ctx->base.is_jmp = DISAS_NORETURN;
3619     return nullify_end(ctx);
3620 #endif
3621 }
3622 
3623 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3624 {
3625     /* All branch target stack instructions are implemented as nops.  */
3626     return ctx->is_pa20;
3627 }
3628 
3629 /*
3630  * Float class 0
3631  */
3632 
3633 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3634 {
3635     tcg_gen_mov_i32(dst, src);
3636 }
3637 
3638 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3639 {
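         /* FID only identifies the FPU: it loads a model/revision word
            for the emulated CPU type into fr0.  */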
3640     uint64_t ret;
3641 
3642     if (ctx->is_pa20) {
3643         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3644     } else {
3645         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3646     }
3647 
3648     nullify_over(ctx);
3649     save_frd(0, tcg_constant_i64(ret));
3650     return nullify_end(ctx);
3651 }
3652 
3653 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3654 {
3655     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3656 }
3657 
3658 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3659 {
3660     tcg_gen_mov_i64(dst, src);
3661 }
3662 
3663 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3664 {
3665     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3666 }
3667 
3668 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3669 {
3670     tcg_gen_andi_i32(dst, src, INT32_MAX);
3671 }
3672 
3673 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3674 {
3675     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3676 }
3677 
3678 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3679 {
3680     tcg_gen_andi_i64(dst, src, INT64_MAX);
3681 }
3682 
3683 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3684 {
3685     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3686 }
3687 
3688 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3689 {
3690     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3691 }
3692 
3693 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3694 {
3695     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3696 }
3697 
3698 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3699 {
3700     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3701 }
3702 
3703 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3704 {
3705     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3706 }
3707 
3708 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3709 {
3710     tcg_gen_xori_i32(dst, src, INT32_MIN);
3711 }
3712 
3713 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3714 {
3715     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3716 }
3717 
3718 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3719 {
3720     tcg_gen_xori_i64(dst, src, INT64_MIN);
3721 }
3722 
3723 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3724 {
3725     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3726 }
3727 
3728 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3729 {
3730     tcg_gen_ori_i32(dst, src, INT32_MIN);
3731 }
3732 
3733 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3734 {
3735     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3736 }
3737 
3738 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3739 {
3740     tcg_gen_ori_i64(dst, src, INT64_MIN);
3741 }
3742 
3743 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3744 {
3745     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3746 }
3747 
3748 /*
3749  * Float class 1
3750  */
3751 
3752 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3753 {
3754     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3755 }
3756 
3757 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3758 {
3759     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3760 }
3761 
3762 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3763 {
3764     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3765 }
3766 
3767 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3768 {
3769     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3770 }
3771 
3772 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3773 {
3774     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3775 }
3776 
3777 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3778 {
3779     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3780 }
3781 
3782 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3783 {
3784     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3785 }
3786 
3787 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3788 {
3789     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3790 }
3791 
3792 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3793 {
3794     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3795 }
3796 
3797 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3798 {
3799     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3800 }
3801 
3802 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3803 {
3804     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3805 }
3806 
3807 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3808 {
3809     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3810 }
3811 
3812 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3813 {
3814     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3815 }
3816 
3817 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3818 {
3819     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3820 }
3821 
3822 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3823 {
3824     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3825 }
3826 
3827 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3828 {
3829     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3830 }
3831 
3832 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3833 {
3834     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3835 }
3836 
3837 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3838 {
3839     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3840 }
3841 
3842 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3843 {
3844     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3845 }
3846 
3847 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3848 {
3849     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3850 }
3851 
3852 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3853 {
3854     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3855 }
3856 
3857 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3858 {
3859     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3860 }
3861 
3862 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3863 {
3864     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3865 }
3866 
3867 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3868 {
3869     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3870 }
3871 
3872 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3873 {
3874     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3875 }
3876 
3877 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3878 {
3879     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3880 }
3881 
3882 /*
3883  * Float class 2
3884  */
3885 
3886 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3887 {
3888     TCGv_i32 ta, tb, tc, ty;
3889 
3890     nullify_over(ctx);
3891 
3892     ta = load_frw0_i32(a->r1);
3893     tb = load_frw0_i32(a->r2);
3894     ty = tcg_constant_i32(a->y);
3895     tc = tcg_constant_i32(a->c);
3896 
3897     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
3898 
3899     return nullify_end(ctx);
3900 }
3901 
3902 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3903 {
3904     TCGv_i64 ta, tb;
3905     TCGv_i32 tc, ty;
3906 
3907     nullify_over(ctx);
3908 
3909     ta = load_frd0(a->r1);
3910     tb = load_frd0(a->r2);
3911     ty = tcg_constant_i32(a->y);
3912     tc = tcg_constant_i32(a->c);
3913 
3914     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
3915 
3916     return nullify_end(ctx);
3917 }
3918 
3919 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3920 {
3921     TCGv_i64 t;
3922 
3923     nullify_over(ctx);
3924 
3925     t = tcg_temp_new_i64();
3926     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
3927 
3928     if (a->y == 1) {
3929         int mask;
3930         bool inv = false;
3931 
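             /* Within fr0_shadow, bit 26 is the FPSR C bit and bits
                21..11 hold the compare queue, most recent entry first;
                each accN/rejN mask below tests C plus the most recent
                N-1 queue entries.  */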
3932         switch (a->c) {
3933         case 0: /* simple */
3934             tcg_gen_andi_i64(t, t, 0x4000000);
3935             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3936             goto done;
3937         case 2: /* rej */
3938             inv = true;
3939             /* fallthru */
3940         case 1: /* acc */
3941             mask = 0x43ff800;
3942             break;
3943         case 6: /* rej8 */
3944             inv = true;
3945             /* fallthru */
3946         case 5: /* acc8 */
3947             mask = 0x43f8000;
3948             break;
3949         case 9: /* acc6 */
3950             mask = 0x43e0000;
3951             break;
3952         case 13: /* acc4 */
3953             mask = 0x4380000;
3954             break;
3955         case 17: /* acc2 */
3956             mask = 0x4200000;
3957             break;
3958         default:
3959             gen_illegal(ctx);
3960             return true;
3961         }
3962         if (inv) {
3963             TCGv_i64 c = tcg_constant_i64(mask);
3964             tcg_gen_or_i64(t, t, c);
3965             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3966         } else {
3967             tcg_gen_andi_i64(t, t, mask);
3968             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3969         }
3970     } else {
3971         unsigned cbit = (a->y ^ 1) - 1;
3972 
3973         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
3974         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3975     }
3976 
3977  done:
3978     return nullify_end(ctx);
3979 }
3980 
3981 /*
3982  * Float class 3
3983  */
3984 
3985 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3986 {
3987     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3988 }
3989 
3990 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3991 {
3992     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3993 }
3994 
3995 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3996 {
3997     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3998 }
3999 
4000 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4001 {
4002     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4003 }
4004 
4005 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4006 {
4007     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4008 }
4009 
4010 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4011 {
4012     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4013 }
4014 
4015 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4016 {
4017     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4018 }
4019 
4020 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4021 {
4022     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4023 }
4024 
4025 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4026 {
4027     TCGv_i64 x, y;
4028 
4029     nullify_over(ctx);
4030 
4031     x = load_frw0_i64(a->r1);
4032     y = load_frw0_i64(a->r2);
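         /* Both operands are zero-extended 32-bit words, so the i64
            multiply below produces the full 64-bit unsigned product.  */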
4033     tcg_gen_mul_i64(x, x, y);
4034     save_frd(a->t, x);
4035 
4036     return nullify_end(ctx);
4037 }
4038 
4039 /* Convert the fmpyadd single-precision register encodings to standard.  */
4040 static inline int fmpyadd_s_reg(unsigned r)
4041 {
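         /* E.g. r = 5 yields 21 (%fr21L) while r = 21 yields 53
            (%fr21R): bit 4 of the encoding selects the register half,
            bits 3:0 select among %fr16-%fr31.  */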
4042     return (r & 16) * 2 + 16 + (r & 15);
4043 }
4044 
4045 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4046 {
4047     int tm = fmpyadd_s_reg(a->tm);
4048     int ra = fmpyadd_s_reg(a->ra);
4049     int ta = fmpyadd_s_reg(a->ta);
4050     int rm2 = fmpyadd_s_reg(a->rm2);
4051     int rm1 = fmpyadd_s_reg(a->rm1);
4052 
4053     nullify_over(ctx);
4054 
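         /* FMPYADD performs two independent operations in one insn:
            a multiply into TM and a separate add or subtract into TA;
            neither result feeds the other.  */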
4055     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4056     do_fop_weww(ctx, ta, ta, ra,
4057                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4058 
4059     return nullify_end(ctx);
4060 }
4061 
4062 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4063 {
4064     return do_fmpyadd_s(ctx, a, false);
4065 }
4066 
4067 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4068 {
4069     return do_fmpyadd_s(ctx, a, true);
4070 }
4071 
4072 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4073 {
4074     nullify_over(ctx);
4075 
4076     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4077     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4078                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4079 
4080     return nullify_end(ctx);
4081 }
4082 
4083 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4084 {
4085     return do_fmpyadd_d(ctx, a, false);
4086 }
4087 
4088 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4089 {
4090     return do_fmpyadd_d(ctx, a, true);
4091 }
4092 
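     /* Unlike FMPYADD above, FMPYFADD (PA 2.0) is a true fused
        multiply-add: t = rm1 * rm2 + ra3, with the product optionally
        negated.  */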
4093 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4094 {
4095     TCGv_i32 x, y, z;
4096 
4097     nullify_over(ctx);
4098     x = load_frw0_i32(a->rm1);
4099     y = load_frw0_i32(a->rm2);
4100     z = load_frw0_i32(a->ra3);
4101 
4102     if (a->neg) {
4103         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4104     } else {
4105         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4106     }
4107 
4108     save_frw_i32(a->t, x);
4109     return nullify_end(ctx);
4110 }
4111 
4112 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4113 {
4114     TCGv_i64 x, y, z;
4115 
4116     nullify_over(ctx);
4117     x = load_frd0(a->rm1);
4118     y = load_frd0(a->rm2);
4119     z = load_frd0(a->ra3);
4120 
4121     if (a->neg) {
4122         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4123     } else {
4124         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4125     }
4126 
4127     save_frd(a->t, x);
4128     return nullify_end(ctx);
4129 }
4130 
4131 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4132 {
4133     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4134 #ifndef CONFIG_USER_ONLY
4135     if (a->i == 0x100) {
4136         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4137         nullify_over(ctx);
4138         gen_helper_diag_btlb(tcg_env);
4139         return nullify_end(ctx);
4140     }
4141 #endif
4142     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4143     return true;
4144 }
4145 
4146 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4147 {
4148     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4149     int bound;
4150 
4151     ctx->cs = cs;
4152     ctx->tb_flags = ctx->base.tb->flags;
4153     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4154 
4155 #ifdef CONFIG_USER_ONLY
4156     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4157     ctx->mmu_idx = MMU_USER_IDX;
4158     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4159     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4160     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4161 #else
4162     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4163     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4164                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4165                     : MMU_PHYS_IDX);
4166 
4167     /* Recover the IAOQ values from the GVA + PRIV.  */
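         /* cs_base packs the front space (IASQ_F) into its high bits
            and the signed iaoq_b - iaoq_f offset into the low 32 bits;
            a zero offset means the back of the queue is unknown.  */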
4168     uint64_t cs_base = ctx->base.tb->cs_base;
4169     uint64_t iasq_f = cs_base & ~0xffffffffull;
4170     int32_t diff = cs_base;
4171 
4172     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4173     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4174 #endif
4175     ctx->iaoq_n = -1;
4176     ctx->iaoq_n_var = NULL;
4177 
4178     /* Bound the number of instructions by those left on the page.  */
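         /* E.g. with 4 KiB pages and pc_first ending in 0xff0, the OR
            yields -16, so bound = 16 / 4 = 4 insns remain.  */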
4179     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4180     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4181 }
4182 
4183 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4184 {
4185     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4186 
4187     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4188     ctx->null_cond = cond_make_f();
4189     ctx->psw_n_nonzero = false;
4190     if (ctx->tb_flags & PSW_N) {
4191         ctx->null_cond.c = TCG_COND_ALWAYS;
4192         ctx->psw_n_nonzero = true;
4193     }
4194     ctx->null_lab = NULL;
4195 }
4196 
4197 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4198 {
4199     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4200 
4201     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4202 }
4203 
4204 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4205 {
4206     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4207     CPUHPPAState *env = cpu_env(cs);
4208     DisasJumpType ret;
4209 
4210     /* Execute one insn.  */
4211 #ifdef CONFIG_USER_ONLY
4212     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4213         do_page_zero(ctx);
4214         ret = ctx->base.is_jmp;
4215         assert(ret != DISAS_NEXT);
4216     } else
4217 #endif
4218     {
4219         /* Always fetch the insn, even if nullified, so that we check
4220            the page permissions for execute.  */
4221         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4222 
4223         /* Set up the IA queue for the next insn.
4224            This will be overwritten by a branch.  */
4225         if (ctx->iaoq_b == -1) {
4226             ctx->iaoq_n = -1;
4227             ctx->iaoq_n_var = tcg_temp_new_i64();
4228             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4229         } else {
4230             ctx->iaoq_n = ctx->iaoq_b + 4;
4231             ctx->iaoq_n_var = NULL;
4232         }
4233 
4234         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4235             ctx->null_cond.c = TCG_COND_NEVER;
4236             ret = DISAS_NEXT;
4237         } else {
4238             ctx->insn = insn;
4239             if (!decode(ctx, insn)) {
4240                 gen_illegal(ctx);
4241             }
4242             ret = ctx->base.is_jmp;
4243             assert(ctx->null_lab == NULL);
4244         }
4245     }
4246 
4247     /* Advance the insn queue.  Note that this check also detects
4248        a privilege change within the instruction queue.  */
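         /* Chain directly to the next TB only when the new queue front
            is known and nullification is constant at translate time;
            otherwise leave via a stale-queue exit and resolve PSW[N]
            at runtime.  */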
4249     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4250         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4251             && use_goto_tb(ctx, ctx->iaoq_b)
4252             && (ctx->null_cond.c == TCG_COND_NEVER
4253                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4254             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4255             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4256             ctx->base.is_jmp = ret = DISAS_NORETURN;
4257         } else {
4258             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4259         }
4260     }
4261     ctx->iaoq_f = ctx->iaoq_b;
4262     ctx->iaoq_b = ctx->iaoq_n;
4263     ctx->base.pc_next += 4;
4264 
4265     switch (ret) {
4266     case DISAS_NORETURN:
4267     case DISAS_IAQ_N_UPDATED:
4268         break;
4269 
4270     case DISAS_NEXT:
4271     case DISAS_IAQ_N_STALE:
4272     case DISAS_IAQ_N_STALE_EXIT:
4273         if (ctx->iaoq_f == -1) {
4274             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4275             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4276 #ifndef CONFIG_USER_ONLY
4277             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4278 #endif
4279             nullify_save(ctx);
4280             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4281                                 ? DISAS_EXIT
4282                                 : DISAS_IAQ_N_UPDATED);
4283         } else if (ctx->iaoq_b == -1) {
4284             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4285         }
4286         break;
4287 
4288     default:
4289         g_assert_not_reached();
4290     }
4291 }
4292 
4293 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4294 {
4295     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4296     DisasJumpType is_jmp = ctx->base.is_jmp;
4297 
4298     switch (is_jmp) {
4299     case DISAS_NORETURN:
4300         break;
4301     case DISAS_TOO_MANY:
4302     case DISAS_IAQ_N_STALE:
4303     case DISAS_IAQ_N_STALE_EXIT:
4304         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4305         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4306         nullify_save(ctx);
4307         /* FALLTHRU */
4308     case DISAS_IAQ_N_UPDATED:
4309         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4310             tcg_gen_lookup_and_goto_ptr();
4311             break;
4312         }
4313         /* FALLTHRU */
4314     case DISAS_EXIT:
4315         tcg_gen_exit_tb(NULL, 0);
4316         break;
4317     default:
4318         g_assert_not_reached();
4319     }
4320 }
4321 
4322 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4323                               CPUState *cs, FILE *logfile)
4324 {
4325     target_ulong pc = dcbase->pc_first;
4326 
4327 #ifdef CONFIG_USER_ONLY
4328     switch (pc) {
4329     case 0x00:
4330         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4331         return;
4332     case 0xb0:
4333         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4334         return;
4335     case 0xe0:
4336         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4337         return;
4338     case 0x100:
4339         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4340         return;
4341     }
4342 #endif
4343 
4344     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4345     target_disas(logfile, cs, pc, dcbase->tb->size);
4346 }
4347 
4348 static const TranslatorOps hppa_tr_ops = {
4349     .init_disas_context = hppa_tr_init_disas_context,
4350     .tb_start           = hppa_tr_tb_start,
4351     .insn_start         = hppa_tr_insn_start,
4352     .translate_insn     = hppa_tr_translate_insn,
4353     .tb_stop            = hppa_tr_tb_stop,
4354     .disas_log          = hppa_tr_disas_log,
4355 };
4356 
4357 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4358                            target_ulong pc, void *host_pc)
4359 {
4360     DisasContext ctx;
4361     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4362 }
4363